//===- SelectionDAGBuilder.cpp - Selection-DAG building -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#include "SelectionDAGBuilder.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/AssignmentTrackingAnalysis.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundleIterator.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcallUtil.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/SwiftErrorValueTracking.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MemoryModelRelaxationAnnotations.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
"llvm/IR/Module.h" 86 #include "llvm/IR/Operator.h" 87 #include "llvm/IR/PatternMatch.h" 88 #include "llvm/IR/Statepoint.h" 89 #include "llvm/IR/Type.h" 90 #include "llvm/IR/User.h" 91 #include "llvm/IR/Value.h" 92 #include "llvm/MC/MCContext.h" 93 #include "llvm/Support/AtomicOrdering.h" 94 #include "llvm/Support/Casting.h" 95 #include "llvm/Support/CommandLine.h" 96 #include "llvm/Support/Compiler.h" 97 #include "llvm/Support/Debug.h" 98 #include "llvm/Support/InstructionCost.h" 99 #include "llvm/Support/MathExtras.h" 100 #include "llvm/Support/raw_ostream.h" 101 #include "llvm/Target/TargetIntrinsicInfo.h" 102 #include "llvm/Target/TargetMachine.h" 103 #include "llvm/Target/TargetOptions.h" 104 #include "llvm/TargetParser/Triple.h" 105 #include "llvm/Transforms/Utils/Local.h" 106 #include <cstddef> 107 #include <deque> 108 #include <iterator> 109 #include <limits> 110 #include <optional> 111 #include <tuple> 112 113 using namespace llvm; 114 using namespace PatternMatch; 115 using namespace SwitchCG; 116 117 #define DEBUG_TYPE "isel" 118 119 /// LimitFloatPrecision - Generate low-precision inline sequences for 120 /// some float libcalls (6, 8 or 12 bits). 121 static unsigned LimitFloatPrecision; 122 123 static cl::opt<bool> 124 InsertAssertAlign("insert-assert-align", cl::init(true), 125 cl::desc("Insert the experimental `assertalign` node."), 126 cl::ReallyHidden); 127 128 static cl::opt<unsigned, true> 129 LimitFPPrecision("limit-float-precision", 130 cl::desc("Generate low-precision inline sequences " 131 "for some float libcalls"), 132 cl::location(LimitFloatPrecision), cl::Hidden, 133 cl::init(0)); 134 135 static cl::opt<unsigned> SwitchPeelThreshold( 136 "switch-peel-threshold", cl::Hidden, cl::init(66), 137 cl::desc("Set the case probability threshold for peeling the case from a " 138 "switch statement. A value greater than 100 will void this " 139 "optimization")); 140 141 // Limit the width of DAG chains. This is important in general to prevent 142 // DAG-based analysis from blowing up. For example, alias analysis and 143 // load clustering may not complete in reasonable time. It is difficult to 144 // recognize and avoid this situation within each individual analysis, and 145 // future analyses are likely to have the same behavior. Limiting DAG width is 146 // the safe approach and will be especially important with global DAGs. 147 // 148 // MaxParallelChains default is arbitrarily high to avoid affecting 149 // optimization, but could be lowered to improve compile time. Any ld-ld-st-st 150 // sequence over this should have been converted to llvm.memcpy by the 151 // frontend. It is easy to induce this behavior with .ll code such as: 152 // %buffer = alloca [4096 x i8] 153 // %data = load [4096 x i8]* %argPtr 154 // store [4096 x i8] %data, [4096 x i8]* %buffer 155 static const unsigned MaxParallelChains = 64; 156 157 static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL, 158 const SDValue *Parts, unsigned NumParts, 159 MVT PartVT, EVT ValueVT, const Value *V, 160 SDValue InChain, 161 std::optional<CallingConv::ID> CC); 162 163 /// getCopyFromParts - Create a value that contains the specified legal parts 164 /// combined into the value they represent. If the parts combine to a type 165 /// larger than ValueVT then AssertOp can be used to specify whether the extra 166 /// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT 167 /// (ISD::AssertSext). 
static SDValue
getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts,
                 unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V,
                 SDValue InChain,
                 std::optional<CallingConv::ID> CC = std::nullopt,
                 std::optional<ISD::NodeType> AssertOp = std::nullopt) {
  // Let the target assemble the parts if it wants to
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (SDValue Val = TLI.joinRegisterPartsIntoValue(DAG, DL, Parts, NumParts,
                                                   PartVT, ValueVT, CC))
    return Val;

  if (ValueVT.isVector())
    return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT, V,
                                  InChain, CC);

  assert(NumParts > 0 && "No parts to assemble!");
  SDValue Val = Parts[0];

  if (NumParts > 1) {
    // Assemble the value from multiple parts.
    if (ValueVT.isInteger()) {
      unsigned PartBits = PartVT.getSizeInBits();
      unsigned ValueBits = ValueVT.getSizeInBits();

      // Assemble the power of 2 part.
      unsigned RoundParts = llvm::bit_floor(NumParts);
      unsigned RoundBits = PartBits * RoundParts;
      EVT RoundVT = RoundBits == ValueBits ?
        ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
      SDValue Lo, Hi;

      EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);

      if (RoundParts > 2) {
        Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2, PartVT, HalfVT, V,
                              InChain);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2, RoundParts / 2,
                              PartVT, HalfVT, V, InChain);
      } else {
        Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
        Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
      }

      if (DAG.getDataLayout().isBigEndian())
        std::swap(Lo, Hi);

      Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);

      if (RoundParts < NumParts) {
        // Assemble the trailing non-power-of-2 part.
        unsigned OddParts = NumParts - RoundParts;
        EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts, OddParts, PartVT,
                              OddVT, V, InChain, CC);

        // Combine the round and odd parts.
        Lo = Val;
        if (DAG.getDataLayout().isBigEndian())
          std::swap(Lo, Hi);
        EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
        Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
        Hi = DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
                         DAG.getConstant(Lo.getValueSizeInBits(), DL,
                                         TLI.getShiftAmountTy(
                                             TotalVT, DAG.getDataLayout())));
        Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
        Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
      }
    } else if (PartVT.isFloatingPoint()) {
      // FP split into multiple FP parts (for ppcf128)
      assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
             "Unexpected split");
      SDValue Lo, Hi;
      Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
      Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
      if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
        std::swap(Lo, Hi);
      Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
    } else {
      // FP split into integer parts (soft fp)
      assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
             !PartVT.isVector() && "Unexpected split");
      EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
      Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V,
                             InChain, CC);
    }
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  // PartEVT is the type of the register class that holds the value.
  // ValueVT is the type of the inline asm operation.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isInteger() && ValueVT.isFloatingPoint() &&
      ValueVT.bitsLT(PartEVT)) {
    // For an FP value in an integer part, we need to truncate to the right
    // width first.
    PartEVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
    Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val);
  }

  // Handle types that have the same size.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  // Handle types with different sizes.
  if (PartEVT.isInteger() && ValueVT.isInteger()) {
    if (ValueVT.bitsLT(PartEVT)) {
      // For a truncate, see if we have any information to
      // indicate whether the truncated bits will always be
      // zero or sign-extension.
      if (AssertOp)
        Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
                          DAG.getValueType(ValueVT));
      return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    }
    return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
  }

  if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    // FP_ROUND's are always exact here.
    if (ValueVT.bitsLT(Val.getValueType())) {

      SDValue NoChange =
          DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));

      if (DAG.getMachineFunction().getFunction().getAttributes().hasFnAttr(
              llvm::Attribute::StrictFP)) {
        return DAG.getNode(ISD::STRICT_FP_ROUND, DL,
                           DAG.getVTList(ValueVT, MVT::Other), InChain, Val,
                           NoChange);
      }

      return DAG.getNode(ISD::FP_ROUND, DL, ValueVT, Val, NoChange);
    }

    return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
  }

  // Handle MMX to a narrower integer type by bitcasting MMX to integer and
  // then truncating.
  if (PartEVT == MVT::x86mmx && ValueVT.isInteger() &&
      ValueVT.bitsLT(PartEVT)) {
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Val);
    return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  report_fatal_error("Unknown mismatch in getCopyFromParts!");
}

static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
                                              const Twine &ErrMsg) {
  const Instruction *I = dyn_cast_or_null<Instruction>(V);
  if (!V)
    return Ctx.emitError(ErrMsg);

  const char *AsmError = ", possible invalid constraint for vector type";
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (CI->isInlineAsm())
      return Ctx.emitError(I, ErrMsg + AsmError);

  return Ctx.emitError(I, ErrMsg);
}

/// getCopyFromPartsVector - Create a value that contains the specified legal
/// parts combined into the value they represent.  If the parts combine to a
/// type larger than ValueVT then AssertOp can be used to specify whether the
/// extra bits are known to be zero (ISD::AssertZext) or sign extended from
/// ValueVT (ISD::AssertSext).
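///
/// An illustrative case: a <4 x i32> value that arrives as two <2 x i32>
/// register parts is reassembled below with ISD::CONCAT_VECTORS.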
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      SDValue InChain,
                                      std::optional<CallingConv::ID> CallConv) {
  assert(ValueVT.isVector() && "Not a vector value");
  assert(NumParts > 0 && "No parts to assemble!");
  const bool IsABIRegCopy = CallConv.has_value();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  // Handle a multi-element vector.
  if (NumParts > 1) {
    EVT IntermediateVT;
    MVT RegisterVT;
    unsigned NumIntermediates;
    unsigned NumRegs;

    if (IsABIRegCopy) {
      NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
          *DAG.getContext(), *CallConv, ValueVT, IntermediateVT,
          NumIntermediates, RegisterVT);
    } else {
      NumRegs =
          TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                     NumIntermediates, RegisterVT);
    }

    assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
    NumParts = NumRegs; // Silence a compiler warning.
    assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
    assert(RegisterVT.getSizeInBits() ==
               Parts[0].getSimpleValueType().getSizeInBits() &&
           "Part type sizes don't match!");

    // Assemble the parts into intermediate operands.
    SmallVector<SDValue, 8> Ops(NumIntermediates);
    if (NumIntermediates == NumParts) {
      // If the register was not expanded, truncate or copy the value,
      // as appropriate.
      for (unsigned i = 0; i != NumParts; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1, PartVT, IntermediateVT,
                                  V, InChain, CallConv);
    } else if (NumParts > 0) {
      // If the intermediate type was expanded, build the intermediate
      // operands from the parts.
      assert(NumParts % NumIntermediates == 0 &&
             "Must expand into a divisible number of parts!");
      unsigned Factor = NumParts / NumIntermediates;
      for (unsigned i = 0; i != NumIntermediates; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor, PartVT,
                                  IntermediateVT, V, InChain, CallConv);
    }

    // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
    // intermediate operands.
    EVT BuiltVectorTy =
        IntermediateVT.isVector()
            ? EVT::getVectorVT(
                  *DAG.getContext(), IntermediateVT.getScalarType(),
                  IntermediateVT.getVectorElementCount() * NumParts)
            : EVT::getVectorVT(*DAG.getContext(),
                               IntermediateVT.getScalarType(),
                               NumIntermediates);
    Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
                                                : ISD::BUILD_VECTOR,
                      DL, BuiltVectorTy, Ops);
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isVector()) {
    // Vector/Vector bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

    // If the parts vector has more elements than the value vector, then we
    // have a vector widening case (e.g. <2 x float> -> <4 x float>).
    // Extract the elements we want.
    if (PartEVT.getVectorElementCount() != ValueVT.getVectorElementCount()) {
      assert((PartEVT.getVectorElementCount().getKnownMinValue() >
              ValueVT.getVectorElementCount().getKnownMinValue()) &&
             (PartEVT.getVectorElementCount().isScalable() ==
              ValueVT.getVectorElementCount().isScalable()) &&
             "Cannot narrow, it would be a lossy transformation");
      PartEVT =
          EVT::getVectorVT(*DAG.getContext(), PartEVT.getVectorElementType(),
                           ValueVT.getVectorElementCount());
      Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, PartEVT, Val,
                        DAG.getVectorIdxConstant(0, DL));
      if (PartEVT == ValueVT)
        return Val;
      if (PartEVT.isInteger() && ValueVT.isFloatingPoint())
        return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

      // Vector/Vector bitcast (e.g. <2 x bfloat> -> <2 x half>).
      if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
        return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
    }

    // Promoted vector extract
    return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);
  }

  // Trivial bitcast if the types are the same size and the destination
  // vector type is legal.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
      TLI.isTypeLegal(ValueVT))
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  if (ValueVT.getVectorNumElements() != 1) {
    // Certain ABIs require that vectors are passed as integers. For vectors
    // of the same size, this is an obvious bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
    } else if (ValueVT.bitsLT(PartEVT)) {
      const uint64_t ValueSize = ValueVT.getFixedSizeInBits();
      EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
      // Drop the extra bits.
      Val = DAG.getNode(ISD::TRUNCATE, DL, IntermediateType, Val);
      return DAG.getBitcast(ValueVT, Val);
    }

    diagnosePossiblyInvalidConstraint(
        *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
    return DAG.getUNDEF(ValueVT);
  }

  // Handle cases such as i8 -> <1 x i1>
  EVT ValueSVT = ValueVT.getVectorElementType();
  if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT) {
    unsigned ValueSize = ValueSVT.getSizeInBits();
    if (ValueSize == PartEVT.getSizeInBits()) {
      Val = DAG.getNode(ISD::BITCAST, DL, ValueSVT, Val);
    } else if (ValueSVT.isFloatingPoint() && PartEVT.isInteger()) {
      // It's possible a scalar floating point type gets softened to integer
      // and then promoted to a larger integer. If PartEVT is the larger
      // integer we need to truncate it and then bitcast to the FP type.
      assert(ValueSVT.bitsLT(PartEVT) && "Unexpected types");
      EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
      Val = DAG.getNode(ISD::TRUNCATE, DL, IntermediateType, Val);
      Val = DAG.getBitcast(ValueSVT, Val);
    } else {
      Val = ValueVT.isFloatingPoint()
                ? DAG.getFPExtendOrRound(Val, DL, ValueSVT)
                : DAG.getAnyExtOrTrunc(Val, DL, ValueSVT);
    }
  }

  return DAG.getBuildVector(ValueVT, DL, Val);
}

static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 std::optional<CallingConv::ID> CallConv);

/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts.  If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
static void
getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
               unsigned NumParts, MVT PartVT, const Value *V,
               std::optional<CallingConv::ID> CallConv = std::nullopt,
               ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
  // Let the target split the parts if it wants to
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.splitValueIntoRegisterParts(DAG, DL, Val, Parts, NumParts, PartVT,
                                      CallConv))
    return;
  EVT ValueVT = Val.getValueType();

  // Handle the vector case separately.
  if (ValueVT.isVector())
    return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
                                CallConv);

  unsigned OrigNumParts = NumParts;
  assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
         "Copying to an illegal type!");

  if (NumParts == 0)
    return;

  assert(!ValueVT.isVector() && "Vector case handled elsewhere");
  EVT PartEVT = PartVT;
  if (PartEVT == ValueVT) {
    assert(NumParts == 1 && "No-op copy with multiple parts!");
    Parts[0] = Val;
    return;
  }

  unsigned PartBits = PartVT.getSizeInBits();
  if (NumParts * PartBits > ValueVT.getSizeInBits()) {
    // If the parts cover more bits than the value has, promote the value.
    if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
      assert(NumParts == 1 && "Do not know what to promote to!");
      Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
    } else {
      if (ValueVT.isFloatingPoint()) {
        // FP values need to be bitcast, then extended if they are being put
        // into a larger container.
        ValueVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
        Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
      }
      assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
             ValueVT.isInteger() &&
             "Unknown mismatch!");
      ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
      Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
      if (PartVT == MVT::x86mmx)
        Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }
  } else if (PartBits == ValueVT.getSizeInBits()) {
    // Different types of the same size.
    assert(NumParts == 1 && PartEVT != ValueVT);
    Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
    // If the parts cover fewer bits than the value has, truncate the value.
    assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
           ValueVT.isInteger() &&
           "Unknown mismatch!");
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    if (PartVT == MVT::x86mmx)
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  }

  // The value may have changed - recompute ValueVT.
  ValueVT = Val.getValueType();
  assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
         "Failed to tile the value with PartVT!");

  if (NumParts == 1) {
    if (PartEVT != ValueVT) {
      diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
                                        "scalar-to-vector conversion failed");
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }

    Parts[0] = Val;
    return;
  }

  // Expand the value into multiple parts.
  if (NumParts & (NumParts - 1)) {
    // The number of parts is not a power of 2.  Split off and copy the tail.
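    // Illustrative case: copying an i96 into 3 i32 parts. RoundParts is 2, so
    // the high 32 bits are shifted down and emitted as the single "odd" part,
    // and the remaining i64 falls through to the power-of-2 bisection below.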
    assert(PartVT.isInteger() && ValueVT.isInteger() &&
           "Do not know what to expand to!");
    unsigned RoundParts = llvm::bit_floor(NumParts);
    unsigned RoundBits = RoundParts * PartBits;
    unsigned OddParts = NumParts - RoundParts;
    SDValue OddVal =
        DAG.getNode(ISD::SRL, DL, ValueVT, Val,
                    DAG.getShiftAmountConstant(RoundBits, ValueVT, DL));

    getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V,
                   CallConv);

    if (DAG.getDataLayout().isBigEndian())
      // The odd parts were reversed by getCopyToParts - unreverse them.
      std::reverse(Parts + RoundParts, Parts + NumParts);

    NumParts = RoundParts;
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  // The number of parts is a power of 2.  Repeatedly bisect the value using
  // EXTRACT_ELEMENT.
  Parts[0] = DAG.getNode(ISD::BITCAST, DL,
                         EVT::getIntegerVT(*DAG.getContext(),
                                           ValueVT.getSizeInBits()),
                         Val);

  for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
    for (unsigned i = 0; i < NumParts; i += StepSize) {
      unsigned ThisBits = StepSize * PartBits / 2;
      EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
      SDValue &Part0 = Parts[i];
      SDValue &Part1 = Parts[i+StepSize/2];

      Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
      Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(0, DL));

      if (ThisBits == PartBits && ThisVT != PartVT) {
        Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
        Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
      }
    }
  }

  if (DAG.getDataLayout().isBigEndian())
    std::reverse(Parts, Parts + OrigNumParts);
}

static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val,
                                     const SDLoc &DL, EVT PartVT) {
  if (!PartVT.isVector())
    return SDValue();

  EVT ValueVT = Val.getValueType();
  EVT PartEVT = PartVT.getVectorElementType();
  EVT ValueEVT = ValueVT.getVectorElementType();
  ElementCount PartNumElts = PartVT.getVectorElementCount();
  ElementCount ValueNumElts = ValueVT.getVectorElementCount();

  // We only support widening vectors with equivalent element types and
  // fixed/scalable properties. If a target needs to widen a fixed-length type
  // to a scalable one, it should be possible to use INSERT_SUBVECTOR below.
  if (ElementCount::isKnownLE(PartNumElts, ValueNumElts) ||
      PartNumElts.isScalable() != ValueNumElts.isScalable())
    return SDValue();

  // Make an exception for bf16, because some targets share the f16 ABI with
  // bf16.
  if (ValueEVT == MVT::bf16 && PartEVT == MVT::f16) {
    assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
           "Cannot widen to illegal type");
    Val = DAG.getNode(ISD::BITCAST, DL,
                      ValueVT.changeVectorElementType(MVT::f16), Val);
  } else if (PartEVT != ValueEVT) {
    return SDValue();
  }

  // Widening a scalable vector to another scalable vector is done by inserting
  // the vector into a larger undef one.
  if (PartNumElts.isScalable())
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
                       Val, DAG.getVectorIdxConstant(0, DL));

  // Vector widening case, e.g. <2 x float> -> <4 x float>.  Shuffle in
  // undef elements.
  SmallVector<SDValue, 16> Ops;
  DAG.ExtractVectorElements(Val, Ops);
  SDValue EltUndef = DAG.getUNDEF(PartEVT);
  Ops.append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);

  // FIXME: Use CONCAT for 2x -> 4x.
  return DAG.getBuildVector(PartVT, DL, Ops);
}

/// getCopyToPartsVector - Create a series of nodes that contain the specified
/// value split into legal parts.
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 std::optional<CallingConv::ID> CallConv) {
  EVT ValueVT = Val.getValueType();
  assert(ValueVT.isVector() && "Not a vector");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const bool IsABIRegCopy = CallConv.has_value();

  if (NumParts == 1) {
    EVT PartEVT = PartVT;
    if (PartEVT == ValueVT) {
      // Nothing to do.
    } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
      // Bitconvert vector->vector case.
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    } else if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, PartVT)) {
      Val = Widened;
    } else if (PartVT.isVector() &&
               PartEVT.getVectorElementType().bitsGE(
                   ValueVT.getVectorElementType()) &&
               PartEVT.getVectorElementCount() ==
                   ValueVT.getVectorElementCount()) {

      // Promoted vector extract
      Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
    } else if (PartEVT.isVector() &&
               PartEVT.getVectorElementType() !=
                   ValueVT.getVectorElementType() &&
               TLI.getTypeAction(*DAG.getContext(), ValueVT) ==
                   TargetLowering::TypeWidenVector) {
      // Combination of widening and promotion.
      EVT WidenVT =
          EVT::getVectorVT(*DAG.getContext(), ValueVT.getVectorElementType(),
                           PartVT.getVectorElementCount());
      SDValue Widened = widenVectorToPartType(DAG, Val, DL, WidenVT);
      Val = DAG.getAnyExtOrTrunc(Widened, DL, PartVT);
    } else {
      // Don't extract an integer from a float vector. This can happen if the
      // FP type gets softened to integer and then promoted. The promotion
      // prevents it from being picked up by the earlier bitcast case.
      if (ValueVT.getVectorElementCount().isScalar() &&
          (!ValueVT.isFloatingPoint() || !PartVT.isInteger())) {
        // If we reach this condition and PartVT is FP, this means that
        // ValueVT is also FP and both have a different size, otherwise we
        // would have bitcasted them. Producing an EXTRACT_VECTOR_ELT here
        // would be invalid since that would mean the smaller FP type has to
        // be extended to the larger one.
        if (PartVT.isFloatingPoint()) {
          Val = DAG.getBitcast(ValueVT.getScalarType(), Val);
          Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
        } else
          Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
                            DAG.getVectorIdxConstant(0, DL));
      } else {
        uint64_t ValueSize = ValueVT.getFixedSizeInBits();
        assert(PartVT.getFixedSizeInBits() > ValueSize &&
               "lossy conversion of vector to scalar type");
        EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
        Val = DAG.getBitcast(IntermediateType, Val);
        Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
      }
    }

    assert(Val.getValueType() == PartVT && "Unexpected vector part value type");
    Parts[0] = Val;
    return;
  }

  // Handle a multi-element vector.
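  // Illustrative case (assuming a hypothetical target whose widest legal
  // vector register is <4 x i16>): a <8 x i16> value breaks down into
  // NumIntermediates == 2 intermediates of IntermediateVT == <4 x i16>,
  // one per register part.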
  EVT IntermediateVT;
  MVT RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs;
  if (IsABIRegCopy) {
    NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
        *DAG.getContext(), *CallConv, ValueVT, IntermediateVT, NumIntermediates,
        RegisterVT);
  } else {
    NumRegs =
        TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                   NumIntermediates, RegisterVT);
  }

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  NumParts = NumRegs; // Silence a compiler warning.
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");

  assert(IntermediateVT.isScalableVector() == ValueVT.isScalableVector() &&
         "Mixing scalable and fixed vectors when copying in parts");

  std::optional<ElementCount> DestEltCnt;

  if (IntermediateVT.isVector())
    DestEltCnt = IntermediateVT.getVectorElementCount() * NumIntermediates;
  else
    DestEltCnt = ElementCount::getFixed(NumIntermediates);

  EVT BuiltVectorTy = EVT::getVectorVT(
      *DAG.getContext(), IntermediateVT.getScalarType(), *DestEltCnt);

  if (ValueVT == BuiltVectorTy) {
    // Nothing to do.
  } else if (ValueVT.getSizeInBits() == BuiltVectorTy.getSizeInBits()) {
    // Bitconvert vector->vector case.
    Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);
  } else {
    if (BuiltVectorTy.getVectorElementType().bitsGT(
            ValueVT.getVectorElementType())) {
      // Integer promotion.
      ValueVT = EVT::getVectorVT(*DAG.getContext(),
                                 BuiltVectorTy.getVectorElementType(),
                                 ValueVT.getVectorElementCount());
      Val = DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
    }

    if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, BuiltVectorTy)) {
      Val = Widened;
    }
  }

  assert(Val.getValueType() == BuiltVectorTy && "Unexpected vector value type");

  // Split the vector into intermediate operands.
  SmallVector<SDValue, 8> Ops(NumIntermediates);
  for (unsigned i = 0; i != NumIntermediates; ++i) {
    if (IntermediateVT.isVector()) {
      // This does something sensible for scalable vectors - see the
      // definition of EXTRACT_SUBVECTOR for further details.
      unsigned IntermediateNumElts = IntermediateVT.getVectorMinNumElements();
      Ops[i] =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
                      DAG.getVectorIdxConstant(i * IntermediateNumElts, DL));
    } else {
      Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
                           DAG.getVectorIdxConstant(i, DL));
    }
  }

  // Split the intermediate operands into legal parts.
  if (NumParts == NumIntermediates) {
    // If the register was not expanded, promote or copy the value,
    // as appropriate.
    for (unsigned i = 0; i != NumParts; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V, CallConv);
  } else if (NumParts > 0) {
    // If the intermediate type was expanded, split each value into
    // legal parts.
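    // e.g. with NumParts == 4 and NumIntermediates == 2, each intermediate
    // operand is itself copied into Factor == 2 register parts.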
    assert(NumIntermediates != 0 && "division by zero");
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
    for (unsigned i = 0; i != NumIntermediates; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i * Factor], Factor, PartVT, V,
                     CallConv);
  }
}

RegsForValue::RegsForValue(const SmallVector<Register, 4> &regs, MVT regvt,
                           EVT valuevt, std::optional<CallingConv::ID> CC)
    : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
      RegCount(1, regs.size()), CallConv(CC) {}

RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
                           const DataLayout &DL, Register Reg, Type *Ty,
                           std::optional<CallingConv::ID> CC) {
  ComputeValueVTs(TLI, DL, Ty, ValueVTs);

  CallConv = CC;

  for (EVT ValueVT : ValueVTs) {
    unsigned NumRegs =
        isABIMangled()
            ? TLI.getNumRegistersForCallingConv(Context, *CC, ValueVT)
            : TLI.getNumRegisters(Context, ValueVT);
    MVT RegisterVT =
        isABIMangled()
            ? TLI.getRegisterTypeForCallingConv(Context, *CC, ValueVT)
            : TLI.getRegisterType(Context, ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i)
      Regs.push_back(Reg + i);
    RegVTs.push_back(RegisterVT);
    RegCount.push_back(NumRegs);
    Reg = Reg.id() + NumRegs;
  }
}

SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
                                      FunctionLoweringInfo &FuncInfo,
                                      const SDLoc &dl, SDValue &Chain,
                                      SDValue *Glue, const Value *V) const {
  // A Value with type {} or [0 x %t] needs no registers.
  if (ValueVTs.empty())
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Assemble the legal parts into the final values.
  SmallVector<SDValue, 4> Values(ValueVTs.size());
  SmallVector<SDValue, 8> Parts;
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e;
       ++Value) {
    // Copy the legal parts from the registers.
    EVT ValueVT = ValueVTs[Value];
    unsigned NumRegs = RegCount[Value];
    MVT RegisterVT = isABIMangled()
                         ? TLI.getRegisterTypeForCallingConv(
                               *DAG.getContext(), *CallConv, RegVTs[Value])
                         : RegVTs[Value];

    Parts.resize(NumRegs);
    for (unsigned i = 0; i != NumRegs; ++i) {
      SDValue P;
      if (!Glue) {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
      } else {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Glue);
        *Glue = P.getValue(2);
      }

      Chain = P.getValue(1);
      Parts[i] = P;

      // If the source register was virtual and if we know something about it,
      // add an assert node.
      if (!Register::isVirtualRegister(Regs[Part + i]) ||
          !RegisterVT.isInteger())
        continue;

      const FunctionLoweringInfo::LiveOutInfo *LOI =
          FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
      if (!LOI)
        continue;

      unsigned RegSize = RegisterVT.getScalarSizeInBits();
      unsigned NumSignBits = LOI->NumSignBits;
      unsigned NumZeroBits = LOI->Known.countMinLeadingZeros();

      if (NumZeroBits == RegSize) {
        // The current value is a zero.
        // Explicitly express that so it is easier for
        // optimizations to kick in.
        Parts[i] = DAG.getConstant(0, dl, RegisterVT);
        continue;
      }

      // FIXME: We capture more information than the dag can represent.  For
      // now, just use the tightest assertzext/assertsext possible.
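      // For example, if the high 24 bits of a 32-bit register are known to be
      // zero, assert a zero-extension from i8 (FromVT below).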
      bool isSExt;
      EVT FromVT(MVT::Other);
      if (NumZeroBits) {
        FromVT = EVT::getIntegerVT(*DAG.getContext(), RegSize - NumZeroBits);
        isSExt = false;
      } else if (NumSignBits > 1) {
        FromVT =
            EVT::getIntegerVT(*DAG.getContext(), RegSize - NumSignBits + 1);
        isSExt = true;
      } else {
        continue;
      }
      // Add an assertion node.
      assert(FromVT != MVT::Other);
      Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
                             RegisterVT, P, DAG.getValueType(FromVT));
    }

    Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), NumRegs,
                                     RegisterVT, ValueVT, V, Chain, CallConv);
    Part += NumRegs;
    Parts.clear();
  }

  return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
}

void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
                                 const SDLoc &dl, SDValue &Chain, SDValue *Glue,
                                 const Value *V,
                                 ISD::NodeType PreferredExtendType) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  ISD::NodeType ExtendKind = PreferredExtendType;

  // Get the list of the value's legal parts.
  unsigned NumRegs = Regs.size();
  SmallVector<SDValue, 8> Parts(NumRegs);
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e;
       ++Value) {
    unsigned NumParts = RegCount[Value];

    MVT RegisterVT = isABIMangled()
                         ? TLI.getRegisterTypeForCallingConv(
                               *DAG.getContext(), *CallConv, RegVTs[Value])
                         : RegVTs[Value];

    if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
      ExtendKind = ISD::ZERO_EXTEND;

    getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), &Parts[Part],
                   NumParts, RegisterVT, V, CallConv, ExtendKind);
    Part += NumParts;
  }

  // Copy the parts into the registers.
  SmallVector<SDValue, 8> Chains(NumRegs);
  for (unsigned i = 0; i != NumRegs; ++i) {
    SDValue Part;
    if (!Glue) {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
    } else {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Glue);
      *Glue = Part.getValue(1);
    }

    Chains[i] = Part.getValue(0);
  }

  if (NumRegs == 1 || Glue)
    // If NumRegs > 1 && Glue is used then the use of the last CopyToReg is
    // flagged to it. That is, the CopyToReg nodes and the user are considered
    // a single scheduling unit. If we create a TokenFactor and return it as
    // chain, then the TokenFactor is both a predecessor (operand) of the
    // user as well as a successor (the TF operands are flagged to the user).
    //   c1, f1 = CopyToReg
    //   c2, f2 = CopyToReg
    //   c3     = TokenFactor c1, c2
    //   ...
    //          = op c3, ..., f2
    Chain = Chains[NumRegs-1];
  else
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
}

void RegsForValue::AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching,
                                        unsigned MatchingIdx, const SDLoc &dl,
                                        SelectionDAG &DAG,
                                        std::vector<SDValue> &Ops) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  InlineAsm::Flag Flag(Code, Regs.size());
  if (HasMatching)
    Flag.setMatchingOp(MatchingIdx);
  else if (!Regs.empty() && Register::isVirtualRegister(Regs.front())) {
    // Put the register class of the virtual registers in the flag word.  That
    // way, later passes can recompute register class constraints for inline
    // assembly as well as normal instructions.
    // Don't do this for tied operands that can use the regclass information
    // from the def.
    const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
    Flag.setRegClass(RC->getID());
  }

  SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
  Ops.push_back(Res);

  if (Code == InlineAsm::Kind::Clobber) {
    // Clobbers should always have a 1:1 mapping with registers, and may
    // reference registers that have illegal (e.g. vector) types. Hence, we
    // shouldn't try to apply any sort of splitting logic to them.
    assert(Regs.size() == RegVTs.size() && Regs.size() == ValueVTs.size() &&
           "No 1:1 mapping from clobbers to regs?");
    Register SP = TLI.getStackPointerRegisterToSaveRestore();
    (void)SP;
    for (unsigned I = 0, E = ValueVTs.size(); I != E; ++I) {
      Ops.push_back(DAG.getRegister(Regs[I], RegVTs[I]));
      assert(
          (Regs[I] != SP ||
           DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment()) &&
          "If we clobbered the stack pointer, MFI should know about it.");
    }
    return;
  }

  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
    MVT RegisterVT = RegVTs[Value];
    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value],
                                           RegisterVT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      assert(Reg < Regs.size() && "Mismatch in # registers expected");
      unsigned TheReg = Regs[Reg++];
      Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
    }
  }
}

SmallVector<std::pair<Register, TypeSize>, 4>
RegsForValue::getRegsAndSizes() const {
  SmallVector<std::pair<Register, TypeSize>, 4> OutVec;
  unsigned I = 0;
  for (auto CountAndVT : zip_first(RegCount, RegVTs)) {
    unsigned RegCount = std::get<0>(CountAndVT);
    MVT RegisterVT = std::get<1>(CountAndVT);
    TypeSize RegisterSize = RegisterVT.getSizeInBits();
    for (unsigned E = I + RegCount; I != E; ++I)
      OutVec.push_back(std::make_pair(Regs[I], RegisterSize));
  }
  return OutVec;
}

void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis *aa,
                               AssumptionCache *ac,
                               const TargetLibraryInfo *li) {
  AA = aa;
  AC = ac;
  GFI = gfi;
  LibInfo = li;
  Context = DAG.getContext();
  LPadToCallSiteMap.clear();
  SL->init(DAG.getTargetLoweringInfo(), TM, DAG.getDataLayout());
  AssignmentTrackingEnabled = isAssignmentTrackingEnabled(
      *DAG.getMachineFunction().getFunction().getParent());
}

void SelectionDAGBuilder::clear() {
  NodeMap.clear();
  UnusedArgNodeMap.clear();
  PendingLoads.clear();
  PendingExports.clear();
  PendingConstrainedFP.clear();
  PendingConstrainedFPStrict.clear();
  CurInst = nullptr;
  HasTailCall = false;
  SDNodeOrder = LowestSDNodeOrder;
  StatepointLowering.clear();
}

void SelectionDAGBuilder::clearDanglingDebugInfo() {
  DanglingDebugInfoMap.clear();
}

// Update DAG root to include dependencies on Pending chains.
SDValue SelectionDAGBuilder::updateRoot(SmallVectorImpl<SDValue> &Pending) {
  SDValue Root = DAG.getRoot();

  if (Pending.empty())
    return Root;

  // Add current root to PendingChains, unless we already indirectly
  // depend on it.
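  // (Each pending node is typically a load whose chain operand was the root
  // at the time it was created; if any pending node already chains directly
  // to the current root, re-adding Root to the TokenFactor would only create
  // a redundant dependency.)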
  if (Root.getOpcode() != ISD::EntryToken) {
    unsigned i = 0, e = Pending.size();
    for (; i != e; ++i) {
      assert(Pending[i].getNode()->getNumOperands() > 1);
      if (Pending[i].getNode()->getOperand(0) == Root)
        break; // Don't add the root if we already indirectly depend on it.
    }

    if (i == e)
      Pending.push_back(Root);
  }

  if (Pending.size() == 1)
    Root = Pending[0];
  else
    Root = DAG.getTokenFactor(getCurSDLoc(), Pending);

  DAG.setRoot(Root);
  Pending.clear();
  return Root;
}

SDValue SelectionDAGBuilder::getMemoryRoot() {
  return updateRoot(PendingLoads);
}

SDValue SelectionDAGBuilder::getRoot() {
  // Chain up all pending constrained intrinsics together with all
  // pending loads, by simply appending them to PendingLoads and
  // then calling getMemoryRoot().
  PendingLoads.reserve(PendingLoads.size() +
                       PendingConstrainedFP.size() +
                       PendingConstrainedFPStrict.size());
  PendingLoads.append(PendingConstrainedFP.begin(),
                      PendingConstrainedFP.end());
  PendingLoads.append(PendingConstrainedFPStrict.begin(),
                      PendingConstrainedFPStrict.end());
  PendingConstrainedFP.clear();
  PendingConstrainedFPStrict.clear();
  return getMemoryRoot();
}

SDValue SelectionDAGBuilder::getControlRoot() {
  // We need to emit pending fpexcept.strict constrained intrinsics,
  // so append them to the PendingExports list.
  PendingExports.append(PendingConstrainedFPStrict.begin(),
                        PendingConstrainedFPStrict.end());
  PendingConstrainedFPStrict.clear();
  return updateRoot(PendingExports);
}

void SelectionDAGBuilder::handleDebugDeclare(Value *Address,
                                             DILocalVariable *Variable,
                                             DIExpression *Expression,
                                             DebugLoc DL) {
  assert(Variable && "Missing variable");

  // Check if address has undef value.
  if (!Address || isa<UndefValue>(Address) ||
      (Address->use_empty() && !isa<Argument>(Address))) {
    LLVM_DEBUG(
        dbgs()
        << "dbg_declare: Dropping debug info (bad/undef/unused-arg address)\n");
    return;
  }

  bool IsParameter = Variable->isParameter() || isa<Argument>(Address);

  SDValue &N = NodeMap[Address];
  if (!N.getNode() && isa<Argument>(Address))
    // Check unused arguments map.
    N = UnusedArgNodeMap[Address];
  SDDbgValue *SDV;
  if (N.getNode()) {
    if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
      Address = BCI->getOperand(0);
    // Parameters are handled specially.
    auto *FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
    if (IsParameter && FINode) {
      // Byval parameter. We have a frame index at this point.
      SDV = DAG.getFrameIndexDbgValue(Variable, Expression, FINode->getIndex(),
                                      /*IsIndirect*/ true, DL, SDNodeOrder);
    } else if (isa<Argument>(Address)) {
      // Address is an argument, so try to emit its dbg value using
      // virtual register info from the FuncInfo.ValueMap.
      EmitFuncArgumentDbgValue(Address, Variable, Expression, DL,
                               FuncArgumentDbgValueKind::Declare, N);
      return;
    } else {
      SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
                            true, DL, SDNodeOrder);
    }
    DAG.AddDbgValue(SDV, IsParameter);
  } else {
    // If Address is an argument then try to emit its dbg value using
    // virtual register info from the FuncInfo.ValueMap.
    if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, DL,
                                  FuncArgumentDbgValueKind::Declare, N)) {
      LLVM_DEBUG(dbgs() << "dbg_declare: Dropping debug info"
                        << " (could not emit func-arg dbg_value)\n");
    }
  }
  return;
}

void SelectionDAGBuilder::visitDbgInfo(const Instruction &I) {
  // Add SDDbgValue nodes for any var locs here. Do so before updating
  // SDNodeOrder, as this mapping is {Inst -> Locs BEFORE Inst}.
  if (FunctionVarLocs const *FnVarLocs = DAG.getFunctionVarLocs()) {
    for (auto It = FnVarLocs->locs_begin(&I), End = FnVarLocs->locs_end(&I);
         It != End; ++It) {
      auto *Var = FnVarLocs->getDILocalVariable(It->VariableID);
      dropDanglingDebugInfo(Var, It->Expr);
      if (It->Values.isKillLocation(It->Expr)) {
        handleKillDebugValue(Var, It->Expr, It->DL, SDNodeOrder);
        continue;
      }
      SmallVector<Value *> Values(It->Values.location_ops());
      if (!handleDebugValue(Values, Var, It->Expr, It->DL, SDNodeOrder,
                            It->Values.hasArgList())) {
        SmallVector<Value *, 4> Vals(It->Values.location_ops());
        addDanglingDebugInfo(Vals,
                             FnVarLocs->getDILocalVariable(It->VariableID),
                             It->Expr, Vals.size() > 1, It->DL, SDNodeOrder);
      }
    }
  }

  // We must skip DbgVariableRecords if they've already been processed above as
  // we have just emitted the debug values resulting from assignment tracking
  // analysis, making any existing DbgVariableRecords redundant (and probably
  // less correct). We still need to process DbgLabelRecords. This does sink
  // DbgLabelRecords to the bottom of the group of debug records. That
  // shouldn't be important as it does so deterministically and ordering
  // between DbgLabelRecords and DbgVariableRecords is immaterial (other than
  // for MIR/IR printing).
  bool SkipDbgVariableRecords = DAG.getFunctionVarLocs();
  // Is there any debug-info attached to this instruction, in the form of
  // DbgRecord non-instruction debug-info records?
  for (DbgRecord &DR : I.getDbgRecordRange()) {
    if (DbgLabelRecord *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
      assert(DLR->getLabel() && "Missing label");
      SDDbgLabel *SDV =
          DAG.getDbgLabel(DLR->getLabel(), DLR->getDebugLoc(), SDNodeOrder);
      DAG.AddDbgLabel(SDV);
      continue;
    }

    if (SkipDbgVariableRecords)
      continue;
    DbgVariableRecord &DVR = cast<DbgVariableRecord>(DR);
    DILocalVariable *Variable = DVR.getVariable();
    DIExpression *Expression = DVR.getExpression();
    dropDanglingDebugInfo(Variable, Expression);

    if (DVR.getType() == DbgVariableRecord::LocationType::Declare) {
      if (FuncInfo.PreprocessedDVRDeclares.contains(&DVR))
        continue;
      LLVM_DEBUG(dbgs() << "SelectionDAG visiting dbg_declare: " << DVR
                        << "\n");
      handleDebugDeclare(DVR.getVariableLocationOp(0), Variable, Expression,
                         DVR.getDebugLoc());
      continue;
    }

    // A DbgVariableRecord with no locations is a kill location.
    SmallVector<Value *, 4> Values(DVR.location_ops());
    if (Values.empty()) {
      handleKillDebugValue(Variable, Expression, DVR.getDebugLoc(),
                           SDNodeOrder);
      continue;
    }

    // A DbgVariableRecord with an undef or absent location is also a kill
    // location.
    if (llvm::any_of(Values,
                     [](Value *V) { return !V || isa<UndefValue>(V); })) {
      handleKillDebugValue(Variable, Expression, DVR.getDebugLoc(),
                           SDNodeOrder);
      continue;
    }

    bool IsVariadic = DVR.hasArgList();
    if (!handleDebugValue(Values, Variable, Expression, DVR.getDebugLoc(),
                          SDNodeOrder, IsVariadic)) {
      addDanglingDebugInfo(Values, Variable, Expression, IsVariadic,
                           DVR.getDebugLoc(), SDNodeOrder);
    }
  }
}

void SelectionDAGBuilder::visit(const Instruction &I) {
  visitDbgInfo(I);

  // Set up outgoing PHI node register values before emitting the terminator.
  if (I.isTerminator()) {
    HandlePHINodesInSuccessorBlocks(I.getParent());
  }

  // Increase the SDNodeOrder if dealing with a non-debug instruction.
  if (!isa<DbgInfoIntrinsic>(I))
    ++SDNodeOrder;

  CurInst = &I;

  // Set inserted listener only if required.
  bool NodeInserted = false;
  std::unique_ptr<SelectionDAG::DAGNodeInsertedListener> InsertedListener;
  MDNode *PCSectionsMD = I.getMetadata(LLVMContext::MD_pcsections);
  MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra);
  if (PCSectionsMD || MMRA) {
    InsertedListener = std::make_unique<SelectionDAG::DAGNodeInsertedListener>(
        DAG, [&](SDNode *) { NodeInserted = true; });
  }

  visit(I.getOpcode(), I);

  if (!I.isTerminator() && !HasTailCall &&
      !isa<GCStatepointInst>(I)) // statepoints handle their exports internally
    CopyToExportRegsIfNeeded(&I);

  // Handle metadata.
  if (PCSectionsMD || MMRA) {
    auto It = NodeMap.find(&I);
    if (It != NodeMap.end()) {
      if (PCSectionsMD)
        DAG.addPCSections(It->second.getNode(), PCSectionsMD);
      if (MMRA)
        DAG.addMMRAMetadata(It->second.getNode(), MMRA);
    } else if (NodeInserted) {
      // This should not happen; if it does, don't let it go unnoticed so we
      // can fix it. Relevant visit*() function is probably missing a
      // setValue().
      errs() << "warning: losing !pcsections and/or !mmra metadata ["
             << I.getModule()->getName() << "]\n";
      LLVM_DEBUG(I.dump());
      assert(false);
    }
  }

  CurInst = nullptr;
}

void SelectionDAGBuilder::visitPHI(const PHINode &) {
  llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
}

void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
  // Note: this doesn't use InstVisitor, because it has to work with
  // ConstantExpr's in addition to instructions.
  switch (Opcode) {
  default: llvm_unreachable("Unknown instruction type encountered!");
  // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
  case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
#include "llvm/IR/Instruction.def"
  }
}

static bool handleDanglingVariadicDebugInfo(SelectionDAG &DAG,
                                            DILocalVariable *Variable,
                                            DebugLoc DL, unsigned Order,
                                            SmallVectorImpl<Value *> &Values,
                                            DIExpression *Expression) {
  // For variadic dbg_values we will now insert an undef.
  // FIXME: We can potentially recover these!
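  // Each location operand is replaced with an undef constant of its own type,
  // so the variable is reported as optimized-out rather than dropped with no
  // trace.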
  SmallVector<SDDbgOperand, 2> Locs;
  for (const Value *V : Values) {
    auto *Undef = UndefValue::get(V->getType());
    Locs.push_back(SDDbgOperand::fromConst(Undef));
  }
  SDDbgValue *SDV = DAG.getDbgValueList(Variable, Expression, Locs, {},
                                        /*IsIndirect=*/false, DL, Order,
                                        /*IsVariadic=*/true);
  DAG.AddDbgValue(SDV, /*isParameter=*/false);
  return true;
}

void SelectionDAGBuilder::addDanglingDebugInfo(SmallVectorImpl<Value *> &Values,
                                               DILocalVariable *Var,
                                               DIExpression *Expr,
                                               bool IsVariadic, DebugLoc DL,
                                               unsigned Order) {
  if (IsVariadic) {
    handleDanglingVariadicDebugInfo(DAG, Var, DL, Order, Values, Expr);
    return;
  }
  // TODO: Dangling debug info will eventually either be resolved or produce
  // an Undef DBG_VALUE. However in the resolution case, a gap may appear
  // between the original dbg.value location and its resolved DBG_VALUE,
  // which we should ideally fill with an extra Undef DBG_VALUE.
  assert(Values.size() == 1);
  DanglingDebugInfoMap[Values[0]].emplace_back(Var, Expr, DL, Order);
}

void SelectionDAGBuilder::dropDanglingDebugInfo(const DILocalVariable *Variable,
                                                const DIExpression *Expr) {
  auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
    DIVariable *DanglingVariable = DDI.getVariable();
    DIExpression *DanglingExpr = DDI.getExpression();
    if (DanglingVariable == Variable && Expr->fragmentsOverlap(DanglingExpr)) {
      LLVM_DEBUG(dbgs() << "Dropping dangling debug info for "
                        << printDDI(nullptr, DDI) << "\n");
      return true;
    }
    return false;
  };

  for (auto &DDIMI : DanglingDebugInfoMap) {
    DanglingDebugInfoVector &DDIV = DDIMI.second;

    // If debug info is to be dropped, run it through final checks to see
    // whether it can be salvaged.
    for (auto &DDI : DDIV)
      if (isMatchingDbgValue(DDI))
        salvageUnresolvedDbgValue(DDIMI.first, DDI);

    erase_if(DDIV, isMatchingDbgValue);
  }
}

// resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
// generate the debug data structures now that we've seen its definition.
void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
                                                   SDValue Val) {
  auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
  if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
    return;

  DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
  for (auto &DDI : DDIV) {
    DebugLoc DL = DDI.getDebugLoc();
    unsigned ValSDNodeOrder = Val.getNode()->getIROrder();
    unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
    DILocalVariable *Variable = DDI.getVariable();
    DIExpression *Expr = DDI.getExpression();
    assert(Variable->isValidLocationForIntrinsic(DL) &&
           "Expected inlined-at fields to agree");
    SDDbgValue *SDV;
    if (Val.getNode()) {
      // FIXME: I doubt that it is correct to resolve a dangling DbgValue as a
      // FuncArgumentDbgValue (it would be hoisted to the function entry, and
      // if we couldn't resolve it directly when examining the DbgValue
      // intrinsic in the first place we should not be more successful here).
      // Unless we have some test case that proves this to be correct we
      // should avoid calling EmitFuncArgumentDbgValue here.
1474 if (!EmitFuncArgumentDbgValue(V, Variable, Expr, DL,
1475 FuncArgumentDbgValueKind::Value, Val)) {
1476 LLVM_DEBUG(dbgs() << "Resolve dangling debug info for "
1477 << printDDI(V, DDI) << "\n");
1478 LLVM_DEBUG(dbgs() << " By mapping to:\n "; Val.dump());
1479 // Increase the SDNodeOrder for the DbgValue here to make sure it is
1480 // inserted after the definition of Val when emitting the instructions
1481 // after ISel. An alternative could be to teach
1482 // ScheduleDAGSDNodes::EmitSchedule to delay the insertion properly.
1483 LLVM_DEBUG(if (ValSDNodeOrder > DbgSDNodeOrder) dbgs()
1484 << "changing SDNodeOrder from " << DbgSDNodeOrder << " to "
1485 << ValSDNodeOrder << "\n");
1486 SDV = getDbgValue(Val, Variable, Expr, DL,
1487 std::max(DbgSDNodeOrder, ValSDNodeOrder));
1488 DAG.AddDbgValue(SDV, false);
1489 } else
1490 LLVM_DEBUG(dbgs() << "Resolved dangling debug info for "
1491 << printDDI(V, DDI)
1492 << " in EmitFuncArgumentDbgValue\n");
1493 } else {
1494 LLVM_DEBUG(dbgs() << "Dropping debug info for " << printDDI(V, DDI)
1495 << "\n");
1496 auto Undef = UndefValue::get(V->getType());
1497 auto SDV =
1498 DAG.getConstantDbgValue(Variable, Expr, Undef, DL, DbgSDNodeOrder);
1499 DAG.AddDbgValue(SDV, false);
1500 }
1501 }
1502 DDIV.clear();
1503 }
1504
1505 void SelectionDAGBuilder::salvageUnresolvedDbgValue(const Value *V,
1506 DanglingDebugInfo &DDI) {
1507 // TODO: For the variadic implementation, instead of only checking the fail
1508 // state of `handleDebugValue`, we need to know specifically which values were
1509 // invalid, so that we attempt to salvage only those values when processing
1510 // a DIArgList.
1511 const Value *OrigV = V;
1512 DILocalVariable *Var = DDI.getVariable();
1513 DIExpression *Expr = DDI.getExpression();
1514 DebugLoc DL = DDI.getDebugLoc();
1515 unsigned SDOrder = DDI.getSDNodeOrder();
1516
1517 // Currently we consider only dbg.value intrinsics -- we tell the salvager
1518 // that DW_OP_stack_value is desired.
1519 bool StackValue = true;
1520
1521 // Can this Value be encoded without any further work?
1522 if (handleDebugValue(V, Var, Expr, DL, SDOrder, /*IsVariadic=*/false))
1523 return;
1524
1525 // Attempt to salvage back through as many instructions as possible. Bail if
1526 // a non-instruction is seen, such as a constant expression or global
1527 // variable. FIXME: Further work could recover those too.
1528 while (isa<Instruction>(V)) {
1529 const Instruction &VAsInst = *cast<const Instruction>(V);
1530 // Temporary "0", awaiting real implementation.
1531 SmallVector<uint64_t, 16> Ops;
1532 SmallVector<Value *, 4> AdditionalValues;
1533 V = salvageDebugInfoImpl(const_cast<Instruction &>(VAsInst),
1534 Expr->getNumLocationOperands(), Ops,
1535 AdditionalValues);
1536 // If we cannot salvage any further, and haven't yet found a suitable debug
1537 // expression, bail out.
1538 if (!V)
1539 break;
1540
1541 // TODO: If AdditionalValues isn't empty, then the salvage can only be
1542 // represented with a DBG_VALUE_LIST, so we give up. When we have support
1543 // here for variadic dbg_values, remove that condition.
1544 if (!AdditionalValues.empty())
1545 break;
1546
1547 // New value and expr now represent this debuginfo.
1548 Expr = DIExpression::appendOpsToArg(Expr, Ops, 0, StackValue);
1549
1550 // Some kind of simplification occurred: check whether the operand of the
1551 // salvaged debug expression can be encoded in this DAG.
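// For example (illustrative IR, not from a real test): if the value died as
//   %sum = add i64 %a, 1
// the salvager rewrites the location to %a and appends DW_OP_plus_uconst 1
// (plus DW_OP_stack_value) to the expression, which the handleDebugValue
// call below then tries to encode.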
1552 if (handleDebugValue(V, Var, Expr, DL, SDOrder, /*IsVariadic=*/false)) { 1553 LLVM_DEBUG( 1554 dbgs() << "Salvaged debug location info for:\n " << *Var << "\n" 1555 << *OrigV << "\nBy stripping back to:\n " << *V << "\n"); 1556 return; 1557 } 1558 } 1559 1560 // This was the final opportunity to salvage this debug information, and it 1561 // couldn't be done. Place an undef DBG_VALUE at this location to terminate 1562 // any earlier variable location. 1563 assert(OrigV && "V shouldn't be null"); 1564 auto *Undef = UndefValue::get(OrigV->getType()); 1565 auto *SDV = DAG.getConstantDbgValue(Var, Expr, Undef, DL, SDNodeOrder); 1566 DAG.AddDbgValue(SDV, false); 1567 LLVM_DEBUG(dbgs() << "Dropping debug value info for:\n " 1568 << printDDI(OrigV, DDI) << "\n"); 1569 } 1570 1571 void SelectionDAGBuilder::handleKillDebugValue(DILocalVariable *Var, 1572 DIExpression *Expr, 1573 DebugLoc DbgLoc, 1574 unsigned Order) { 1575 Value *Poison = PoisonValue::get(Type::getInt1Ty(*Context)); 1576 DIExpression *NewExpr = 1577 const_cast<DIExpression *>(DIExpression::convertToUndefExpression(Expr)); 1578 handleDebugValue(Poison, Var, NewExpr, DbgLoc, Order, 1579 /*IsVariadic*/ false); 1580 } 1581 1582 bool SelectionDAGBuilder::handleDebugValue(ArrayRef<const Value *> Values, 1583 DILocalVariable *Var, 1584 DIExpression *Expr, DebugLoc DbgLoc, 1585 unsigned Order, bool IsVariadic) { 1586 if (Values.empty()) 1587 return true; 1588 1589 // Filter EntryValue locations out early. 1590 if (visitEntryValueDbgValue(Values, Var, Expr, DbgLoc)) 1591 return true; 1592 1593 SmallVector<SDDbgOperand> LocationOps; 1594 SmallVector<SDNode *> Dependencies; 1595 for (const Value *V : Values) { 1596 // Constant value. 1597 if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) || 1598 isa<ConstantPointerNull>(V)) { 1599 LocationOps.emplace_back(SDDbgOperand::fromConst(V)); 1600 continue; 1601 } 1602 1603 // Look through IntToPtr constants. 1604 if (auto *CE = dyn_cast<ConstantExpr>(V)) 1605 if (CE->getOpcode() == Instruction::IntToPtr) { 1606 LocationOps.emplace_back(SDDbgOperand::fromConst(CE->getOperand(0))); 1607 continue; 1608 } 1609 1610 // If the Value is a frame index, we can create a FrameIndex debug value 1611 // without relying on the DAG at all. 1612 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) { 1613 auto SI = FuncInfo.StaticAllocaMap.find(AI); 1614 if (SI != FuncInfo.StaticAllocaMap.end()) { 1615 LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(SI->second)); 1616 continue; 1617 } 1618 } 1619 1620 // Do not use getValue() in here; we don't want to generate code at 1621 // this point if it hasn't been done yet. 1622 SDValue N = NodeMap[V]; 1623 if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map. 1624 N = UnusedArgNodeMap[V]; 1625 1626 if (N.getNode()) { 1627 // Only emit func arg dbg value for non-variadic dbg.values for now. 1628 if (!IsVariadic && 1629 EmitFuncArgumentDbgValue(V, Var, Expr, DbgLoc, 1630 FuncArgumentDbgValueKind::Value, N)) 1631 return true; 1632 if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) { 1633 // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can 1634 // describe stack slot locations. 1635 // 1636 // Consider "int x = 0; int *px = &x;". 
There are two kinds of
1637 // interesting debug values here after optimization:
1638 //
1639 // dbg.value(i32* %px, !"int *px", !DIExpression()), and
1640 // dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
1641 //
1642 // Both describe the direct values of their associated variables.
1643 Dependencies.push_back(N.getNode());
1644 LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(FISDN->getIndex()));
1645 continue;
1646 }
1647 LocationOps.emplace_back(
1648 SDDbgOperand::fromNode(N.getNode(), N.getResNo()));
1649 continue;
1650 }
1651
1652 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1653 // Special rules apply for the first dbg.values of parameter variables in a
1654 // function. Identify them by the fact that they reference Argument Values,
1655 // that the variable is a parameter, and that there is no inlined-at location
1656 // (i.e. they are parameters of the current function). We need to let them
1657 // dangle until they get an SDNode.
1658 bool IsParamOfFunc =
1659 isa<Argument>(V) && Var->isParameter() && !DbgLoc.getInlinedAt();
1660 if (IsParamOfFunc)
1661 return false;
1662
1663 // The value is not used in this block yet (or it would have an SDNode).
1664 // We still want the value to appear for the user if possible -- if it has
1665 // an associated VReg, we can refer to that instead.
1666 auto VMI = FuncInfo.ValueMap.find(V);
1667 if (VMI != FuncInfo.ValueMap.end()) {
1668 unsigned Reg = VMI->second;
1669 // If this is a PHI node, it may be split up into several MI PHI nodes
1670 // (in FunctionLoweringInfo::set).
1671 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
1672 V->getType(), std::nullopt);
1673 if (RFV.occupiesMultipleRegs()) {
1674 // FIXME: We could potentially support variadic dbg_values here.
1675 if (IsVariadic)
1676 return false;
1677 unsigned Offset = 0;
1678 unsigned BitsToDescribe = 0;
1679 if (auto VarSize = Var->getSizeInBits())
1680 BitsToDescribe = *VarSize;
1681 if (auto Fragment = Expr->getFragmentInfo())
1682 BitsToDescribe = Fragment->SizeInBits;
1683 for (const auto &RegAndSize : RFV.getRegsAndSizes()) {
1684 // Bail out if all bits are described already.
1685 if (Offset >= BitsToDescribe)
1686 break;
1687 // TODO: handle scalable vectors.
1688 unsigned RegisterSize = RegAndSize.second;
1689 unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
1690 ? BitsToDescribe - Offset
1691 : RegisterSize;
1692 auto FragmentExpr = DIExpression::createFragmentExpression(
1693 Expr, Offset, FragmentSize);
1694 if (!FragmentExpr)
1695 continue;
1696 SDDbgValue *SDV = DAG.getVRegDbgValue(
1697 Var, *FragmentExpr, RegAndSize.first, false, DbgLoc, Order);
1698 DAG.AddDbgValue(SDV, false);
1699 Offset += RegisterSize;
1700 }
1701 return true;
1702 }
1703 // We can use simple vreg locations for variadic dbg_values as well.
1704 LocationOps.emplace_back(SDDbgOperand::fromVReg(Reg));
1705 continue;
1706 }
1707 // We failed to create an SDDbgOperand for V.
1708 return false;
1709 }
1710
1711 // We have created an SDDbgOperand for each Value in Values.
1712 assert(!LocationOps.empty());
1713 SDDbgValue *SDV =
1714 DAG.getDbgValueList(Var, Expr, LocationOps, Dependencies,
1715 /*IsIndirect=*/false, DbgLoc, Order, IsVariadic);
1716 DAG.AddDbgValue(SDV, /*isParameter=*/false);
1717 return true;
1718 }
1719
1720 void SelectionDAGBuilder::resolveOrClearDbgInfo() {
1721 // Try to fix up any remaining dangling debug info -- and drop it if we can't.
1721 for (auto &Pair : DanglingDebugInfoMap)
1722 for (auto &DDI : Pair.second)
1723 salvageUnresolvedDbgValue(const_cast<Value *>(Pair.first), DDI);
1724 clearDanglingDebugInfo();
1725 }
1726
1727 /// getCopyFromRegs - If there was a virtual register allocated for the value V,
1728 /// emit CopyFromReg of the specified type Ty. Otherwise return an empty SDValue().
1729 SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
1730 DenseMap<const Value *, Register>::iterator It = FuncInfo.ValueMap.find(V);
1731 SDValue Result;
1732
1733 if (It != FuncInfo.ValueMap.end()) {
1734 Register InReg = It->second;
1735
1736 RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
1737 DAG.getDataLayout(), InReg, Ty,
1738 std::nullopt); // This is not an ABI copy.
1739 SDValue Chain = DAG.getEntryNode();
1740 Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
1741 V);
1742 resolveDanglingDebugInfo(V, Result);
1743 }
1744
1745 return Result;
1746 }
1747
1748 /// getValue - Return an SDValue for the given Value.
1749 SDValue SelectionDAGBuilder::getValue(const Value *V) {
1750 // If we already have an SDValue for this value, use it. It's important
1751 // to do this first, so that we don't create a CopyFromReg if we already
1752 // have a regular SDValue.
1753 SDValue &N = NodeMap[V];
1754 if (N.getNode()) return N;
1755
1756 // If there's a virtual register allocated and initialized for this
1757 // value, use it.
1758 if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
1759 return copyFromReg;
1760
1761 // Otherwise create a new SDValue and remember it.
1762 SDValue Val = getValueImpl(V);
1763 NodeMap[V] = Val;
1764 resolveDanglingDebugInfo(V, Val);
1765 return Val;
1766 }
1767
1768 /// getNonRegisterValue - Return an SDValue for the given Value, but
1769 /// don't look in FuncInfo.ValueMap for a virtual register.
1770 SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
1771 // If we already have an SDValue for this value, use it.
1772 SDValue &N = NodeMap[V];
1773 if (N.getNode()) {
1774 if (isIntOrFPConstant(N)) {
1775 // Remove the debug location from the node as the node is about to be used
1776 // in a location which may differ from the original debug location. This
1777 // is relevant to Constant and ConstantFP nodes because they can appear
1778 // as constant expressions inside PHI nodes.
1779 N->setDebugLoc(DebugLoc());
1780 }
1781 return N;
1782 }
1783
1784 // Otherwise create a new SDValue and remember it.
1785 SDValue Val = getValueImpl(V);
1786 NodeMap[V] = Val;
1787 resolveDanglingDebugInfo(V, Val);
1788 return Val;
1789 }
1790
1791 /// getValueImpl - Helper function for getValue and getNonRegisterValue.
1792 /// Create an SDValue for the given value.
1793 SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
1794 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1795
1796 if (const Constant *C = dyn_cast<Constant>(V)) {
1797 EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);
1798
1799 if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
1800 return DAG.getConstant(*CI, getCurSDLoc(), VT);
1801
1802 if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
1803 return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);
1804
1805 if (const ConstantPtrAuth *CPA = dyn_cast<ConstantPtrAuth>(C)) {
1806 return DAG.getNode(ISD::PtrAuthGlobalAddress, getCurSDLoc(), VT,
1807 getValue(CPA->getPointer()), getValue(CPA->getKey()),
1808 getValue(CPA->getAddrDiscriminator()),
1809 getValue(CPA->getDiscriminator()));
1810 }
1811
1812 if (isa<ConstantPointerNull>(C)) {
1813 unsigned AS = V->getType()->getPointerAddressSpace();
1814 return DAG.getConstant(0, getCurSDLoc(),
1815 TLI.getPointerTy(DAG.getDataLayout(), AS));
1816 }
1817
1818 if (match(C, m_VScale()))
1819 return DAG.getVScale(getCurSDLoc(), VT, APInt(VT.getSizeInBits(), 1));
1820
1821 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
1822 return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);
1823
1824 if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
1825 return DAG.getUNDEF(VT);
1826
1827 if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
1828 visit(CE->getOpcode(), *CE);
1829 SDValue N1 = NodeMap[V];
1830 assert(N1.getNode() && "visit didn't populate the NodeMap!");
1831 return N1;
1832 }
1833
1834 if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
1835 SmallVector<SDValue, 4> Constants;
1836 for (const Use &U : C->operands()) {
1837 SDNode *Val = getValue(U).getNode();
1838 // If the operand is an empty aggregate, there are no values.
1839 if (!Val) continue;
1840 // Add each leaf value from the operand to the Constants list
1841 // to form a flattened list of all the values.
1842 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1843 Constants.push_back(SDValue(Val, i));
1844 }
1845
1846 return DAG.getMergeValues(Constants, getCurSDLoc());
1847 }
1848
1849 if (const ConstantDataSequential *CDS =
1850 dyn_cast<ConstantDataSequential>(C)) {
1851 SmallVector<SDValue, 4> Ops;
1852 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1853 SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
1854 // Add each leaf value from the element to the Ops list
1855 // to form a flattened list of all the values.
1856 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i) 1857 Ops.push_back(SDValue(Val, i)); 1858 } 1859 1860 if (isa<ArrayType>(CDS->getType())) 1861 return DAG.getMergeValues(Ops, getCurSDLoc()); 1862 return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops); 1863 } 1864 1865 if (C->getType()->isStructTy() || C->getType()->isArrayTy()) { 1866 assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) && 1867 "Unknown struct or array constant!"); 1868 1869 SmallVector<EVT, 4> ValueVTs; 1870 ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs); 1871 unsigned NumElts = ValueVTs.size(); 1872 if (NumElts == 0) 1873 return SDValue(); // empty struct 1874 SmallVector<SDValue, 4> Constants(NumElts); 1875 for (unsigned i = 0; i != NumElts; ++i) { 1876 EVT EltVT = ValueVTs[i]; 1877 if (isa<UndefValue>(C)) 1878 Constants[i] = DAG.getUNDEF(EltVT); 1879 else if (EltVT.isFloatingPoint()) 1880 Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT); 1881 else 1882 Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT); 1883 } 1884 1885 return DAG.getMergeValues(Constants, getCurSDLoc()); 1886 } 1887 1888 if (const BlockAddress *BA = dyn_cast<BlockAddress>(C)) 1889 return DAG.getBlockAddress(BA, VT); 1890 1891 if (const auto *Equiv = dyn_cast<DSOLocalEquivalent>(C)) 1892 return getValue(Equiv->getGlobalValue()); 1893 1894 if (const auto *NC = dyn_cast<NoCFIValue>(C)) 1895 return getValue(NC->getGlobalValue()); 1896 1897 if (VT == MVT::aarch64svcount) { 1898 assert(C->isNullValue() && "Can only zero this target type!"); 1899 return DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT, 1900 DAG.getConstant(0, getCurSDLoc(), MVT::nxv16i1)); 1901 } 1902 1903 VectorType *VecTy = cast<VectorType>(V->getType()); 1904 1905 // Now that we know the number and type of the elements, get that number of 1906 // elements into the Ops array based on what kind of constant it is. 1907 if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) { 1908 SmallVector<SDValue, 16> Ops; 1909 unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements(); 1910 for (unsigned i = 0; i != NumElements; ++i) 1911 Ops.push_back(getValue(CV->getOperand(i))); 1912 1913 return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops); 1914 } 1915 1916 if (isa<ConstantAggregateZero>(C)) { 1917 EVT EltVT = 1918 TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType()); 1919 1920 SDValue Op; 1921 if (EltVT.isFloatingPoint()) 1922 Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT); 1923 else 1924 Op = DAG.getConstant(0, getCurSDLoc(), EltVT); 1925 1926 return NodeMap[V] = DAG.getSplat(VT, getCurSDLoc(), Op); 1927 } 1928 1929 llvm_unreachable("Unknown vector constant"); 1930 } 1931 1932 // If this is a static alloca, generate it as the frameindex instead of 1933 // computation. 1934 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) { 1935 DenseMap<const AllocaInst*, int>::iterator SI = 1936 FuncInfo.StaticAllocaMap.find(AI); 1937 if (SI != FuncInfo.StaticAllocaMap.end()) 1938 return DAG.getFrameIndex( 1939 SI->second, TLI.getValueType(DAG.getDataLayout(), AI->getType())); 1940 } 1941 1942 // If this is an instruction which fast-isel has deferred, select it now. 
1943 if (const Instruction *Inst = dyn_cast<Instruction>(V)) { 1944 Register InReg = FuncInfo.InitializeRegForValue(Inst); 1945 1946 RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg, 1947 Inst->getType(), std::nullopt); 1948 SDValue Chain = DAG.getEntryNode(); 1949 return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V); 1950 } 1951 1952 if (const MetadataAsValue *MD = dyn_cast<MetadataAsValue>(V)) 1953 return DAG.getMDNode(cast<MDNode>(MD->getMetadata())); 1954 1955 if (const auto *BB = dyn_cast<BasicBlock>(V)) 1956 return DAG.getBasicBlock(FuncInfo.getMBB(BB)); 1957 1958 llvm_unreachable("Can't get register for value!"); 1959 } 1960 1961 void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) { 1962 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn()); 1963 bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX; 1964 bool IsCoreCLR = Pers == EHPersonality::CoreCLR; 1965 bool IsSEH = isAsynchronousEHPersonality(Pers); 1966 MachineBasicBlock *CatchPadMBB = FuncInfo.MBB; 1967 if (!IsSEH) 1968 CatchPadMBB->setIsEHScopeEntry(); 1969 // In MSVC C++ and CoreCLR, catchblocks are funclets and need prologues. 1970 if (IsMSVCCXX || IsCoreCLR) 1971 CatchPadMBB->setIsEHFuncletEntry(); 1972 } 1973 1974 void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) { 1975 // Update machine-CFG edge. 1976 MachineBasicBlock *TargetMBB = FuncInfo.getMBB(I.getSuccessor()); 1977 FuncInfo.MBB->addSuccessor(TargetMBB); 1978 TargetMBB->setIsEHCatchretTarget(true); 1979 DAG.getMachineFunction().setHasEHCatchret(true); 1980 1981 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn()); 1982 bool IsSEH = isAsynchronousEHPersonality(Pers); 1983 if (IsSEH) { 1984 // If this is not a fall-through branch or optimizations are switched off, 1985 // emit the branch. 1986 if (TargetMBB != NextBlock(FuncInfo.MBB) || 1987 TM.getOptLevel() == CodeGenOptLevel::None) 1988 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, 1989 getControlRoot(), DAG.getBasicBlock(TargetMBB))); 1990 return; 1991 } 1992 1993 // Figure out the funclet membership for the catchret's successor. 1994 // This will be used by the FuncletLayout pass to determine how to order the 1995 // BB's. 1996 // A 'catchret' returns to the outer scope's color. 1997 Value *ParentPad = I.getCatchSwitchParentPad(); 1998 const BasicBlock *SuccessorColor; 1999 if (isa<ConstantTokenNone>(ParentPad)) 2000 SuccessorColor = &FuncInfo.Fn->getEntryBlock(); 2001 else 2002 SuccessorColor = cast<Instruction>(ParentPad)->getParent(); 2003 assert(SuccessorColor && "No parent funclet for catchret!"); 2004 MachineBasicBlock *SuccessorColorMBB = FuncInfo.getMBB(SuccessorColor); 2005 assert(SuccessorColorMBB && "No MBB for SuccessorColor!"); 2006 2007 // Create the terminator node. 2008 SDValue Ret = DAG.getNode(ISD::CATCHRET, getCurSDLoc(), MVT::Other, 2009 getControlRoot(), DAG.getBasicBlock(TargetMBB), 2010 DAG.getBasicBlock(SuccessorColorMBB)); 2011 DAG.setRoot(Ret); 2012 } 2013 2014 void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) { 2015 // Don't emit any special code for the cleanuppad instruction. It just marks 2016 // the start of an EH scope/funclet. 
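// For example (illustrative IR):
//   %cp = cleanuppad within none []
//   ...
//   cleanupret from %cp unwind to caller
// No DAG nodes are created for the cleanuppad itself; we only update the
// machine basic block flags below.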
2017 FuncInfo.MBB->setIsEHScopeEntry();
2018 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
2019 if (Pers != EHPersonality::Wasm_CXX) {
2020 FuncInfo.MBB->setIsEHFuncletEntry();
2021 FuncInfo.MBB->setIsCleanupFuncletEntry();
2022 }
2023 }
2024
2025 // In wasm EH, even though a catchpad may not catch an exception if a tag does
2026 // not match, it is OK to add only the first unwind destination catchpad to the
2027 // successors, because there will be at least one invoke instruction within the
2028 // catch scope that points to the next unwind destination, if one exists, so
2029 // CFGSort cannot mess up the BB sorting order.
2030 // (All catchpads with 'catch (type)' clauses have a 'llvm.rethrow' intrinsic
2031 // call within them, and catchpads only consisting of 'catch (...)' have a
2032 // '__cxa_end_catch' call within them, both of which generate invokes in case
2033 // the next unwind destination exists, i.e., the next unwind destination is not
2034 // the caller.)
2035 //
2036 // Having at most one EH pad successor is also simpler and helps later
2037 // transformations.
2038 //
2039 // For example,
2040 // current:
2041 // invoke void @foo to ... unwind label %catch.dispatch
2042 // catch.dispatch:
2043 // %0 = catchswitch within ... [label %catch.start] unwind label %next
2044 // catch.start:
2045 // ...
2046 // ... in this BB or some other child BB dominated by this BB there will be an
2047 // invoke that points to 'next' BB as an unwind destination
2048 //
2049 // next: ; We don't need to add this to 'current' BB's successor
2050 // ...
2051 static void findWasmUnwindDestinations(
2052 FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
2053 BranchProbability Prob,
2054 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
2055 &UnwindDests) {
2056 while (EHPadBB) {
2057 const Instruction *Pad = EHPadBB->getFirstNonPHI();
2058 if (isa<CleanupPadInst>(Pad)) {
2059 // Stop on cleanup pads.
2060 UnwindDests.emplace_back(FuncInfo.getMBB(EHPadBB), Prob);
2061 UnwindDests.back().first->setIsEHScopeEntry();
2062 break;
2063 } else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2064 // Add the catchpad handlers to the possible destinations. We don't
2065 // continue to the unwind destination of the catchswitch for wasm.
2066 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2067 UnwindDests.emplace_back(FuncInfo.getMBB(CatchPadBB), Prob);
2068 UnwindDests.back().first->setIsEHScopeEntry();
2069 }
2070 break;
2071 } else {
2072 continue;
2073 }
2074 }
2075 }
2076
2077 /// When an invoke or a cleanupret unwinds to the next EH pad, there are
2078 /// many places it could ultimately go. In the IR, we have a single unwind
2079 /// destination, but in the machine CFG, we enumerate all the possible blocks.
2080 /// This function skips over imaginary basic blocks that hold catchswitch
2081 /// instructions, and finds all the "real" machine
2082 /// basic block destinations. As those destinations may not be successors of
2083 /// EHPadBB, here we also calculate the edge probability to those destinations.
2084 /// The passed-in Prob is the edge probability to EHPadBB.
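///
/// For example (illustrative IR), when the unwind destination is a
/// catchswitch such as
///   %cs = catchswitch within none [label %catch] unwind label %cleanup
/// the catchpad block in %catch is recorded as a destination with the current
/// probability, and the walk continues to %cleanup with the probability
/// scaled by the catchswitch's unwind-edge probability (when BPI is
/// available).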
2085 static void findUnwindDestinations( 2086 FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB, 2087 BranchProbability Prob, 2088 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>> 2089 &UnwindDests) { 2090 EHPersonality Personality = 2091 classifyEHPersonality(FuncInfo.Fn->getPersonalityFn()); 2092 bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX; 2093 bool IsCoreCLR = Personality == EHPersonality::CoreCLR; 2094 bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX; 2095 bool IsSEH = isAsynchronousEHPersonality(Personality); 2096 2097 if (IsWasmCXX) { 2098 findWasmUnwindDestinations(FuncInfo, EHPadBB, Prob, UnwindDests); 2099 assert(UnwindDests.size() <= 1 && 2100 "There should be at most one unwind destination for wasm"); 2101 return; 2102 } 2103 2104 while (EHPadBB) { 2105 const Instruction *Pad = EHPadBB->getFirstNonPHI(); 2106 BasicBlock *NewEHPadBB = nullptr; 2107 if (isa<LandingPadInst>(Pad)) { 2108 // Stop on landingpads. They are not funclets. 2109 UnwindDests.emplace_back(FuncInfo.getMBB(EHPadBB), Prob); 2110 break; 2111 } else if (isa<CleanupPadInst>(Pad)) { 2112 // Stop on cleanup pads. Cleanups are always funclet entries for all known 2113 // personalities. 2114 UnwindDests.emplace_back(FuncInfo.getMBB(EHPadBB), Prob); 2115 UnwindDests.back().first->setIsEHScopeEntry(); 2116 UnwindDests.back().first->setIsEHFuncletEntry(); 2117 break; 2118 } else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) { 2119 // Add the catchpad handlers to the possible destinations. 2120 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) { 2121 UnwindDests.emplace_back(FuncInfo.getMBB(CatchPadBB), Prob); 2122 // For MSVC++ and the CLR, catchblocks are funclets and need prologues. 2123 if (IsMSVCCXX || IsCoreCLR) 2124 UnwindDests.back().first->setIsEHFuncletEntry(); 2125 if (!IsSEH) 2126 UnwindDests.back().first->setIsEHScopeEntry(); 2127 } 2128 NewEHPadBB = CatchSwitch->getUnwindDest(); 2129 } else { 2130 continue; 2131 } 2132 2133 BranchProbabilityInfo *BPI = FuncInfo.BPI; 2134 if (BPI && NewEHPadBB) 2135 Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB); 2136 EHPadBB = NewEHPadBB; 2137 } 2138 } 2139 2140 void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) { 2141 // Update successor info. 2142 SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests; 2143 auto UnwindDest = I.getUnwindDest(); 2144 BranchProbabilityInfo *BPI = FuncInfo.BPI; 2145 BranchProbability UnwindDestProb = 2146 (BPI && UnwindDest) 2147 ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest) 2148 : BranchProbability::getZero(); 2149 findUnwindDestinations(FuncInfo, UnwindDest, UnwindDestProb, UnwindDests); 2150 for (auto &UnwindDest : UnwindDests) { 2151 UnwindDest.first->setIsEHPad(); 2152 addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second); 2153 } 2154 FuncInfo.MBB->normalizeSuccProbs(); 2155 2156 // Create the terminator node. 
2157 SDValue Ret = 2158 DAG.getNode(ISD::CLEANUPRET, getCurSDLoc(), MVT::Other, getControlRoot()); 2159 DAG.setRoot(Ret); 2160 } 2161 2162 void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) { 2163 report_fatal_error("visitCatchSwitch not yet implemented!"); 2164 } 2165 2166 void SelectionDAGBuilder::visitRet(const ReturnInst &I) { 2167 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 2168 auto &DL = DAG.getDataLayout(); 2169 SDValue Chain = getControlRoot(); 2170 SmallVector<ISD::OutputArg, 8> Outs; 2171 SmallVector<SDValue, 8> OutVals; 2172 2173 // Calls to @llvm.experimental.deoptimize don't generate a return value, so 2174 // lower 2175 // 2176 // %val = call <ty> @llvm.experimental.deoptimize() 2177 // ret <ty> %val 2178 // 2179 // differently. 2180 if (I.getParent()->getTerminatingDeoptimizeCall()) { 2181 LowerDeoptimizingReturn(); 2182 return; 2183 } 2184 2185 if (!FuncInfo.CanLowerReturn) { 2186 Register DemoteReg = FuncInfo.DemoteRegister; 2187 const Function *F = I.getParent()->getParent(); 2188 2189 // Emit a store of the return value through the virtual register. 2190 // Leave Outs empty so that LowerReturn won't try to load return 2191 // registers the usual way. 2192 SmallVector<EVT, 1> PtrValueVTs; 2193 ComputeValueVTs(TLI, DL, 2194 PointerType::get(F->getContext(), 2195 DAG.getDataLayout().getAllocaAddrSpace()), 2196 PtrValueVTs); 2197 2198 SDValue RetPtr = 2199 DAG.getCopyFromReg(Chain, getCurSDLoc(), DemoteReg, PtrValueVTs[0]); 2200 SDValue RetOp = getValue(I.getOperand(0)); 2201 2202 SmallVector<EVT, 4> ValueVTs, MemVTs; 2203 SmallVector<uint64_t, 4> Offsets; 2204 ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &MemVTs, 2205 &Offsets, 0); 2206 unsigned NumValues = ValueVTs.size(); 2207 2208 SmallVector<SDValue, 4> Chains(NumValues); 2209 Align BaseAlign = DL.getPrefTypeAlign(I.getOperand(0)->getType()); 2210 for (unsigned i = 0; i != NumValues; ++i) { 2211 // An aggregate return value cannot wrap around the address space, so 2212 // offsets to its parts don't wrap either. 2213 SDValue Ptr = DAG.getObjectPtrOffset(getCurSDLoc(), RetPtr, 2214 TypeSize::getFixed(Offsets[i])); 2215 2216 SDValue Val = RetOp.getValue(RetOp.getResNo() + i); 2217 if (MemVTs[i] != ValueVTs[i]) 2218 Val = DAG.getPtrExtOrTrunc(Val, getCurSDLoc(), MemVTs[i]); 2219 Chains[i] = DAG.getStore( 2220 Chain, getCurSDLoc(), Val, 2221 // FIXME: better loc info would be nice. 
2222 Ptr, MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()), 2223 commonAlignment(BaseAlign, Offsets[i])); 2224 } 2225 2226 Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), 2227 MVT::Other, Chains); 2228 } else if (I.getNumOperands() != 0) { 2229 SmallVector<EVT, 4> ValueVTs; 2230 ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs); 2231 unsigned NumValues = ValueVTs.size(); 2232 if (NumValues) { 2233 SDValue RetOp = getValue(I.getOperand(0)); 2234 2235 const Function *F = I.getParent()->getParent(); 2236 2237 bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters( 2238 I.getOperand(0)->getType(), F->getCallingConv(), 2239 /*IsVarArg*/ false, DL); 2240 2241 ISD::NodeType ExtendKind = ISD::ANY_EXTEND; 2242 if (F->getAttributes().hasRetAttr(Attribute::SExt)) 2243 ExtendKind = ISD::SIGN_EXTEND; 2244 else if (F->getAttributes().hasRetAttr(Attribute::ZExt)) 2245 ExtendKind = ISD::ZERO_EXTEND; 2246 2247 LLVMContext &Context = F->getContext(); 2248 bool RetInReg = F->getAttributes().hasRetAttr(Attribute::InReg); 2249 2250 for (unsigned j = 0; j != NumValues; ++j) { 2251 EVT VT = ValueVTs[j]; 2252 2253 if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) 2254 VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind); 2255 2256 CallingConv::ID CC = F->getCallingConv(); 2257 2258 unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, CC, VT); 2259 MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, CC, VT); 2260 SmallVector<SDValue, 4> Parts(NumParts); 2261 getCopyToParts(DAG, getCurSDLoc(), 2262 SDValue(RetOp.getNode(), RetOp.getResNo() + j), 2263 &Parts[0], NumParts, PartVT, &I, CC, ExtendKind); 2264 2265 // 'inreg' on function refers to return value 2266 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy(); 2267 if (RetInReg) 2268 Flags.setInReg(); 2269 2270 if (I.getOperand(0)->getType()->isPointerTy()) { 2271 Flags.setPointer(); 2272 Flags.setPointerAddrSpace( 2273 cast<PointerType>(I.getOperand(0)->getType())->getAddressSpace()); 2274 } 2275 2276 if (NeedsRegBlock) { 2277 Flags.setInConsecutiveRegs(); 2278 if (j == NumValues - 1) 2279 Flags.setInConsecutiveRegsLast(); 2280 } 2281 2282 // Propagate extension type if any 2283 if (ExtendKind == ISD::SIGN_EXTEND) 2284 Flags.setSExt(); 2285 else if (ExtendKind == ISD::ZERO_EXTEND) 2286 Flags.setZExt(); 2287 else if (F->getAttributes().hasRetAttr(Attribute::NoExt)) 2288 Flags.setNoExt(); 2289 2290 for (unsigned i = 0; i < NumParts; ++i) { 2291 Outs.push_back(ISD::OutputArg(Flags, 2292 Parts[i].getValueType().getSimpleVT(), 2293 VT, /*isfixed=*/true, 0, 0)); 2294 OutVals.push_back(Parts[i]); 2295 } 2296 } 2297 } 2298 } 2299 2300 // Push in swifterror virtual register as the last element of Outs. This makes 2301 // sure swifterror virtual register will be returned in the swifterror 2302 // physical register. 2303 const Function *F = I.getParent()->getParent(); 2304 if (TLI.supportSwiftError() && 2305 F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) { 2306 assert(SwiftError.getFunctionArg() && "Need a swift error argument"); 2307 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy(); 2308 Flags.setSwiftError(); 2309 Outs.push_back(ISD::OutputArg( 2310 Flags, /*vt=*/TLI.getPointerTy(DL), /*argvt=*/EVT(TLI.getPointerTy(DL)), 2311 /*isfixed=*/true, /*origidx=*/1, /*partOffs=*/0)); 2312 // Create SDNode for the swifterror virtual register. 
2313 OutVals.push_back( 2314 DAG.getRegister(SwiftError.getOrCreateVRegUseAt( 2315 &I, FuncInfo.MBB, SwiftError.getFunctionArg()), 2316 EVT(TLI.getPointerTy(DL)))); 2317 } 2318 2319 bool isVarArg = DAG.getMachineFunction().getFunction().isVarArg(); 2320 CallingConv::ID CallConv = 2321 DAG.getMachineFunction().getFunction().getCallingConv(); 2322 Chain = DAG.getTargetLoweringInfo().LowerReturn( 2323 Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG); 2324 2325 // Verify that the target's LowerReturn behaved as expected. 2326 assert(Chain.getNode() && Chain.getValueType() == MVT::Other && 2327 "LowerReturn didn't return a valid chain!"); 2328 2329 // Update the DAG with the new chain value resulting from return lowering. 2330 DAG.setRoot(Chain); 2331 } 2332 2333 /// CopyToExportRegsIfNeeded - If the given value has virtual registers 2334 /// created for it, emit nodes to copy the value into the virtual 2335 /// registers. 2336 void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) { 2337 // Skip empty types 2338 if (V->getType()->isEmptyTy()) 2339 return; 2340 2341 DenseMap<const Value *, Register>::iterator VMI = FuncInfo.ValueMap.find(V); 2342 if (VMI != FuncInfo.ValueMap.end()) { 2343 assert((!V->use_empty() || isa<CallBrInst>(V)) && 2344 "Unused value assigned virtual registers!"); 2345 CopyValueToVirtualRegister(V, VMI->second); 2346 } 2347 } 2348 2349 /// ExportFromCurrentBlock - If this condition isn't known to be exported from 2350 /// the current basic block, add it to ValueMap now so that we'll get a 2351 /// CopyTo/FromReg. 2352 void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) { 2353 // No need to export constants. 2354 if (!isa<Instruction>(V) && !isa<Argument>(V)) return; 2355 2356 // Already exported? 2357 if (FuncInfo.isExportedInst(V)) return; 2358 2359 Register Reg = FuncInfo.InitializeRegForValue(V); 2360 CopyValueToVirtualRegister(V, Reg); 2361 } 2362 2363 bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V, 2364 const BasicBlock *FromBB) { 2365 // The operands of the setcc have to be in this block. We don't know 2366 // how to export them from some other block. 2367 if (const Instruction *VI = dyn_cast<Instruction>(V)) { 2368 // Can export from current BB. 2369 if (VI->getParent() == FromBB) 2370 return true; 2371 2372 // Is already exported, noop. 2373 return FuncInfo.isExportedInst(V); 2374 } 2375 2376 // If this is an argument, we can export it if the BB is the entry block or 2377 // if it is already exported. 2378 if (isa<Argument>(V)) { 2379 if (FromBB->isEntryBlock()) 2380 return true; 2381 2382 // Otherwise, can only export this if it is already exported. 2383 return FuncInfo.isExportedInst(V); 2384 } 2385 2386 // Otherwise, constants can always be exported. 2387 return true; 2388 } 2389 2390 /// Return branch probability calculated by BranchProbabilityInfo for IR blocks. 2391 BranchProbability 2392 SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src, 2393 const MachineBasicBlock *Dst) const { 2394 BranchProbabilityInfo *BPI = FuncInfo.BPI; 2395 const BasicBlock *SrcBB = Src->getBasicBlock(); 2396 const BasicBlock *DstBB = Dst->getBasicBlock(); 2397 if (!BPI) { 2398 // If BPI is not available, set the default probability as 1 / N, where N is 2399 // the number of successors. 
2400 auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1); 2401 return BranchProbability(1, SuccSize); 2402 } 2403 return BPI->getEdgeProbability(SrcBB, DstBB); 2404 } 2405 2406 void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src, 2407 MachineBasicBlock *Dst, 2408 BranchProbability Prob) { 2409 if (!FuncInfo.BPI) 2410 Src->addSuccessorWithoutProb(Dst); 2411 else { 2412 if (Prob.isUnknown()) 2413 Prob = getEdgeProbability(Src, Dst); 2414 Src->addSuccessor(Dst, Prob); 2415 } 2416 } 2417 2418 static bool InBlock(const Value *V, const BasicBlock *BB) { 2419 if (const Instruction *I = dyn_cast<Instruction>(V)) 2420 return I->getParent() == BB; 2421 return true; 2422 } 2423 2424 /// EmitBranchForMergedCondition - Helper method for FindMergedConditions. 2425 /// This function emits a branch and is used at the leaves of an OR or an 2426 /// AND operator tree. 2427 void 2428 SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond, 2429 MachineBasicBlock *TBB, 2430 MachineBasicBlock *FBB, 2431 MachineBasicBlock *CurBB, 2432 MachineBasicBlock *SwitchBB, 2433 BranchProbability TProb, 2434 BranchProbability FProb, 2435 bool InvertCond) { 2436 const BasicBlock *BB = CurBB->getBasicBlock(); 2437 2438 // If the leaf of the tree is a comparison, merge the condition into 2439 // the caseblock. 2440 if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) { 2441 // The operands of the cmp have to be in this block. We don't know 2442 // how to export them from some other block. If this is the first block 2443 // of the sequence, no exporting is needed. 2444 if (CurBB == SwitchBB || 2445 (isExportableFromCurrentBlock(BOp->getOperand(0), BB) && 2446 isExportableFromCurrentBlock(BOp->getOperand(1), BB))) { 2447 ISD::CondCode Condition; 2448 if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) { 2449 ICmpInst::Predicate Pred = 2450 InvertCond ? IC->getInversePredicate() : IC->getPredicate(); 2451 Condition = getICmpCondCode(Pred); 2452 } else { 2453 const FCmpInst *FC = cast<FCmpInst>(Cond); 2454 FCmpInst::Predicate Pred = 2455 InvertCond ? FC->getInversePredicate() : FC->getPredicate(); 2456 Condition = getFCmpCondCode(Pred); 2457 if (TM.Options.NoNaNsFPMath) 2458 Condition = getFCmpCodeWithoutNaN(Condition); 2459 } 2460 2461 CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr, 2462 TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb); 2463 SL->SwitchCases.push_back(CB); 2464 return; 2465 } 2466 } 2467 2468 // Create a CaseBlock record representing this branch. 2469 ISD::CondCode Opc = InvertCond ? ISD::SETNE : ISD::SETEQ; 2470 CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()), 2471 nullptr, TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb); 2472 SL->SwitchCases.push_back(CB); 2473 } 2474 2475 // Collect dependencies on V recursively. This is used for the cost analysis in 2476 // `shouldKeepJumpConditionsTogether`. 2477 static bool collectInstructionDeps( 2478 SmallMapVector<const Instruction *, bool, 8> *Deps, const Value *V, 2479 SmallMapVector<const Instruction *, bool, 8> *Necessary = nullptr, 2480 unsigned Depth = 0) { 2481 // Return false if we have an incomplete count. 2482 if (Depth >= SelectionDAG::MaxRecursionDepth) 2483 return false; 2484 2485 auto *I = dyn_cast<Instruction>(V); 2486 if (I == nullptr) 2487 return true; 2488 2489 if (Necessary != nullptr) { 2490 // This instruction is necessary for the other side of the condition so 2491 // don't count it. 2492 if (Necessary->contains(I)) 2493 return true; 2494 } 2495 2496 // Already added this dep. 
2497 if (!Deps->try_emplace(I, false).second)
2498 return true;
2499
2500 for (unsigned OpIdx = 0, E = I->getNumOperands(); OpIdx < E; ++OpIdx)
2501 if (!collectInstructionDeps(Deps, I->getOperand(OpIdx), Necessary,
2502 Depth + 1))
2503 return false;
2504 return true;
2505 }
2506
2507 bool SelectionDAGBuilder::shouldKeepJumpConditionsTogether(
2508 const FunctionLoweringInfo &FuncInfo, const BranchInst &I,
2509 Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs,
2510 TargetLoweringBase::CondMergingParams Params) const {
2511 if (I.getNumSuccessors() != 2)
2512 return false;
2513
2514 if (!I.isConditional())
2515 return false;
2516
2517 if (Params.BaseCost < 0)
2518 return false;
2519
2520 // Baseline cost.
2521 InstructionCost CostThresh = Params.BaseCost;
2522
2523 BranchProbabilityInfo *BPI = nullptr;
2524 if (Params.LikelyBias || Params.UnlikelyBias)
2525 BPI = FuncInfo.BPI;
2526 if (BPI != nullptr) {
2527 // See if we are either likely to get an early out or compute both lhs/rhs
2528 // of the condition.
2529 BasicBlock *IfFalse = I.getSuccessor(0);
2530 BasicBlock *IfTrue = I.getSuccessor(1);
2531
2532 std::optional<bool> Likely;
2533 if (BPI->isEdgeHot(I.getParent(), IfTrue))
2534 Likely = true;
2535 else if (BPI->isEdgeHot(I.getParent(), IfFalse))
2536 Likely = false;
2537
2538 if (Likely) {
2539 if (Opc == (*Likely ? Instruction::And : Instruction::Or))
2540 // It's likely we will have to compute both the lhs and rhs of the condition.
2541 CostThresh += Params.LikelyBias;
2542 else {
2543 if (Params.UnlikelyBias < 0)
2544 return false;
2545 // It's likely we will get an early out.
2546 CostThresh -= Params.UnlikelyBias;
2547 }
2548 }
2549 }
2550
2551 if (CostThresh <= 0)
2552 return false;
2553
2554 // Collect "all" instructions that the lhs condition depends on.
2555 // Use a map for stable iteration (to avoid the non-determinism of iterating
2556 // a SmallPtrSet). The `bool` value is just a dummy.
2557 SmallMapVector<const Instruction *, bool, 8> LhsDeps, RhsDeps;
2558 collectInstructionDeps(&LhsDeps, Lhs);
2559 // Collect "all" instructions that the rhs condition depends on AND that
2560 // aren't dependencies of the lhs. This gives us an estimate on which
2561 // instructions we stand to save by splitting the condition.
2562 if (!collectInstructionDeps(&RhsDeps, Rhs, &LhsDeps))
2563 return false;
2564 // Add the compare instruction itself unless it's already a dependency of the LHS.
2565 if (const auto *RhsI = dyn_cast<Instruction>(Rhs))
2566 if (!LhsDeps.contains(RhsI))
2567 RhsDeps.try_emplace(RhsI, false);
2568
2569 const auto &TLI = DAG.getTargetLoweringInfo();
2570 const auto &TTI =
2571 TLI.getTargetMachine().getTargetTransformInfo(*I.getFunction());
2572
2573 InstructionCost CostOfIncluding = 0;
2574 // See if this instruction will need to be computed independently of whether
2575 // the RHS is.
2576 Value *BrCond = I.getCondition();
2577 auto ShouldCountInsn = [&RhsDeps, &BrCond](const Instruction *Ins) {
2578 for (const auto *U : Ins->users()) {
2579 // If a user is independent of the RHS calculation, we don't need to count it.
2580 if (auto *UIns = dyn_cast<Instruction>(U))
2581 if (UIns != BrCond && !RhsDeps.contains(UIns))
2582 return false;
2583 }
2584 return true;
2585 };
2586
2587 // Prune instructions from RHS Deps that are dependencies of unrelated
2588 // instructions. The value (SelectionDAG::MaxRecursionDepth) is fairly
2589 // arbitrary and just meant to cap how much time we spend in the pruning
2590 // loop. It's highly unlikely to come into effect.
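// For example (illustrative): for "if (a != 0 && b[i] > 42)", the load of
// b[i] and its compare stay in RhsDeps only while all their users feed the
// rhs condition; the summed latency of whatever remains is then weighed
// against CostThresh below.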
2591 const unsigned MaxPruneIters = SelectionDAG::MaxRecursionDepth;
2592 // Stop after a certain point. No incorrectness from including too many
2593 // instructions.
2594 for (unsigned PruneIters = 0; PruneIters < MaxPruneIters; ++PruneIters) {
2595 const Instruction *ToDrop = nullptr;
2596 for (const auto &InsPair : RhsDeps) {
2597 if (!ShouldCountInsn(InsPair.first)) {
2598 ToDrop = InsPair.first;
2599 break;
2600 }
2601 }
2602 if (ToDrop == nullptr)
2603 break;
2604 RhsDeps.erase(ToDrop);
2605 }
2606
2607 for (const auto &InsPair : RhsDeps) {
2608 // Finally accumulate latency that we can only attribute to computing the
2609 // RHS condition. Use latency because we are essentially trying to calculate
2610 // the cost of the dependency chain.
2611 // Possible TODO: We could try to estimate ILP and make this more precise.
2612 CostOfIncluding +=
2613 TTI.getInstructionCost(InsPair.first, TargetTransformInfo::TCK_Latency);
2614
2615 if (CostOfIncluding > CostThresh)
2616 return false;
2617 }
2618 return true;
2619 }
2620
2621 void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
2622 MachineBasicBlock *TBB,
2623 MachineBasicBlock *FBB,
2624 MachineBasicBlock *CurBB,
2625 MachineBasicBlock *SwitchBB,
2626 Instruction::BinaryOps Opc,
2627 BranchProbability TProb,
2628 BranchProbability FProb,
2629 bool InvertCond) {
2630 // If this is a single-use NOT that is not itself part of the tree, skip over
2631 // it and remember to invert the op and operands at the next level.
2632 Value *NotCond;
2633 if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
2634 InBlock(NotCond, CurBB->getBasicBlock())) {
2635 FindMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
2636 !InvertCond);
2637 return;
2638 }
2639
2640 const Instruction *BOp = dyn_cast<Instruction>(Cond);
2641 const Value *BOpOp0, *BOpOp1;
2642 // Compute the effective opcode for Cond, taking into account whether it needs
2643 // to be inverted, e.g.
2644 // and (not (or A, B)), C
2645 // gets lowered as
2646 // and (and (not A, not B), C)
2647 Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
2648 if (BOp) {
2649 BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
2650 ? Instruction::And
2651 : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
2652 ? Instruction::Or
2653 : (Instruction::BinaryOps)0);
2654 if (InvertCond) {
2655 if (BOpc == Instruction::And)
2656 BOpc = Instruction::Or;
2657 else if (BOpc == Instruction::Or)
2658 BOpc = Instruction::And;
2659 }
2660 }
2661
2662 // If this node is not part of the or/and tree, emit it as a branch.
2663 // Note that all nodes in the tree should have the same opcode.
2664 bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
2665 if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
2666 !InBlock(BOpOp0, CurBB->getBasicBlock()) ||
2667 !InBlock(BOpOp1, CurBB->getBasicBlock())) {
2668 EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
2669 TProb, FProb, InvertCond);
2670 return;
2671 }
2672
2673 // Create TmpBB after CurBB.
2674 MachineFunction::iterator BBI(CurBB);
2675 MachineFunction &MF = DAG.getMachineFunction();
2676 MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
2677 CurBB->getParent()->insert(++BBI, TmpBB);
2678
2679 if (Opc == Instruction::Or) {
2680 // Codegen X | Y as:
2681 // BB1:
2682 // jmp_if_X TBB
2683 // jmp TmpBB
2684 // TmpBB:
2685 // jmp_if_Y TBB
2686 // jmp FBB
2687 //
2688
2689 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2690 // The requirement is that 2691 // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB) 2692 // = TrueProb for original BB. 2693 // Assuming the original probabilities are A and B, one choice is to set 2694 // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to 2695 // A/(1+B) and 2B/(1+B). This choice assumes that 2696 // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB. 2697 // Another choice is to assume TrueProb for BB1 equals to TrueProb for 2698 // TmpBB, but the math is more complicated. 2699 2700 auto NewTrueProb = TProb / 2; 2701 auto NewFalseProb = TProb / 2 + FProb; 2702 // Emit the LHS condition. 2703 FindMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb, 2704 NewFalseProb, InvertCond); 2705 2706 // Normalize A/2 and B to get A/(1+B) and 2B/(1+B). 2707 SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb}; 2708 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end()); 2709 // Emit the RHS condition into TmpBB. 2710 FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0], 2711 Probs[1], InvertCond); 2712 } else { 2713 assert(Opc == Instruction::And && "Unknown merge op!"); 2714 // Codegen X & Y as: 2715 // BB1: 2716 // jmp_if_X TmpBB 2717 // jmp FBB 2718 // TmpBB: 2719 // jmp_if_Y TBB 2720 // jmp FBB 2721 // 2722 // This requires creation of TmpBB after CurBB. 2723 2724 // We have flexibility in setting Prob for BB1 and Prob for TmpBB. 2725 // The requirement is that 2726 // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB) 2727 // = FalseProb for original BB. 2728 // Assuming the original probabilities are A and B, one choice is to set 2729 // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to 2730 // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 == 2731 // TrueProb for BB1 * FalseProb for TmpBB. 2732 2733 auto NewTrueProb = TProb + FProb / 2; 2734 auto NewFalseProb = FProb / 2; 2735 // Emit the LHS condition. 2736 FindMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb, 2737 NewFalseProb, InvertCond); 2738 2739 // Normalize A and B/2 to get 2A/(1+A) and B/(1+A). 2740 SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2}; 2741 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end()); 2742 // Emit the RHS condition into TmpBB. 2743 FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0], 2744 Probs[1], InvertCond); 2745 } 2746 } 2747 2748 /// If the set of cases should be emitted as a series of branches, return true. 2749 /// If we should emit this as a bunch of and/or'd together conditions, return 2750 /// false. 2751 bool 2752 SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) { 2753 if (Cases.size() != 2) return true; 2754 2755 // If this is two comparisons of the same values or'd or and'd together, they 2756 // will get folded into a single comparison, so don't emit two blocks. 
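// For example (illustrative), "if (X < Y || Y > X)" produces two cases that
// compare the same operands (one with them swapped); a single setcc suffices,
// so prefer the and/or form over two branch blocks.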
2757 if ((Cases[0].CmpLHS == Cases[1].CmpLHS && 2758 Cases[0].CmpRHS == Cases[1].CmpRHS) || 2759 (Cases[0].CmpRHS == Cases[1].CmpLHS && 2760 Cases[0].CmpLHS == Cases[1].CmpRHS)) { 2761 return false; 2762 } 2763 2764 // Handle: (X != null) | (Y != null) --> (X|Y) != 0 2765 // Handle: (X == null) & (Y == null) --> (X|Y) == 0 2766 if (Cases[0].CmpRHS == Cases[1].CmpRHS && 2767 Cases[0].CC == Cases[1].CC && 2768 isa<Constant>(Cases[0].CmpRHS) && 2769 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) { 2770 if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB) 2771 return false; 2772 if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB) 2773 return false; 2774 } 2775 2776 return true; 2777 } 2778 2779 void SelectionDAGBuilder::visitBr(const BranchInst &I) { 2780 MachineBasicBlock *BrMBB = FuncInfo.MBB; 2781 2782 // Update machine-CFG edges. 2783 MachineBasicBlock *Succ0MBB = FuncInfo.getMBB(I.getSuccessor(0)); 2784 2785 if (I.isUnconditional()) { 2786 // Update machine-CFG edges. 2787 BrMBB->addSuccessor(Succ0MBB); 2788 2789 // If this is not a fall-through branch or optimizations are switched off, 2790 // emit the branch. 2791 if (Succ0MBB != NextBlock(BrMBB) || 2792 TM.getOptLevel() == CodeGenOptLevel::None) { 2793 auto Br = DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, 2794 getControlRoot(), DAG.getBasicBlock(Succ0MBB)); 2795 setValue(&I, Br); 2796 DAG.setRoot(Br); 2797 } 2798 2799 return; 2800 } 2801 2802 // If this condition is one of the special cases we handle, do special stuff 2803 // now. 2804 const Value *CondVal = I.getCondition(); 2805 MachineBasicBlock *Succ1MBB = FuncInfo.getMBB(I.getSuccessor(1)); 2806 2807 // If this is a series of conditions that are or'd or and'd together, emit 2808 // this as a sequence of branches instead of setcc's with and/or operations. 2809 // As long as jumps are not expensive (exceptions for multi-use logic ops, 2810 // unpredictable branches, and vector extracts because those jumps are likely 2811 // expensive for any target), this should improve performance. 2812 // For example, instead of something like: 2813 // cmp A, B 2814 // C = seteq 2815 // cmp D, E 2816 // F = setle 2817 // or C, F 2818 // jnz foo 2819 // Emit: 2820 // cmp A, B 2821 // je foo 2822 // cmp D, E 2823 // jle foo 2824 bool IsUnpredictable = I.hasMetadata(LLVMContext::MD_unpredictable); 2825 const Instruction *BOp = dyn_cast<Instruction>(CondVal); 2826 if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp && 2827 BOp->hasOneUse() && !IsUnpredictable) { 2828 Value *Vec; 2829 const Value *BOp0, *BOp1; 2830 Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0; 2831 if (match(BOp, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1)))) 2832 Opcode = Instruction::And; 2833 else if (match(BOp, m_LogicalOr(m_Value(BOp0), m_Value(BOp1)))) 2834 Opcode = Instruction::Or; 2835 2836 if (Opcode && 2837 !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) && 2838 match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value()))) && 2839 !shouldKeepJumpConditionsTogether( 2840 FuncInfo, I, Opcode, BOp0, BOp1, 2841 DAG.getTargetLoweringInfo().getJumpConditionMergingParams( 2842 Opcode, BOp0, BOp1))) { 2843 FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB, Opcode, 2844 getEdgeProbability(BrMBB, Succ0MBB), 2845 getEdgeProbability(BrMBB, Succ1MBB), 2846 /*InvertCond=*/false); 2847 // If the compares in later blocks need to use values not currently 2848 // exported from this block, export them now. This block should always 2849 // be the first entry. 
2850 assert(SL->SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!"); 2851 2852 // Allow some cases to be rejected. 2853 if (ShouldEmitAsBranches(SL->SwitchCases)) { 2854 for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i) { 2855 ExportFromCurrentBlock(SL->SwitchCases[i].CmpLHS); 2856 ExportFromCurrentBlock(SL->SwitchCases[i].CmpRHS); 2857 } 2858 2859 // Emit the branch for this block. 2860 visitSwitchCase(SL->SwitchCases[0], BrMBB); 2861 SL->SwitchCases.erase(SL->SwitchCases.begin()); 2862 return; 2863 } 2864 2865 // Okay, we decided not to do this, remove any inserted MBB's and clear 2866 // SwitchCases. 2867 for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i) 2868 FuncInfo.MF->erase(SL->SwitchCases[i].ThisBB); 2869 2870 SL->SwitchCases.clear(); 2871 } 2872 } 2873 2874 // Create a CaseBlock record representing this branch. 2875 CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()), 2876 nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc(), 2877 BranchProbability::getUnknown(), BranchProbability::getUnknown(), 2878 IsUnpredictable); 2879 2880 // Use visitSwitchCase to actually insert the fast branch sequence for this 2881 // cond branch. 2882 visitSwitchCase(CB, BrMBB); 2883 } 2884 2885 /// visitSwitchCase - Emits the necessary code to represent a single node in 2886 /// the binary search tree resulting from lowering a switch instruction. 2887 void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB, 2888 MachineBasicBlock *SwitchBB) { 2889 SDValue Cond; 2890 SDValue CondLHS = getValue(CB.CmpLHS); 2891 SDLoc dl = CB.DL; 2892 2893 if (CB.CC == ISD::SETTRUE) { 2894 // Branch or fall through to TrueBB. 2895 addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb); 2896 SwitchBB->normalizeSuccProbs(); 2897 if (CB.TrueBB != NextBlock(SwitchBB)) { 2898 DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, getControlRoot(), 2899 DAG.getBasicBlock(CB.TrueBB))); 2900 } 2901 return; 2902 } 2903 2904 auto &TLI = DAG.getTargetLoweringInfo(); 2905 EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), CB.CmpLHS->getType()); 2906 2907 // Build the setcc now. 2908 if (!CB.CmpMHS) { 2909 // Fold "(X == true)" to X and "(X == false)" to !X to 2910 // handle common cases produced by branch lowering. 2911 if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) && 2912 CB.CC == ISD::SETEQ) 2913 Cond = CondLHS; 2914 else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) && 2915 CB.CC == ISD::SETEQ) { 2916 SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType()); 2917 Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True); 2918 } else { 2919 SDValue CondRHS = getValue(CB.CmpRHS); 2920 2921 // If a pointer's DAG type is larger than its memory type then the DAG 2922 // values are zero-extended. This breaks signed comparisons so truncate 2923 // back to the underlying type before doing the compare. 
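// For example (illustrative), on a target where a 32-bit pointer is widened
// to a 64-bit DAG value, the values are zero-extended; a signed compare of
// the wide values would misorder pointers with the high bit set, so the
// comparison is done in the narrower memory type.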
2924 if (CondLHS.getValueType() != MemVT) {
2925 CondLHS = DAG.getPtrExtOrTrunc(CondLHS, getCurSDLoc(), MemVT);
2926 CondRHS = DAG.getPtrExtOrTrunc(CondRHS, getCurSDLoc(), MemVT);
2927 }
2928 Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.CC);
2929 }
2930 } else {
2931 assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
2932
2933 const APInt &Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
2934 const APInt &High = cast<ConstantInt>(CB.CmpRHS)->getValue();
2935
2936 SDValue CmpOp = getValue(CB.CmpMHS);
2937 EVT VT = CmpOp.getValueType();
2938
2939 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
2940 Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT),
2941 ISD::SETLE);
2942 } else {
2943 SDValue SUB = DAG.getNode(ISD::SUB, dl,
2944 VT, CmpOp, DAG.getConstant(Low, dl, VT));
2945 Cond = DAG.getSetCC(dl, MVT::i1, SUB,
2946 DAG.getConstant(High-Low, dl, VT), ISD::SETULE);
2947 }
2948 }
2949
2950 // Update successor info.
2951 addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2952 // TrueBB and FalseBB are always different unless the incoming IR is
2953 // degenerate. This only happens when running llc on weird IR.
2954 if (CB.TrueBB != CB.FalseBB)
2955 addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb);
2956 SwitchBB->normalizeSuccProbs();
2957
2958 // If the lhs block is the next block, invert the condition so that we can
2959 // fall through to the lhs instead of the rhs block.
2960 if (CB.TrueBB == NextBlock(SwitchBB)) {
2961 std::swap(CB.TrueBB, CB.FalseBB);
2962 SDValue True = DAG.getConstant(1, dl, Cond.getValueType());
2963 Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
2964 }
2965
2966 SDNodeFlags Flags;
2967 Flags.setUnpredictable(CB.IsUnpredictable);
2968 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl, MVT::Other, getControlRoot(),
2969 Cond, DAG.getBasicBlock(CB.TrueBB), Flags);
2970
2971 setValue(CurInst, BrCond);
2972
2973 // Insert the false branch. Do this even if it's a fall-through branch;
2974 // this makes it easier to do DAG optimizations which require inverting
2975 // the branch condition.
2976 BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2977 DAG.getBasicBlock(CB.FalseBB));
2978
2979 DAG.setRoot(BrCond);
2980 }
2981
2982 /// visitJumpTable - Emit the JumpTable node in the current MBB.
2983 void SelectionDAGBuilder::visitJumpTable(SwitchCG::JumpTable &JT) {
2984 // Emit the code for the jump table.
2985 assert(JT.SL && "Should set SDLoc for SelectionDAG!");
2986 assert(JT.Reg && "Should lower JT Header first!");
2987 EVT PTy = DAG.getTargetLoweringInfo().getJumpTableRegTy(DAG.getDataLayout());
2988 SDValue Index = DAG.getCopyFromReg(getControlRoot(), *JT.SL, JT.Reg, PTy);
2989 SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
2990 SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, *JT.SL, MVT::Other,
2991 Index.getValue(1), Table, Index);
2992 DAG.setRoot(BrJumpTable);
2993 }
2994
2995 /// visitJumpTableHeader - This function emits the necessary code to produce
2996 /// the index into the jump table from the value being switched on.
2997 void SelectionDAGBuilder::visitJumpTableHeader(SwitchCG::JumpTable &JT,
2998 JumpTableHeader &JTH,
2999 MachineBasicBlock *SwitchBB) {
3000 assert(JT.SL && "Should set SDLoc for SelectionDAG!");
3001 const SDLoc &dl = *JT.SL;
3002
3003 // Subtract the lowest switch case value from the value being switched on.
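// Illustrative example: for a jump table covering cases 10..13, JTH.First
// is 10 and JTH.Last is 13, so we emit Sub = SValue - 10 below and, unless
// the fallthrough is unreachable, guard the table with
// "if (Sub u> 3) branch to default" before the BR_JT.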
3004 SDValue SwitchOp = getValue(JTH.SValue); 3005 EVT VT = SwitchOp.getValueType(); 3006 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp, 3007 DAG.getConstant(JTH.First, dl, VT)); 3008 3009 // The SDNode we just created, which holds the value being switched on minus 3010 // the smallest case value, needs to be copied to a virtual register so it 3011 // can be used as an index into the jump table in a subsequent basic block. 3012 // This value may be smaller or larger than the target's pointer type, and 3013 // therefore require extension or truncating. 3014 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3015 SwitchOp = 3016 DAG.getZExtOrTrunc(Sub, dl, TLI.getJumpTableRegTy(DAG.getDataLayout())); 3017 3018 Register JumpTableReg = 3019 FuncInfo.CreateReg(TLI.getJumpTableRegTy(DAG.getDataLayout())); 3020 SDValue CopyTo = 3021 DAG.getCopyToReg(getControlRoot(), dl, JumpTableReg, SwitchOp); 3022 JT.Reg = JumpTableReg; 3023 3024 if (!JTH.FallthroughUnreachable) { 3025 // Emit the range check for the jump table, and branch to the default block 3026 // for the switch statement if the value being switched on exceeds the 3027 // largest case in the switch. 3028 SDValue CMP = DAG.getSetCC( 3029 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), 3030 Sub.getValueType()), 3031 Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT); 3032 3033 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl, 3034 MVT::Other, CopyTo, CMP, 3035 DAG.getBasicBlock(JT.Default)); 3036 3037 // Avoid emitting unnecessary branches to the next block. 3038 if (JT.MBB != NextBlock(SwitchBB)) 3039 BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond, 3040 DAG.getBasicBlock(JT.MBB)); 3041 3042 DAG.setRoot(BrCond); 3043 } else { 3044 // Avoid emitting unnecessary branches to the next block. 3045 if (JT.MBB != NextBlock(SwitchBB)) 3046 DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, CopyTo, 3047 DAG.getBasicBlock(JT.MBB))); 3048 else 3049 DAG.setRoot(CopyTo); 3050 } 3051 } 3052 3053 /// Create a LOAD_STACK_GUARD node, and let it carry the target specific global 3054 /// variable if there exists one. 3055 static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL, 3056 SDValue &Chain) { 3057 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3058 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout()); 3059 EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout()); 3060 MachineFunction &MF = DAG.getMachineFunction(); 3061 Value *Global = TLI.getSDagStackGuard(*MF.getFunction().getParent()); 3062 MachineSDNode *Node = 3063 DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain); 3064 if (Global) { 3065 MachinePointerInfo MPInfo(Global); 3066 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant | 3067 MachineMemOperand::MODereferenceable; 3068 MachineMemOperand *MemRef = MF.getMachineMemOperand( 3069 MPInfo, Flags, LocationSize::precise(PtrTy.getSizeInBits() / 8), 3070 DAG.getEVTAlign(PtrTy)); 3071 DAG.setNodeMemRefs(Node, {MemRef}); 3072 } 3073 if (PtrTy != PtrMemTy) 3074 return DAG.getPtrExtOrTrunc(SDValue(Node, 0), DL, PtrMemTy); 3075 return SDValue(Node, 0); 3076 } 3077 3078 /// Codegen a new tail for a stack protector check ParentMBB which has had its 3079 /// tail spliced into a stack protector check success bb. 3080 /// 3081 /// For a high level explanation of how this fits into the stack protector 3082 /// generation see the comment on the declaration of class 3083 /// StackProtectorDescriptor. 
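/// Roughly (an illustrative sketch, not a contract), the emitted tail is:
///   GuardVal = load(stack protector slot)
///   Guard    = target guard value (LOAD_STACK_GUARD or a load of the
///              stack-guard global)
///   brcond (Guard != GuardVal), FailureMBB
///   br SuccessMBB
/// unless the target supplies a guard-check function, in which case we
/// simply call it with GuardVal.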
3084 void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD, 3085 MachineBasicBlock *ParentBB) { 3086 3087 // First create the loads to the guard/stack slot for the comparison. 3088 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3089 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout()); 3090 EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout()); 3091 3092 MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo(); 3093 int FI = MFI.getStackProtectorIndex(); 3094 3095 SDValue Guard; 3096 SDLoc dl = getCurSDLoc(); 3097 SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy); 3098 const Module &M = *ParentBB->getParent()->getFunction().getParent(); 3099 Align Align = 3100 DAG.getDataLayout().getPrefTypeAlign(PointerType::get(M.getContext(), 0)); 3101 3102 // Generate code to load the content of the guard slot. 3103 SDValue GuardVal = DAG.getLoad( 3104 PtrMemTy, dl, DAG.getEntryNode(), StackSlotPtr, 3105 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align, 3106 MachineMemOperand::MOVolatile); 3107 3108 if (TLI.useStackGuardXorFP()) 3109 GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl); 3110 3111 // Retrieve guard check function, nullptr if instrumentation is inlined. 3112 if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) { 3113 // The target provides a guard check function to validate the guard value. 3114 // Generate a call to that function with the content of the guard slot as 3115 // argument. 3116 FunctionType *FnTy = GuardCheckFn->getFunctionType(); 3117 assert(FnTy->getNumParams() == 1 && "Invalid function signature"); 3118 3119 TargetLowering::ArgListTy Args; 3120 TargetLowering::ArgListEntry Entry; 3121 Entry.Node = GuardVal; 3122 Entry.Ty = FnTy->getParamType(0); 3123 if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg)) 3124 Entry.IsInReg = true; 3125 Args.push_back(Entry); 3126 3127 TargetLowering::CallLoweringInfo CLI(DAG); 3128 CLI.setDebugLoc(getCurSDLoc()) 3129 .setChain(DAG.getEntryNode()) 3130 .setCallee(GuardCheckFn->getCallingConv(), FnTy->getReturnType(), 3131 getValue(GuardCheckFn), std::move(Args)); 3132 3133 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI); 3134 DAG.setRoot(Result.second); 3135 return; 3136 } 3137 3138 // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD. 3139 // Otherwise, emit a volatile load to retrieve the stack guard value. 3140 SDValue Chain = DAG.getEntryNode(); 3141 if (TLI.useLoadStackGuardNode()) { 3142 Guard = getLoadStackGuard(DAG, dl, Chain); 3143 } else { 3144 const Value *IRGuard = TLI.getSDagStackGuard(M); 3145 SDValue GuardPtr = getValue(IRGuard); 3146 3147 Guard = DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr, 3148 MachinePointerInfo(IRGuard, 0), Align, 3149 MachineMemOperand::MOVolatile); 3150 } 3151 3152 // Perform the comparison via a getsetcc. 3153 SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(), 3154 *DAG.getContext(), 3155 Guard.getValueType()), 3156 Guard, GuardVal, ISD::SETNE); 3157 3158 // If the guard/stackslot do not equal, branch to failure MBB. 3159 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl, 3160 MVT::Other, GuardVal.getOperand(0), 3161 Cmp, DAG.getBasicBlock(SPD.getFailureMBB())); 3162 // Otherwise branch to success MBB. 3163 SDValue Br = DAG.getNode(ISD::BR, dl, 3164 MVT::Other, BrCond, 3165 DAG.getBasicBlock(SPD.getSuccessMBB())); 3166 3167 DAG.setRoot(Br); 3168 } 3169 3170 /// Codegen the failure basic block for a stack protector check. 
3171 /// 3172 /// A failure stack protector machine basic block consists simply of a call to 3173 /// __stack_chk_fail(). 3174 /// 3175 /// For a high level explanation of how this fits into the stack protector 3176 /// generation see the comment on the declaration of class 3177 /// StackProtectorDescriptor. 3178 void 3179 SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) { 3180 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3181 TargetLowering::MakeLibCallOptions CallOptions; 3182 CallOptions.setDiscardResult(true); 3183 SDValue Chain = TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, 3184 MVT::isVoid, {}, CallOptions, getCurSDLoc()) 3185 .second; 3186 // On PS4/PS5, the "return address" must still be within the calling 3187 // function, even if it's at the very end, so emit an explicit TRAP here. 3188 // Passing 'true' for doesNotReturn above won't generate the trap for us. 3189 if (TM.getTargetTriple().isPS()) 3190 Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain); 3191 // WebAssembly needs an unreachable instruction after a non-returning call, 3192 // because the function return type can be different from __stack_chk_fail's 3193 // return type (void). 3194 if (TM.getTargetTriple().isWasm()) 3195 Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain); 3196 3197 DAG.setRoot(Chain); 3198 } 3199 3200 /// visitBitTestHeader - This function emits necessary code to produce value 3201 /// suitable for "bit tests" 3202 void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B, 3203 MachineBasicBlock *SwitchBB) { 3204 SDLoc dl = getCurSDLoc(); 3205 3206 // Subtract the minimum value. 3207 SDValue SwitchOp = getValue(B.SValue); 3208 EVT VT = SwitchOp.getValueType(); 3209 SDValue RangeSub = 3210 DAG.getNode(ISD::SUB, dl, VT, SwitchOp, DAG.getConstant(B.First, dl, VT)); 3211 3212 // Determine the type of the test operands. 3213 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3214 bool UsePtrType = false; 3215 if (!TLI.isTypeLegal(VT)) { 3216 UsePtrType = true; 3217 } else { 3218 for (unsigned i = 0, e = B.Cases.size(); i != e; ++i) 3219 if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) { 3220 // Switch table case range are encoded into series of masks. 3221 // Just use pointer type, it's guaranteed to fit. 3222 UsePtrType = true; 3223 break; 3224 } 3225 } 3226 SDValue Sub = RangeSub; 3227 if (UsePtrType) { 3228 VT = TLI.getPointerTy(DAG.getDataLayout()); 3229 Sub = DAG.getZExtOrTrunc(Sub, dl, VT); 3230 } 3231 3232 B.RegVT = VT.getSimpleVT(); 3233 B.Reg = FuncInfo.CreateReg(B.RegVT); 3234 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub); 3235 3236 MachineBasicBlock* MBB = B.Cases[0].ThisBB; 3237 3238 if (!B.FallthroughUnreachable) 3239 addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb); 3240 addSuccessorWithProb(SwitchBB, MBB, B.Prob); 3241 SwitchBB->normalizeSuccProbs(); 3242 3243 SDValue Root = CopyTo; 3244 if (!B.FallthroughUnreachable) { 3245 // Conditional branch to the default block. 3246 SDValue RangeCmp = DAG.getSetCC(dl, 3247 TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), 3248 RangeSub.getValueType()), 3249 RangeSub, DAG.getConstant(B.Range, dl, RangeSub.getValueType()), 3250 ISD::SETUGT); 3251 3252 Root = DAG.getNode(ISD::BRCOND, dl, MVT::Other, Root, RangeCmp, 3253 DAG.getBasicBlock(B.Default)); 3254 } 3255 3256 // Avoid emitting unnecessary branches to the next block. 
3257 if (MBB != NextBlock(SwitchBB)) 3258 Root = DAG.getNode(ISD::BR, dl, MVT::Other, Root, DAG.getBasicBlock(MBB)); 3259 3260 DAG.setRoot(Root); 3261 } 3262 3263 /// visitBitTestCase - this function produces one "bit test" 3264 void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB, 3265 MachineBasicBlock *NextMBB, 3266 BranchProbability BranchProbToNext, 3267 Register Reg, BitTestCase &B, 3268 MachineBasicBlock *SwitchBB) { 3269 SDLoc dl = getCurSDLoc(); 3270 MVT VT = BB.RegVT; 3271 SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT); 3272 SDValue Cmp; 3273 unsigned PopCount = llvm::popcount(B.Mask); 3274 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3275 if (PopCount == 1) { 3276 // Testing for a single bit; just compare the shift count with what it 3277 // would need to be to shift a 1 bit in that position. 3278 Cmp = DAG.getSetCC( 3279 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT), 3280 ShiftOp, DAG.getConstant(llvm::countr_zero(B.Mask), dl, VT), 3281 ISD::SETEQ); 3282 } else if (PopCount == BB.Range) { 3283 // There is only one zero bit in the range, test for it directly. 3284 Cmp = DAG.getSetCC( 3285 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT), 3286 ShiftOp, DAG.getConstant(llvm::countr_one(B.Mask), dl, VT), ISD::SETNE); 3287 } else { 3288 // Make desired shift 3289 SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT, 3290 DAG.getConstant(1, dl, VT), ShiftOp); 3291 3292 // Emit bit tests and jumps 3293 SDValue AndOp = DAG.getNode(ISD::AND, dl, 3294 VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT)); 3295 Cmp = DAG.getSetCC( 3296 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT), 3297 AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE); 3298 } 3299 3300 // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb. 3301 addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb); 3302 // The branch probability from SwitchBB to NextMBB is BranchProbToNext. 3303 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext); 3304 // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is 3305 // one as they are relative probabilities (and thus work more like weights), 3306 // and hence we need to normalize them to let the sum of them become one. 3307 SwitchBB->normalizeSuccProbs(); 3308 3309 SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl, 3310 MVT::Other, getControlRoot(), 3311 Cmp, DAG.getBasicBlock(B.TargetBB)); 3312 3313 // Avoid emitting unnecessary branches to the next block. 3314 if (NextMBB != NextBlock(SwitchBB)) 3315 BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd, 3316 DAG.getBasicBlock(NextMBB)); 3317 3318 DAG.setRoot(BrAnd); 3319 } 3320 3321 void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) { 3322 MachineBasicBlock *InvokeMBB = FuncInfo.MBB; 3323 3324 // Retrieve successors. Look through artificial IR level blocks like 3325 // catchswitch for successors. 3326 MachineBasicBlock *Return = FuncInfo.getMBB(I.getSuccessor(0)); 3327 const BasicBlock *EHPadBB = I.getSuccessor(1); 3328 MachineBasicBlock *EHPadMBB = FuncInfo.getMBB(EHPadBB); 3329 3330 // Deopt and ptrauth bundles are lowered in helper functions, and we don't 3331 // have to do anything here to lower funclet bundles. 
3332 assert(!I.hasOperandBundlesOtherThan( 3333 {LLVMContext::OB_deopt, LLVMContext::OB_gc_transition, 3334 LLVMContext::OB_gc_live, LLVMContext::OB_funclet, 3335 LLVMContext::OB_cfguardtarget, LLVMContext::OB_ptrauth, 3336 LLVMContext::OB_clang_arc_attachedcall}) && 3337 "Cannot lower invokes with arbitrary operand bundles yet!"); 3338 3339 const Value *Callee(I.getCalledOperand()); 3340 const Function *Fn = dyn_cast<Function>(Callee); 3341 if (isa<InlineAsm>(Callee)) 3342 visitInlineAsm(I, EHPadBB); 3343 else if (Fn && Fn->isIntrinsic()) { 3344 switch (Fn->getIntrinsicID()) { 3345 default: 3346 llvm_unreachable("Cannot invoke this intrinsic"); 3347 case Intrinsic::donothing: 3348 // Ignore invokes to @llvm.donothing: jump directly to the next BB. 3349 case Intrinsic::seh_try_begin: 3350 case Intrinsic::seh_scope_begin: 3351 case Intrinsic::seh_try_end: 3352 case Intrinsic::seh_scope_end: 3353 if (EHPadMBB) 3354 // a block referenced by EH table 3355 // so dtor-funclet not removed by opts 3356 EHPadMBB->setMachineBlockAddressTaken(); 3357 break; 3358 case Intrinsic::experimental_patchpoint_void: 3359 case Intrinsic::experimental_patchpoint: 3360 visitPatchpoint(I, EHPadBB); 3361 break; 3362 case Intrinsic::experimental_gc_statepoint: 3363 LowerStatepoint(cast<GCStatepointInst>(I), EHPadBB); 3364 break; 3365 case Intrinsic::wasm_rethrow: { 3366 // This is usually done in visitTargetIntrinsic, but this intrinsic is 3367 // special because it can be invoked, so we manually lower it to a DAG 3368 // node here. 3369 SmallVector<SDValue, 8> Ops; 3370 Ops.push_back(getControlRoot()); // inchain for the terminator node 3371 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3372 Ops.push_back( 3373 DAG.getTargetConstant(Intrinsic::wasm_rethrow, getCurSDLoc(), 3374 TLI.getPointerTy(DAG.getDataLayout()))); 3375 SDVTList VTs = DAG.getVTList(ArrayRef<EVT>({MVT::Other})); // outchain 3376 DAG.setRoot(DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops)); 3377 break; 3378 } 3379 } 3380 } else if (I.hasDeoptState()) { 3381 // Currently we do not lower any intrinsic calls with deopt operand bundles. 3382 // Eventually we will support lowering the @llvm.experimental.deoptimize 3383 // intrinsic, and right now there are no plans to support other intrinsics 3384 // with deopt state. 3385 LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB); 3386 } else if (I.countOperandBundlesOfType(LLVMContext::OB_ptrauth)) { 3387 LowerCallSiteWithPtrAuthBundle(cast<CallBase>(I), EHPadBB); 3388 } else { 3389 LowerCallTo(I, getValue(Callee), false, false, EHPadBB); 3390 } 3391 3392 // If the value of the invoke is used outside of its defining block, make it 3393 // available as a virtual register. 3394 // We already took care of the exported value for the statepoint instruction 3395 // during call to the LowerStatepoint. 3396 if (!isa<GCStatepointInst>(I)) { 3397 CopyToExportRegsIfNeeded(&I); 3398 } 3399 3400 SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests; 3401 BranchProbabilityInfo *BPI = FuncInfo.BPI; 3402 BranchProbability EHPadBBProb = 3403 BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB) 3404 : BranchProbability::getZero(); 3405 findUnwindDestinations(FuncInfo, EHPadBB, EHPadBBProb, UnwindDests); 3406 3407 // Update successor info. 
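// Note that findUnwindDestinations may have produced more than one EH-pad
// successor (e.g. when the unwind edge reaches a catchswitch with several
// handlers), so every entry in UnwindDests is added here.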
3408 addSuccessorWithProb(InvokeMBB, Return); 3409 for (auto &UnwindDest : UnwindDests) { 3410 UnwindDest.first->setIsEHPad(); 3411 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second); 3412 } 3413 InvokeMBB->normalizeSuccProbs(); 3414 3415 // Drop into normal successor. 3416 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, getControlRoot(), 3417 DAG.getBasicBlock(Return))); 3418 } 3419 3420 void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) { 3421 MachineBasicBlock *CallBrMBB = FuncInfo.MBB; 3422 3423 // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't 3424 // have to do anything here to lower funclet bundles. 3425 assert(!I.hasOperandBundlesOtherThan( 3426 {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) && 3427 "Cannot lower callbrs with arbitrary operand bundles yet!"); 3428 3429 assert(I.isInlineAsm() && "Only know how to handle inlineasm callbr"); 3430 visitInlineAsm(I); 3431 CopyToExportRegsIfNeeded(&I); 3432 3433 // Retrieve successors. 3434 SmallPtrSet<BasicBlock *, 8> Dests; 3435 Dests.insert(I.getDefaultDest()); 3436 MachineBasicBlock *Return = FuncInfo.getMBB(I.getDefaultDest()); 3437 3438 // Update successor info. 3439 addSuccessorWithProb(CallBrMBB, Return, BranchProbability::getOne()); 3440 for (unsigned i = 0, e = I.getNumIndirectDests(); i < e; ++i) { 3441 BasicBlock *Dest = I.getIndirectDest(i); 3442 MachineBasicBlock *Target = FuncInfo.getMBB(Dest); 3443 Target->setIsInlineAsmBrIndirectTarget(); 3444 Target->setMachineBlockAddressTaken(); 3445 Target->setLabelMustBeEmitted(); 3446 // Don't add duplicate machine successors. 3447 if (Dests.insert(Dest).second) 3448 addSuccessorWithProb(CallBrMBB, Target, BranchProbability::getZero()); 3449 } 3450 CallBrMBB->normalizeSuccProbs(); 3451 3452 // Drop into default successor. 3453 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), 3454 MVT::Other, getControlRoot(), 3455 DAG.getBasicBlock(Return))); 3456 } 3457 3458 void SelectionDAGBuilder::visitResume(const ResumeInst &RI) { 3459 llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!"); 3460 } 3461 3462 void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) { 3463 assert(FuncInfo.MBB->isEHPad() && 3464 "Call to landingpad not in landing pad!"); 3465 3466 // If there aren't registers to copy the values into (e.g., during SjLj 3467 // exceptions), then don't bother to create these DAG nodes. 3468 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3469 const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn(); 3470 if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 && 3471 TLI.getExceptionSelectorRegister(PersonalityFn) == 0) 3472 return; 3473 3474 // If landingpad's return type is token type, we don't create DAG nodes 3475 // for its exception pointer and selector value. The extraction of exception 3476 // pointer or selector value from token type landingpads is not currently 3477 // supported. 3478 if (LP.getType()->isTokenTy()) 3479 return; 3480 3481 SmallVector<EVT, 2> ValueVTs; 3482 SDLoc dl = getCurSDLoc(); 3483 ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs); 3484 assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported"); 3485 3486 // Get the two live-in registers as SDValues. The physregs have already been 3487 // copied into virtual registers. 
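// Ops[0] will be the exception pointer (or a null constant when no
// exception-pointer register is in use) and Ops[1] the selector; each is
// zero-extended or truncated from a pointer-sized virtual register down to
// the value type the landingpad declares.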
3488 SDValue Ops[2]; 3489 if (FuncInfo.ExceptionPointerVirtReg) { 3490 Ops[0] = DAG.getZExtOrTrunc( 3491 DAG.getCopyFromReg(DAG.getEntryNode(), dl, 3492 FuncInfo.ExceptionPointerVirtReg, 3493 TLI.getPointerTy(DAG.getDataLayout())), 3494 dl, ValueVTs[0]); 3495 } else { 3496 Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout())); 3497 } 3498 Ops[1] = DAG.getZExtOrTrunc( 3499 DAG.getCopyFromReg(DAG.getEntryNode(), dl, 3500 FuncInfo.ExceptionSelectorVirtReg, 3501 TLI.getPointerTy(DAG.getDataLayout())), 3502 dl, ValueVTs[1]); 3503 3504 // Merge into one. 3505 SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl, 3506 DAG.getVTList(ValueVTs), Ops); 3507 setValue(&LP, Res); 3508 } 3509 3510 void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First, 3511 MachineBasicBlock *Last) { 3512 // Update JTCases. 3513 for (JumpTableBlock &JTB : SL->JTCases) 3514 if (JTB.first.HeaderBB == First) 3515 JTB.first.HeaderBB = Last; 3516 3517 // Update BitTestCases. 3518 for (BitTestBlock &BTB : SL->BitTestCases) 3519 if (BTB.Parent == First) 3520 BTB.Parent = Last; 3521 } 3522 3523 void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) { 3524 MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB; 3525 3526 // Update machine-CFG edges with unique successors. 3527 SmallSet<BasicBlock*, 32> Done; 3528 for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) { 3529 BasicBlock *BB = I.getSuccessor(i); 3530 bool Inserted = Done.insert(BB).second; 3531 if (!Inserted) 3532 continue; 3533 3534 MachineBasicBlock *Succ = FuncInfo.getMBB(BB); 3535 addSuccessorWithProb(IndirectBrMBB, Succ); 3536 } 3537 IndirectBrMBB->normalizeSuccProbs(); 3538 3539 DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(), 3540 MVT::Other, getControlRoot(), 3541 getValue(I.getAddress()))); 3542 } 3543 3544 void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) { 3545 if (!DAG.getTarget().Options.TrapUnreachable) 3546 return; 3547 3548 // We may be able to ignore unreachable behind a noreturn call. 3549 if (const CallInst *Call = dyn_cast_or_null<CallInst>(I.getPrevNode()); 3550 Call && Call->doesNotReturn()) { 3551 if (DAG.getTarget().Options.NoTrapAfterNoreturn) 3552 return; 3553 // Do not emit an additional trap instruction. 
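// For example, IR of the form "call void @llvm.trap() ... unreachable"
// already traps, so emitting another ISD::TRAP node would be redundant.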
3554 if (Call->isNonContinuableTrap()) 3555 return; 3556 } 3557 3558 DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot())); 3559 } 3560 3561 void SelectionDAGBuilder::visitUnary(const User &I, unsigned Opcode) { 3562 SDNodeFlags Flags; 3563 if (auto *FPOp = dyn_cast<FPMathOperator>(&I)) 3564 Flags.copyFMF(*FPOp); 3565 3566 SDValue Op = getValue(I.getOperand(0)); 3567 SDValue UnNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op.getValueType(), 3568 Op, Flags); 3569 setValue(&I, UnNodeValue); 3570 } 3571 3572 void SelectionDAGBuilder::visitBinary(const User &I, unsigned Opcode) { 3573 SDNodeFlags Flags; 3574 if (auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&I)) { 3575 Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap()); 3576 Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap()); 3577 } 3578 if (auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I)) 3579 Flags.setExact(ExactOp->isExact()); 3580 if (auto *DisjointOp = dyn_cast<PossiblyDisjointInst>(&I)) 3581 Flags.setDisjoint(DisjointOp->isDisjoint()); 3582 if (auto *FPOp = dyn_cast<FPMathOperator>(&I)) 3583 Flags.copyFMF(*FPOp); 3584 3585 SDValue Op1 = getValue(I.getOperand(0)); 3586 SDValue Op2 = getValue(I.getOperand(1)); 3587 SDValue BinNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), 3588 Op1, Op2, Flags); 3589 setValue(&I, BinNodeValue); 3590 } 3591 3592 void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) { 3593 SDValue Op1 = getValue(I.getOperand(0)); 3594 SDValue Op2 = getValue(I.getOperand(1)); 3595 3596 EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy( 3597 Op1.getValueType(), DAG.getDataLayout()); 3598 3599 // Coerce the shift amount to the right type if we can. This exposes the 3600 // truncate or zext to optimization early. 
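// For instance (illustrative), for "shl i64 %x, i64 %n" on a target whose
// shift-amount type is narrower than i64 (x86 uses i8), %n is truncated
// here instead of during type legalization.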
3601 if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) { 3602 assert(ShiftTy.getSizeInBits() >= Log2_32_Ceil(Op1.getValueSizeInBits()) && 3603 "Unexpected shift type"); 3604 Op2 = DAG.getZExtOrTrunc(Op2, getCurSDLoc(), ShiftTy); 3605 } 3606 3607 bool nuw = false; 3608 bool nsw = false; 3609 bool exact = false; 3610 3611 if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) { 3612 3613 if (const OverflowingBinaryOperator *OFBinOp = 3614 dyn_cast<const OverflowingBinaryOperator>(&I)) { 3615 nuw = OFBinOp->hasNoUnsignedWrap(); 3616 nsw = OFBinOp->hasNoSignedWrap(); 3617 } 3618 if (const PossiblyExactOperator *ExactOp = 3619 dyn_cast<const PossiblyExactOperator>(&I)) 3620 exact = ExactOp->isExact(); 3621 } 3622 SDNodeFlags Flags; 3623 Flags.setExact(exact); 3624 Flags.setNoSignedWrap(nsw); 3625 Flags.setNoUnsignedWrap(nuw); 3626 SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2, 3627 Flags); 3628 setValue(&I, Res); 3629 } 3630 3631 void SelectionDAGBuilder::visitSDiv(const User &I) { 3632 SDValue Op1 = getValue(I.getOperand(0)); 3633 SDValue Op2 = getValue(I.getOperand(1)); 3634 3635 SDNodeFlags Flags; 3636 Flags.setExact(isa<PossiblyExactOperator>(&I) && 3637 cast<PossiblyExactOperator>(&I)->isExact()); 3638 setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1, 3639 Op2, Flags)); 3640 } 3641 3642 void SelectionDAGBuilder::visitICmp(const ICmpInst &I) { 3643 ICmpInst::Predicate predicate = I.getPredicate(); 3644 SDValue Op1 = getValue(I.getOperand(0)); 3645 SDValue Op2 = getValue(I.getOperand(1)); 3646 ISD::CondCode Opcode = getICmpCondCode(predicate); 3647 3648 auto &TLI = DAG.getTargetLoweringInfo(); 3649 EVT MemVT = 3650 TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType()); 3651 3652 // If a pointer's DAG type is larger than its memory type then the DAG values 3653 // are zero-extended. This breaks signed comparisons so truncate back to the 3654 // underlying type before doing the compare. 3655 if (Op1.getValueType() != MemVT) { 3656 Op1 = DAG.getPtrExtOrTrunc(Op1, getCurSDLoc(), MemVT); 3657 Op2 = DAG.getPtrExtOrTrunc(Op2, getCurSDLoc(), MemVT); 3658 } 3659 3660 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 3661 I.getType()); 3662 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode)); 3663 } 3664 3665 void SelectionDAGBuilder::visitFCmp(const FCmpInst &I) { 3666 FCmpInst::Predicate predicate = I.getPredicate(); 3667 SDValue Op1 = getValue(I.getOperand(0)); 3668 SDValue Op2 = getValue(I.getOperand(1)); 3669 3670 ISD::CondCode Condition = getFCmpCondCode(predicate); 3671 auto *FPMO = cast<FPMathOperator>(&I); 3672 if (FPMO->hasNoNaNs() || TM.Options.NoNaNsFPMath) 3673 Condition = getFCmpCodeWithoutNaN(Condition); 3674 3675 SDNodeFlags Flags; 3676 Flags.copyFMF(*FPMO); 3677 SelectionDAG::FlagInserter FlagsInserter(DAG, Flags); 3678 3679 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 3680 I.getType()); 3681 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition)); 3682 } 3683 3684 // Check if the condition of the select has one use or two users that are both 3685 // selects with the same condition. 
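// (More precisely, the implementation below just checks that every user of
// the condition is itself a select.)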
3686 static bool hasOnlySelectUsers(const Value *Cond) { 3687 return llvm::all_of(Cond->users(), [](const Value *V) { 3688 return isa<SelectInst>(V); 3689 }); 3690 } 3691 3692 void SelectionDAGBuilder::visitSelect(const User &I) { 3693 SmallVector<EVT, 4> ValueVTs; 3694 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(), 3695 ValueVTs); 3696 unsigned NumValues = ValueVTs.size(); 3697 if (NumValues == 0) return; 3698 3699 SmallVector<SDValue, 4> Values(NumValues); 3700 SDValue Cond = getValue(I.getOperand(0)); 3701 SDValue LHSVal = getValue(I.getOperand(1)); 3702 SDValue RHSVal = getValue(I.getOperand(2)); 3703 SmallVector<SDValue, 1> BaseOps(1, Cond); 3704 ISD::NodeType OpCode = 3705 Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT; 3706 3707 bool IsUnaryAbs = false; 3708 bool Negate = false; 3709 3710 SDNodeFlags Flags; 3711 if (auto *FPOp = dyn_cast<FPMathOperator>(&I)) 3712 Flags.copyFMF(*FPOp); 3713 3714 Flags.setUnpredictable( 3715 cast<SelectInst>(I).getMetadata(LLVMContext::MD_unpredictable)); 3716 3717 // Min/max matching is only viable if all output VTs are the same. 3718 if (all_equal(ValueVTs)) { 3719 EVT VT = ValueVTs[0]; 3720 LLVMContext &Ctx = *DAG.getContext(); 3721 auto &TLI = DAG.getTargetLoweringInfo(); 3722 3723 // We care about the legality of the operation after it has been type 3724 // legalized. 3725 while (TLI.getTypeAction(Ctx, VT) != TargetLoweringBase::TypeLegal) 3726 VT = TLI.getTypeToTransformTo(Ctx, VT); 3727 3728 // If the vselect is legal, assume we want to leave this as a vector setcc + 3729 // vselect. Otherwise, if this is going to be scalarized, we want to see if 3730 // min/max is legal on the scalar type. 3731 bool UseScalarMinMax = VT.isVector() && 3732 !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT); 3733 3734 // ValueTracking's select pattern matching does not account for -0.0, 3735 // so we can't lower to FMINIMUM/FMAXIMUM because those nodes specify that 3736 // -0.0 is less than +0.0. 
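// Illustrative example: "select (icmp slt i32 %a, %b), i32 %a, i32 %b" is
// matched as a signed-min pattern and, when ISD::SMIN is available for the
// type, lowered to a single smin node rather than a setcc + select pair.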
3737 const Value *LHS, *RHS; 3738 auto SPR = matchSelectPattern(&I, LHS, RHS); 3739 ISD::NodeType Opc = ISD::DELETED_NODE; 3740 switch (SPR.Flavor) { 3741 case SPF_UMAX: Opc = ISD::UMAX; break; 3742 case SPF_UMIN: Opc = ISD::UMIN; break; 3743 case SPF_SMAX: Opc = ISD::SMAX; break; 3744 case SPF_SMIN: Opc = ISD::SMIN; break; 3745 case SPF_FMINNUM: 3746 switch (SPR.NaNBehavior) { 3747 case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?"); 3748 case SPNB_RETURNS_NAN: break; 3749 case SPNB_RETURNS_OTHER: Opc = ISD::FMINNUM; break; 3750 case SPNB_RETURNS_ANY: 3751 if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT) || 3752 (UseScalarMinMax && 3753 TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT.getScalarType()))) 3754 Opc = ISD::FMINNUM; 3755 break; 3756 } 3757 break; 3758 case SPF_FMAXNUM: 3759 switch (SPR.NaNBehavior) { 3760 case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?"); 3761 case SPNB_RETURNS_NAN: break; 3762 case SPNB_RETURNS_OTHER: Opc = ISD::FMAXNUM; break; 3763 case SPNB_RETURNS_ANY: 3764 if (TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT) || 3765 (UseScalarMinMax && 3766 TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT.getScalarType()))) 3767 Opc = ISD::FMAXNUM; 3768 break; 3769 } 3770 break; 3771 case SPF_NABS: 3772 Negate = true; 3773 [[fallthrough]]; 3774 case SPF_ABS: 3775 IsUnaryAbs = true; 3776 Opc = ISD::ABS; 3777 break; 3778 default: break; 3779 } 3780 3781 if (!IsUnaryAbs && Opc != ISD::DELETED_NODE && 3782 (TLI.isOperationLegalOrCustomOrPromote(Opc, VT) || 3783 (UseScalarMinMax && 3784 TLI.isOperationLegalOrCustom(Opc, VT.getScalarType()))) && 3785 // If the underlying comparison instruction is used by any other 3786 // instruction, the consumed instructions won't be destroyed, so it is 3787 // not profitable to convert to a min/max. 3788 hasOnlySelectUsers(cast<SelectInst>(I).getCondition())) { 3789 OpCode = Opc; 3790 LHSVal = getValue(LHS); 3791 RHSVal = getValue(RHS); 3792 BaseOps.clear(); 3793 } 3794 3795 if (IsUnaryAbs) { 3796 OpCode = Opc; 3797 LHSVal = getValue(LHS); 3798 BaseOps.clear(); 3799 } 3800 } 3801 3802 if (IsUnaryAbs) { 3803 for (unsigned i = 0; i != NumValues; ++i) { 3804 SDLoc dl = getCurSDLoc(); 3805 EVT VT = LHSVal.getNode()->getValueType(LHSVal.getResNo() + i); 3806 Values[i] = 3807 DAG.getNode(OpCode, dl, VT, LHSVal.getValue(LHSVal.getResNo() + i)); 3808 if (Negate) 3809 Values[i] = DAG.getNegative(Values[i], dl, VT); 3810 } 3811 } else { 3812 for (unsigned i = 0; i != NumValues; ++i) { 3813 SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end()); 3814 Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i)); 3815 Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i)); 3816 Values[i] = DAG.getNode( 3817 OpCode, getCurSDLoc(), 3818 LHSVal.getNode()->getValueType(LHSVal.getResNo() + i), Ops, Flags); 3819 } 3820 } 3821 3822 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(), 3823 DAG.getVTList(ValueVTs), Values)); 3824 } 3825 3826 void SelectionDAGBuilder::visitTrunc(const User &I) { 3827 // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest). 3828 SDValue N = getValue(I.getOperand(0)); 3829 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 3830 I.getType()); 3831 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N)); 3832 } 3833 3834 void SelectionDAGBuilder::visitZExt(const User &I) { 3835 // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest). 3836 // ZExt also can't be a cast to bool for same reason. 
So, nothing much to do 3837 SDValue N = getValue(I.getOperand(0)); 3838 auto &TLI = DAG.getTargetLoweringInfo(); 3839 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 3840 3841 SDNodeFlags Flags; 3842 if (auto *PNI = dyn_cast<PossiblyNonNegInst>(&I)) 3843 Flags.setNonNeg(PNI->hasNonNeg()); 3844 3845 // Eagerly use nonneg information to canonicalize towards sign_extend if 3846 // that is the target's preference. 3847 // TODO: Let the target do this later. 3848 if (Flags.hasNonNeg() && 3849 TLI.isSExtCheaperThanZExt(N.getValueType(), DestVT)) { 3850 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N)); 3851 return; 3852 } 3853 3854 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N, Flags)); 3855 } 3856 3857 void SelectionDAGBuilder::visitSExt(const User &I) { 3858 // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest). 3859 // SExt also can't be a cast to bool for same reason. So, nothing much to do 3860 SDValue N = getValue(I.getOperand(0)); 3861 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 3862 I.getType()); 3863 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N)); 3864 } 3865 3866 void SelectionDAGBuilder::visitFPTrunc(const User &I) { 3867 // FPTrunc is never a no-op cast, no need to check 3868 SDValue N = getValue(I.getOperand(0)); 3869 SDLoc dl = getCurSDLoc(); 3870 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3871 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 3872 setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N, 3873 DAG.getTargetConstant( 3874 0, dl, TLI.getPointerTy(DAG.getDataLayout())))); 3875 } 3876 3877 void SelectionDAGBuilder::visitFPExt(const User &I) { 3878 // FPExt is never a no-op cast, no need to check 3879 SDValue N = getValue(I.getOperand(0)); 3880 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 3881 I.getType()); 3882 setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N)); 3883 } 3884 3885 void SelectionDAGBuilder::visitFPToUI(const User &I) { 3886 // FPToUI is never a no-op cast, no need to check 3887 SDValue N = getValue(I.getOperand(0)); 3888 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 3889 I.getType()); 3890 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N)); 3891 } 3892 3893 void SelectionDAGBuilder::visitFPToSI(const User &I) { 3894 // FPToSI is never a no-op cast, no need to check 3895 SDValue N = getValue(I.getOperand(0)); 3896 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 3897 I.getType()); 3898 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N)); 3899 } 3900 3901 void SelectionDAGBuilder::visitUIToFP(const User &I) { 3902 // UIToFP is never a no-op cast, no need to check 3903 SDValue N = getValue(I.getOperand(0)); 3904 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 3905 I.getType()); 3906 SDNodeFlags Flags; 3907 if (auto *PNI = dyn_cast<PossiblyNonNegInst>(&I)) 3908 Flags.setNonNeg(PNI->hasNonNeg()); 3909 3910 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N, Flags)); 3911 } 3912 3913 void SelectionDAGBuilder::visitSIToFP(const User &I) { 3914 // SIToFP is never a no-op cast, no need to check 3915 SDValue N = getValue(I.getOperand(0)); 3916 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 3917 I.getType()); 3918 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N)); 3919 } 3920 3921 void 
SelectionDAGBuilder::visitPtrToInt(const User &I) { 3922 // What to do depends on the size of the integer and the size of the pointer. 3923 // We can either truncate, zero extend, or no-op, accordingly. 3924 SDValue N = getValue(I.getOperand(0)); 3925 auto &TLI = DAG.getTargetLoweringInfo(); 3926 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 3927 I.getType()); 3928 EVT PtrMemVT = 3929 TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType()); 3930 N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), PtrMemVT); 3931 N = DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT); 3932 setValue(&I, N); 3933 } 3934 3935 void SelectionDAGBuilder::visitIntToPtr(const User &I) { 3936 // What to do depends on the size of the integer and the size of the pointer. 3937 // We can either truncate, zero extend, or no-op, accordingly. 3938 SDValue N = getValue(I.getOperand(0)); 3939 auto &TLI = DAG.getTargetLoweringInfo(); 3940 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 3941 EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType()); 3942 N = DAG.getZExtOrTrunc(N, getCurSDLoc(), PtrMemVT); 3943 N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), DestVT); 3944 setValue(&I, N); 3945 } 3946 3947 void SelectionDAGBuilder::visitBitCast(const User &I) { 3948 SDValue N = getValue(I.getOperand(0)); 3949 SDLoc dl = getCurSDLoc(); 3950 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 3951 I.getType()); 3952 3953 // BitCast assures us that source and destination are the same size so this is 3954 // either a BITCAST or a no-op. 3955 if (DestVT != N.getValueType()) 3956 setValue(&I, DAG.getNode(ISD::BITCAST, dl, 3957 DestVT, N)); // convert types. 3958 // Check if the original LLVM IR Operand was a ConstantInt, because getValue() 3959 // might fold any kind of constant expression to an integer constant and that 3960 // is not what we are looking for. Only recognize a bitcast of a genuine 3961 // constant integer as an opaque constant. 3962 else if(ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0))) 3963 setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false, 3964 /*isOpaque*/true)); 3965 else 3966 setValue(&I, N); // noop cast. 
3967 } 3968 3969 void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) { 3970 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3971 const Value *SV = I.getOperand(0); 3972 SDValue N = getValue(SV); 3973 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 3974 3975 unsigned SrcAS = SV->getType()->getPointerAddressSpace(); 3976 unsigned DestAS = I.getType()->getPointerAddressSpace(); 3977 3978 if (!TM.isNoopAddrSpaceCast(SrcAS, DestAS)) 3979 N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS); 3980 3981 setValue(&I, N); 3982 } 3983 3984 void SelectionDAGBuilder::visitInsertElement(const User &I) { 3985 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3986 SDValue InVec = getValue(I.getOperand(0)); 3987 SDValue InVal = getValue(I.getOperand(1)); 3988 SDValue InIdx = DAG.getZExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(), 3989 TLI.getVectorIdxTy(DAG.getDataLayout())); 3990 setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(), 3991 TLI.getValueType(DAG.getDataLayout(), I.getType()), 3992 InVec, InVal, InIdx)); 3993 } 3994 3995 void SelectionDAGBuilder::visitExtractElement(const User &I) { 3996 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3997 SDValue InVec = getValue(I.getOperand(0)); 3998 SDValue InIdx = DAG.getZExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(), 3999 TLI.getVectorIdxTy(DAG.getDataLayout())); 4000 setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(), 4001 TLI.getValueType(DAG.getDataLayout(), I.getType()), 4002 InVec, InIdx)); 4003 } 4004 4005 void SelectionDAGBuilder::visitShuffleVector(const User &I) { 4006 SDValue Src1 = getValue(I.getOperand(0)); 4007 SDValue Src2 = getValue(I.getOperand(1)); 4008 ArrayRef<int> Mask; 4009 if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I)) 4010 Mask = SVI->getShuffleMask(); 4011 else 4012 Mask = cast<ConstantExpr>(I).getShuffleMask(); 4013 SDLoc DL = getCurSDLoc(); 4014 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4015 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 4016 EVT SrcVT = Src1.getValueType(); 4017 4018 if (all_of(Mask, [](int Elem) { return Elem == 0; }) && 4019 VT.isScalableVector()) { 4020 // Canonical splat form of first element of first input vector. 4021 SDValue FirstElt = 4022 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT.getScalarType(), Src1, 4023 DAG.getVectorIdxConstant(0, DL)); 4024 setValue(&I, DAG.getNode(ISD::SPLAT_VECTOR, DL, VT, FirstElt)); 4025 return; 4026 } 4027 4028 // For now, we only handle splats for scalable vectors. 4029 // The DAGCombiner will perform a BUILD_VECTOR -> SPLAT_VECTOR transformation 4030 // for targets that support a SPLAT_VECTOR for non-scalable vector types. 4031 assert(!VT.isScalableVector() && "Unsupported scalable vector shuffle"); 4032 4033 unsigned SrcNumElts = SrcVT.getVectorNumElements(); 4034 unsigned MaskNumElts = Mask.size(); 4035 4036 if (SrcNumElts == MaskNumElts) { 4037 setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, Mask)); 4038 return; 4039 } 4040 4041 // Normalize the shuffle vector since mask and vector length don't match. 4042 if (SrcNumElts < MaskNumElts) { 4043 // Mask is longer than the source vectors. We can use concatenate vector to 4044 // make the mask and vectors lengths match. 4045 4046 if (MaskNumElts % SrcNumElts == 0) { 4047 // Mask length is a multiple of the source vector length. 4048 // Check if the shuffle is some kind of concatenation of the input 4049 // vectors. 
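// Illustrative example: with two <2 x i32> sources, the mask <0,1,2,3>
// concatenates Src1 and Src2, and <2,3,0,1> concatenates Src2 and Src1;
// either way a single CONCAT_VECTORS node suffices.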
4050 unsigned NumConcat = MaskNumElts / SrcNumElts; 4051 bool IsConcat = true; 4052 SmallVector<int, 8> ConcatSrcs(NumConcat, -1); 4053 for (unsigned i = 0; i != MaskNumElts; ++i) { 4054 int Idx = Mask[i]; 4055 if (Idx < 0) 4056 continue; 4057 // Ensure the indices in each SrcVT sized piece are sequential and that 4058 // the same source is used for the whole piece. 4059 if ((Idx % SrcNumElts != (i % SrcNumElts)) || 4060 (ConcatSrcs[i / SrcNumElts] >= 0 && 4061 ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) { 4062 IsConcat = false; 4063 break; 4064 } 4065 // Remember which source this index came from. 4066 ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts; 4067 } 4068 4069 // The shuffle is concatenating multiple vectors together. Just emit 4070 // a CONCAT_VECTORS operation. 4071 if (IsConcat) { 4072 SmallVector<SDValue, 8> ConcatOps; 4073 for (auto Src : ConcatSrcs) { 4074 if (Src < 0) 4075 ConcatOps.push_back(DAG.getUNDEF(SrcVT)); 4076 else if (Src == 0) 4077 ConcatOps.push_back(Src1); 4078 else 4079 ConcatOps.push_back(Src2); 4080 } 4081 setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps)); 4082 return; 4083 } 4084 } 4085 4086 unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts); 4087 unsigned NumConcat = PaddedMaskNumElts / SrcNumElts; 4088 EVT PaddedVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(), 4089 PaddedMaskNumElts); 4090 4091 // Pad both vectors with undefs to make them the same length as the mask. 4092 SDValue UndefVal = DAG.getUNDEF(SrcVT); 4093 4094 SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal); 4095 SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal); 4096 MOps1[0] = Src1; 4097 MOps2[0] = Src2; 4098 4099 Src1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps1); 4100 Src2 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps2); 4101 4102 // Readjust mask for new input vector length. 4103 SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1); 4104 for (unsigned i = 0; i != MaskNumElts; ++i) { 4105 int Idx = Mask[i]; 4106 if (Idx >= (int)SrcNumElts) 4107 Idx -= SrcNumElts - PaddedMaskNumElts; 4108 MappedOps[i] = Idx; 4109 } 4110 4111 SDValue Result = DAG.getVectorShuffle(PaddedVT, DL, Src1, Src2, MappedOps); 4112 4113 // If the concatenated vector was padded, extract a subvector with the 4114 // correct number of elements. 4115 if (MaskNumElts != PaddedMaskNumElts) 4116 Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Result, 4117 DAG.getVectorIdxConstant(0, DL)); 4118 4119 setValue(&I, Result); 4120 return; 4121 } 4122 4123 if (SrcNumElts > MaskNumElts) { 4124 // Analyze the access pattern of the vector to see if we can extract 4125 // two subvectors and do the shuffle. 4126 int StartIdx[2] = { -1, -1 }; // StartIdx to extract from 4127 bool CanExtract = true; 4128 for (int Idx : Mask) { 4129 unsigned Input = 0; 4130 if (Idx < 0) 4131 continue; 4132 4133 if (Idx >= (int)SrcNumElts) { 4134 Input = 1; 4135 Idx -= SrcNumElts; 4136 } 4137 4138 // If all the indices come from the same MaskNumElts sized portion of 4139 // the sources we can use extract. Also make sure the extract wouldn't 4140 // extract past the end of the source. 4141 int NewStartIdx = alignDown(Idx, MaskNumElts); 4142 if (NewStartIdx + MaskNumElts > SrcNumElts || 4143 (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx)) 4144 CanExtract = false; 4145 // Make sure we always update StartIdx as we use it to track if all 4146 // elements are undef. 
4147 StartIdx[Input] = NewStartIdx;
4148 }
4149
4150 if (StartIdx[0] < 0 && StartIdx[1] < 0) {
4151 setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
4152 return;
4153 }
4154 if (CanExtract) {
4155 // Extract the appropriate subvectors and generate a vector shuffle.
4156 for (unsigned Input = 0; Input < 2; ++Input) {
4157 SDValue &Src = Input == 0 ? Src1 : Src2;
4158 if (StartIdx[Input] < 0)
4159 Src = DAG.getUNDEF(VT);
4160 else {
4161 Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src,
4162 DAG.getVectorIdxConstant(StartIdx[Input], DL));
4163 }
4164 }
4165
4166 // Calculate the new mask.
4167 SmallVector<int, 8> MappedOps(Mask);
4168 for (int &Idx : MappedOps) {
4169 if (Idx >= (int)SrcNumElts)
4170 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
4171 else if (Idx >= 0)
4172 Idx -= StartIdx[0];
4173 }
4174
4175 setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, MappedOps));
4176 return;
4177 }
4178 }
4179
4180 // We can't use either concat vectors or extract subvectors, so fall back
4181 // to replacing the shuffle with a per-element extract and build vector.
4183 EVT EltVT = VT.getVectorElementType();
4184 SmallVector<SDValue, 8> Ops;
4185 for (int Idx : Mask) {
4186 SDValue Res;
4187
4188 if (Idx < 0) {
4189 Res = DAG.getUNDEF(EltVT);
4190 } else {
4191 SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
4192 if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
4193
4194 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Src,
4195 DAG.getVectorIdxConstant(Idx, DL));
4196 }
4197
4198 Ops.push_back(Res);
4199 }
4200
4201 setValue(&I, DAG.getBuildVector(VT, DL, Ops));
4202 }
4203
4204 void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
4205 ArrayRef<unsigned> Indices = I.getIndices();
4206 const Value *Op0 = I.getOperand(0);
4207 const Value *Op1 = I.getOperand(1);
4208 Type *AggTy = I.getType();
4209 Type *ValTy = Op1->getType();
4210 bool IntoUndef = isa<UndefValue>(Op0);
4211 bool FromUndef = isa<UndefValue>(Op1);
4212
4213 unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
4214
4215 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4216 SmallVector<EVT, 4> AggValueVTs;
4217 ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
4218 SmallVector<EVT, 4> ValValueVTs;
4219 ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
4220
4221 unsigned NumAggValues = AggValueVTs.size();
4222 unsigned NumValValues = ValValueVTs.size();
4223 SmallVector<SDValue, 4> Values(NumAggValues);
4224
4225 // Ignore an insertvalue that produces an empty object.
4226 if (!NumAggValues) {
4227 setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
4228 return;
4229 }
4230
4231 SDValue Agg = getValue(Op0);
4232 unsigned i = 0;
4233 // Copy the beginning value(s) from the original aggregate.
4234 for (; i != LinearIndex; ++i)
4235 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
4236 SDValue(Agg.getNode(), Agg.getResNo() + i);
4237 // Copy values from the inserted value(s).
4238 if (NumValValues) {
4239 SDValue Val = getValue(Op1);
4240 for (; i != LinearIndex + NumValValues; ++i)
4241 Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
4242 SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
4243 }
4244 // Copy remaining value(s) from the original aggregate.
4245 for (; i != NumAggValues; ++i)
4246 Values[i] = IntoUndef ?
DAG.getUNDEF(AggValueVTs[i]) :
4247 SDValue(Agg.getNode(), Agg.getResNo() + i);
4248
4249 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
4250 DAG.getVTList(AggValueVTs), Values));
4251 }
4252
4253 void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
4254 ArrayRef<unsigned> Indices = I.getIndices();
4255 const Value *Op0 = I.getOperand(0);
4256 Type *AggTy = Op0->getType();
4257 Type *ValTy = I.getType();
4258 bool OutOfUndef = isa<UndefValue>(Op0);
4259
4260 unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
4261
4262 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4263 SmallVector<EVT, 4> ValValueVTs;
4264 ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
4265
4266 unsigned NumValValues = ValValueVTs.size();
4267
4268 // Ignore an extractvalue that produces an empty object.
4269 if (!NumValValues) {
4270 setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
4271 return;
4272 }
4273
4274 SmallVector<SDValue, 4> Values(NumValValues);
4275
4276 SDValue Agg = getValue(Op0);
4277 // Copy out the selected value(s).
4278 for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
4279 Values[i - LinearIndex] =
4280 OutOfUndef ?
4281 DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
4282 SDValue(Agg.getNode(), Agg.getResNo() + i);
4283
4284 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
4285 DAG.getVTList(ValValueVTs), Values));
4286 }
4287
4288 void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
4289 Value *Op0 = I.getOperand(0);
4290 // Note that the pointer operand may be a vector of pointers. Take the scalar
4291 // element which holds a pointer.
4292 unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace();
4293 SDValue N = getValue(Op0);
4294 SDLoc dl = getCurSDLoc();
4295 auto &TLI = DAG.getTargetLoweringInfo();
4296 GEPNoWrapFlags NW = cast<GEPOperator>(I).getNoWrapFlags();
4297
4298 // Normalize a vector GEP - all scalar operands should be converted to
4299 // splat vectors.
4300 bool IsVectorGEP = I.getType()->isVectorTy();
4301 ElementCount VectorElementCount =
4302 IsVectorGEP ? cast<VectorType>(I.getType())->getElementCount()
4303 : ElementCount::getFixed(0);
4304
4305 if (IsVectorGEP && !N.getValueType().isVector()) {
4306 LLVMContext &Context = *DAG.getContext();
4307 EVT VT = EVT::getVectorVT(Context, N.getValueType(), VectorElementCount);
4308 N = DAG.getSplat(VT, dl, N);
4309 }
4310
4311 for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
4312 GTI != E; ++GTI) {
4313 const Value *Idx = GTI.getOperand();
4314 if (StructType *StTy = GTI.getStructTypeOrNull()) {
4315 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
4316 if (Field) {
4317 // N = N + Offset
4318 uint64_t Offset =
4319 DAG.getDataLayout().getStructLayout(StTy)->getElementOffset(Field);
4320
4321 // In an inbounds GEP with an offset that is nonnegative even when
4322 // interpreted as signed, assume there is no unsigned overflow.
4323 SDNodeFlags Flags;
4324 if (NW.hasNoUnsignedWrap() ||
4325 (int64_t(Offset) >= 0 && NW.hasNoUnsignedSignedWrap()))
4326 Flags.setNoUnsignedWrap(true);
4327
4328 N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N,
4329 DAG.getConstant(Offset, dl, N.getValueType()), Flags);
4330 }
4331 } else {
4332 // IdxSize is the width of the arithmetic according to IR semantics.
4333 // In SelectionDAG, we may prefer to do arithmetic in a wider bitwidth
4334 // (and fix up the result later).
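// Illustrative example: for "getelementptr inbounds i32, ptr %p, i64 %i"
// the code below computes N = %p + (%i << 2), with the nuw/nsw flags on
// the scale and the add derived from the GEP's no-wrap flags.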
4335 unsigned IdxSize = DAG.getDataLayout().getIndexSizeInBits(AS);
4336 MVT IdxTy = MVT::getIntegerVT(IdxSize);
4337 TypeSize ElementSize =
4338 GTI.getSequentialElementStride(DAG.getDataLayout());
4339 // We intentionally mask away the high bits here; ElementSize may not
4340 // fit in IdxTy.
4341 APInt ElementMul(IdxSize, ElementSize.getKnownMinValue());
4342 bool ElementScalable = ElementSize.isScalable();
4343
4344 // If this is a scalar constant or a splat vector of constants,
4345 // handle it quickly.
4346 const auto *C = dyn_cast<Constant>(Idx);
4347 if (C && isa<VectorType>(C->getType()))
4348 C = C->getSplatValue();
4349
4350 const auto *CI = dyn_cast_or_null<ConstantInt>(C);
4351 if (CI && CI->isZero())
4352 continue;
4353 if (CI && !ElementScalable) {
4354 APInt Offs = ElementMul * CI->getValue().sextOrTrunc(IdxSize);
4355 LLVMContext &Context = *DAG.getContext();
4356 SDValue OffsVal;
4357 if (IsVectorGEP)
4358 OffsVal = DAG.getConstant(
4359 Offs, dl, EVT::getVectorVT(Context, IdxTy, VectorElementCount));
4360 else
4361 OffsVal = DAG.getConstant(Offs, dl, IdxTy);
4362
4363 // In an inbounds GEP with an offset that is nonnegative even when
4364 // interpreted as signed, assume there is no unsigned overflow.
4365 SDNodeFlags Flags;
4366 if (NW.hasNoUnsignedWrap() ||
4367 (Offs.isNonNegative() && NW.hasNoUnsignedSignedWrap()))
4368 Flags.setNoUnsignedWrap(true);
4369
4370 OffsVal = DAG.getSExtOrTrunc(OffsVal, dl, N.getValueType());
4371
4372 N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal, Flags);
4373 continue;
4374 }
4375
4376 // N = N + Idx * ElementMul;
4377 SDValue IdxN = getValue(Idx);
4378
4379 if (!IdxN.getValueType().isVector() && IsVectorGEP) {
4380 EVT VT = EVT::getVectorVT(*DAG.getContext(), IdxN.getValueType(),
4381 VectorElementCount);
4382 IdxN = DAG.getSplat(VT, dl, IdxN);
4383 }
4384
4385 // If the index is smaller or larger than intptr_t, truncate or extend
4386 // it.
4387 IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType());
4388
4389 SDNodeFlags ScaleFlags;
4390 // The multiplication of an index by the type size does not wrap the
4391 // pointer index type in a signed sense (mul nsw).
4392 ScaleFlags.setNoSignedWrap(NW.hasNoUnsignedSignedWrap());
4393
4394 // The multiplication of an index by the type size does not wrap the
4395 // pointer index type in an unsigned sense (mul nuw).
4396 ScaleFlags.setNoUnsignedWrap(NW.hasNoUnsignedWrap());
4397
4398 if (ElementScalable) {
4399 EVT VScaleTy = N.getValueType().getScalarType();
4400 SDValue VScale = DAG.getNode(
4401 ISD::VSCALE, dl, VScaleTy,
4402 DAG.getConstant(ElementMul.getZExtValue(), dl, VScaleTy));
4403 if (IsVectorGEP)
4404 VScale = DAG.getSplatVector(N.getValueType(), dl, VScale);
4405 IdxN = DAG.getNode(ISD::MUL, dl, N.getValueType(), IdxN, VScale,
4406 ScaleFlags);
4407 } else {
4408 // If this is a multiply by a power of two, turn it into a shl
4409 // immediately. This is a very common case.
4410 if (ElementMul != 1) { 4411 if (ElementMul.isPowerOf2()) { 4412 unsigned Amt = ElementMul.logBase2(); 4413 IdxN = DAG.getNode(ISD::SHL, dl, N.getValueType(), IdxN, 4414 DAG.getConstant(Amt, dl, IdxN.getValueType()), 4415 ScaleFlags); 4416 } else { 4417 SDValue Scale = DAG.getConstant(ElementMul.getZExtValue(), dl, 4418 IdxN.getValueType()); 4419 IdxN = DAG.getNode(ISD::MUL, dl, N.getValueType(), IdxN, Scale, 4420 ScaleFlags); 4421 } 4422 } 4423 } 4424 4425 // The successive addition of the current address, truncated to the 4426 // pointer index type and interpreted as an unsigned number, and each 4427 // offset, also interpreted as an unsigned number, does not wrap the 4428 // pointer index type (add nuw). 4429 SDNodeFlags AddFlags; 4430 AddFlags.setNoUnsignedWrap(NW.hasNoUnsignedWrap()); 4431 4432 N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, IdxN, AddFlags); 4433 } 4434 } 4435 4436 MVT PtrTy = TLI.getPointerTy(DAG.getDataLayout(), AS); 4437 MVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout(), AS); 4438 if (IsVectorGEP) { 4439 PtrTy = MVT::getVectorVT(PtrTy, VectorElementCount); 4440 PtrMemTy = MVT::getVectorVT(PtrMemTy, VectorElementCount); 4441 } 4442 4443 if (PtrMemTy != PtrTy && !cast<GEPOperator>(I).isInBounds()) 4444 N = DAG.getPtrExtendInReg(N, dl, PtrMemTy); 4445 4446 setValue(&I, N); 4447 } 4448 4449 void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) { 4450 // If this is a fixed-size alloca in the entry block of the function, 4451 // allocate it statically on the stack. 4452 if (FuncInfo.StaticAllocaMap.count(&I)) 4453 return; // getValue will auto-populate this. 4454 4455 SDLoc dl = getCurSDLoc(); 4456 Type *Ty = I.getAllocatedType(); 4457 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4458 auto &DL = DAG.getDataLayout(); 4459 TypeSize TySize = DL.getTypeAllocSize(Ty); 4460 MaybeAlign Alignment = std::max(DL.getPrefTypeAlign(Ty), I.getAlign()); 4461 4462 SDValue AllocSize = getValue(I.getArraySize()); 4463 4464 EVT IntPtr = TLI.getPointerTy(DL, I.getAddressSpace()); 4465 if (AllocSize.getValueType() != IntPtr) 4466 AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr); 4467 4468 if (TySize.isScalable()) 4469 AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize, 4470 DAG.getVScale(dl, IntPtr, 4471 APInt(IntPtr.getScalarSizeInBits(), 4472 TySize.getKnownMinValue()))); 4473 else { 4474 SDValue TySizeValue = 4475 DAG.getConstant(TySize.getFixedValue(), dl, MVT::getIntegerVT(64)); 4476 AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize, 4477 DAG.getZExtOrTrunc(TySizeValue, dl, IntPtr)); 4478 } 4479 4480 // Handle alignment. If the requested alignment is less than or equal to 4481 // the stack alignment, ignore it. If it is greater than the stack 4482 // alignment, we record the requested alignment in the DYNAMIC_STACKALLOC node. 4483 Align StackAlign = DAG.getSubtarget().getFrameLowering()->getStackAlign(); 4484 if (*Alignment <= StackAlign) 4485 Alignment = std::nullopt; 4486 4487 const uint64_t StackAlignMask = StackAlign.value() - 1U; 4488 // Round the size of the allocation up to the stack alignment size 4489 // by adding SA-1 to the size. This doesn't overflow because we're computing 4490 // an address inside an alloca. 4491 SDNodeFlags Flags; 4492 Flags.setNoUnsignedWrap(true); 4493 AllocSize = DAG.getNode(ISD::ADD, dl, AllocSize.getValueType(), AllocSize, 4494 DAG.getConstant(StackAlignMask, dl, IntPtr), Flags); 4495 4496 // Mask out the low bits for alignment purposes.
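// For example, with a 16-byte stack alignment a request for 20 bytes
// becomes (20 + 15) & ~15 == 32, i.e. the size rounded up to the next
// multiple of the stack alignment.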
4497 AllocSize = DAG.getNode(ISD::AND, dl, AllocSize.getValueType(), AllocSize, 4498 DAG.getSignedConstant(~StackAlignMask, dl, IntPtr)); 4499 4500 SDValue Ops[] = { 4501 getRoot(), AllocSize, 4502 DAG.getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)}; 4503 SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other); 4504 SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops); 4505 setValue(&I, DSA); 4506 DAG.setRoot(DSA.getValue(1)); 4507 4508 assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects()); 4509 } 4510 4511 static const MDNode *getRangeMetadata(const Instruction &I) { 4512 // If !noundef is not present, then a !range violation results in a poison 4513 // value rather than immediate undefined behavior. In theory, transferring 4514 // these annotations to SDAG is fine, but in practice there are key SDAG 4515 // transforms that are known not to be poison-safe, such as folding logical 4516 // and/or to bitwise and/or. For now, only transfer !range if !noundef is 4517 // also present. 4518 if (!I.hasMetadata(LLVMContext::MD_noundef)) 4519 return nullptr; 4520 return I.getMetadata(LLVMContext::MD_range); 4521 } 4522 4523 static std::optional<ConstantRange> getRange(const Instruction &I) { 4524 if (const auto *CB = dyn_cast<CallBase>(&I)) { 4525 // See the comment in getRangeMetadata about this check. 4526 if (CB->hasRetAttr(Attribute::NoUndef)) 4527 return CB->getRange(); 4528 } 4529 if (const MDNode *Range = getRangeMetadata(I)) 4530 return getConstantRangeFromMetadata(*Range); 4531 return std::nullopt; 4532 } 4533 4534 void SelectionDAGBuilder::visitLoad(const LoadInst &I) { 4535 if (I.isAtomic()) 4536 return visitAtomicLoad(I); 4537 4538 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4539 const Value *SV = I.getOperand(0); 4540 if (TLI.supportSwiftError()) { 4541 // Swifterror values can come from either a function parameter with 4542 // swifterror attribute or an alloca with swifterror attribute. 4543 if (const Argument *Arg = dyn_cast<Argument>(SV)) { 4544 if (Arg->hasSwiftErrorAttr()) 4545 return visitLoadFromSwiftError(I); 4546 } 4547 4548 if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) { 4549 if (Alloca->isSwiftError()) 4550 return visitLoadFromSwiftError(I); 4551 } 4552 } 4553 4554 SDValue Ptr = getValue(SV); 4555 4556 Type *Ty = I.getType(); 4557 SmallVector<EVT, 4> ValueVTs, MemVTs; 4558 SmallVector<TypeSize, 4> Offsets; 4559 ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &MemVTs, &Offsets); 4560 unsigned NumValues = ValueVTs.size(); 4561 if (NumValues == 0) 4562 return; 4563 4564 Align Alignment = I.getAlign(); 4565 AAMDNodes AAInfo = I.getAAMetadata(); 4566 const MDNode *Ranges = getRangeMetadata(I); 4567 bool isVolatile = I.isVolatile(); 4568 MachineMemOperand::Flags MMOFlags = 4569 TLI.getLoadMemOperandFlags(I, DAG.getDataLayout(), AC, LibInfo); 4570 4571 SDValue Root; 4572 bool ConstantMemory = false; 4573 if (isVolatile) 4574 // Serialize volatile loads with other side effects. 4575 Root = getRoot(); 4576 else if (NumValues > MaxParallelChains) 4577 Root = getMemoryRoot(); 4578 else if (AA && 4579 AA->pointsToConstantMemory(MemoryLocation( 4580 SV, 4581 LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)), 4582 AAInfo))) { 4583 // Do not serialize (non-volatile) loads of constant memory with anything. 4584 Root = DAG.getEntryNode(); 4585 ConstantMemory = true; 4586 MMOFlags |= MachineMemOperand::MOInvariant; 4587 } else { 4588 // Do not serialize non-volatile loads against each other.
4589 Root = DAG.getRoot(); 4590 } 4591 4592 SDLoc dl = getCurSDLoc(); 4593 4594 if (isVolatile) 4595 Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG); 4596 4597 SmallVector<SDValue, 4> Values(NumValues); 4598 SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues)); 4599 4600 unsigned ChainI = 0; 4601 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) { 4602 // Serializing loads here may result in excessive register pressure, and 4603 // TokenFactor places arbitrary choke points on the scheduler. SD scheduling 4604 // could recover a bit by hoisting nodes upward in the chain by recognizing 4605 // they are side-effect free or do not alias. The optimizer should really 4606 // avoid this case by converting large object/array copies to llvm.memcpy 4607 // (MaxParallelChains should always remain as a failsafe). 4608 if (ChainI == MaxParallelChains) { 4609 assert(PendingLoads.empty() && "PendingLoads must be serialized first"); 4610 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 4611 ArrayRef(Chains.data(), ChainI)); 4612 Root = Chain; 4613 ChainI = 0; 4614 } 4615 4616 // TODO: MachinePointerInfo only supports a fixed length offset. 4617 MachinePointerInfo PtrInfo = 4618 !Offsets[i].isScalable() || Offsets[i].isZero() 4619 ? MachinePointerInfo(SV, Offsets[i].getKnownMinValue()) 4620 : MachinePointerInfo(); 4621 4622 SDValue A = DAG.getObjectPtrOffset(dl, Ptr, Offsets[i]); 4623 SDValue L = DAG.getLoad(MemVTs[i], dl, Root, A, PtrInfo, Alignment, 4624 MMOFlags, AAInfo, Ranges); 4625 Chains[ChainI] = L.getValue(1); 4626 4627 if (MemVTs[i] != ValueVTs[i]) 4628 L = DAG.getPtrExtOrTrunc(L, dl, ValueVTs[i]); 4629 4630 Values[i] = L; 4631 } 4632 4633 if (!ConstantMemory) { 4634 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 4635 ArrayRef(Chains.data(), ChainI)); 4636 if (isVolatile) 4637 DAG.setRoot(Chain); 4638 else 4639 PendingLoads.push_back(Chain); 4640 } 4641 4642 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl, 4643 DAG.getVTList(ValueVTs), Values)); 4644 } 4645 4646 void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) { 4647 assert(DAG.getTargetLoweringInfo().supportSwiftError() && 4648 "call visitStoreToSwiftError when backend supports swifterror"); 4649 4650 SmallVector<EVT, 4> ValueVTs; 4651 SmallVector<uint64_t, 4> Offsets; 4652 const Value *SrcV = I.getOperand(0); 4653 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), 4654 SrcV->getType(), ValueVTs, &Offsets, 0); 4655 assert(ValueVTs.size() == 1 && Offsets[0] == 0 && 4656 "expect a single EVT for swifterror"); 4657 4658 SDValue Src = getValue(SrcV); 4659 // Create a virtual register, then update the virtual register. 4660 Register VReg = 4661 SwiftError.getOrCreateVRegDefAt(&I, FuncInfo.MBB, I.getPointerOperand()); 4662 // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue 4663 // Chain can be getRoot or getControlRoot.
4664 SDValue CopyNode = DAG.getCopyToReg(getRoot(), getCurSDLoc(), VReg, 4665 SDValue(Src.getNode(), Src.getResNo())); 4666 DAG.setRoot(CopyNode); 4667 } 4668 4669 void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) { 4670 assert(DAG.getTargetLoweringInfo().supportSwiftError() && 4671 "call visitLoadFromSwiftError when backend supports swifterror"); 4672 4673 assert(!I.isVolatile() && 4674 !I.hasMetadata(LLVMContext::MD_nontemporal) && 4675 !I.hasMetadata(LLVMContext::MD_invariant_load) && 4676 "Support volatile, non temporal, invariant for load_from_swift_error"); 4677 4678 const Value *SV = I.getOperand(0); 4679 Type *Ty = I.getType(); 4680 assert( 4681 (!AA || 4682 !AA->pointsToConstantMemory(MemoryLocation( 4683 SV, LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)), 4684 I.getAAMetadata()))) && 4685 "load_from_swift_error should not be constant memory"); 4686 4687 SmallVector<EVT, 4> ValueVTs; 4688 SmallVector<uint64_t, 4> Offsets; 4689 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), Ty, 4690 ValueVTs, &Offsets, 0); 4691 assert(ValueVTs.size() == 1 && Offsets[0] == 0 && 4692 "expect a single EVT for swifterror"); 4693 4694 // Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT 4695 SDValue L = DAG.getCopyFromReg( 4696 getRoot(), getCurSDLoc(), 4697 SwiftError.getOrCreateVRegUseAt(&I, FuncInfo.MBB, SV), ValueVTs[0]); 4698 4699 setValue(&I, L); 4700 } 4701 4702 void SelectionDAGBuilder::visitStore(const StoreInst &I) { 4703 if (I.isAtomic()) 4704 return visitAtomicStore(I); 4705 4706 const Value *SrcV = I.getOperand(0); 4707 const Value *PtrV = I.getOperand(1); 4708 4709 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4710 if (TLI.supportSwiftError()) { 4711 // Swifterror values can come from either a function parameter with 4712 // swifterror attribute or an alloca with swifterror attribute. 4713 if (const Argument *Arg = dyn_cast<Argument>(PtrV)) { 4714 if (Arg->hasSwiftErrorAttr()) 4715 return visitStoreToSwiftError(I); 4716 } 4717 4718 if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) { 4719 if (Alloca->isSwiftError()) 4720 return visitStoreToSwiftError(I); 4721 } 4722 } 4723 4724 SmallVector<EVT, 4> ValueVTs, MemVTs; 4725 SmallVector<TypeSize, 4> Offsets; 4726 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), 4727 SrcV->getType(), ValueVTs, &MemVTs, &Offsets); 4728 unsigned NumValues = ValueVTs.size(); 4729 if (NumValues == 0) 4730 return; 4731 4732 // Get the lowered operands. Note that we do this after 4733 // checking if NumResults is zero, because with zero results 4734 // the operands won't have values in the map. 4735 SDValue Src = getValue(SrcV); 4736 SDValue Ptr = getValue(PtrV); 4737 4738 SDValue Root = I.isVolatile() ? getRoot() : getMemoryRoot(); 4739 SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues)); 4740 SDLoc dl = getCurSDLoc(); 4741 Align Alignment = I.getAlign(); 4742 AAMDNodes AAInfo = I.getAAMetadata(); 4743 4744 auto MMOFlags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout()); 4745 4746 unsigned ChainI = 0; 4747 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) { 4748 // See visitLoad comments. 4749 if (ChainI == MaxParallelChains) { 4750 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 4751 ArrayRef(Chains.data(), ChainI)); 4752 Root = Chain; 4753 ChainI = 0; 4754 } 4755 4756 // TODO: MachinePointerInfo only supports a fixed length offset. 4757 MachinePointerInfo PtrInfo = 4758 !Offsets[i].isScalable() || Offsets[i].isZero() 4759 ? 
MachinePointerInfo(PtrV, Offsets[i].getKnownMinValue()) 4760 : MachinePointerInfo(); 4761 4762 SDValue Add = DAG.getObjectPtrOffset(dl, Ptr, Offsets[i]); 4763 SDValue Val = SDValue(Src.getNode(), Src.getResNo() + i); 4764 if (MemVTs[i] != ValueVTs[i]) 4765 Val = DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]); 4766 SDValue St = 4767 DAG.getStore(Root, dl, Val, Add, PtrInfo, Alignment, MMOFlags, AAInfo); 4768 Chains[ChainI] = St; 4769 } 4770 4771 SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 4772 ArrayRef(Chains.data(), ChainI)); 4773 setValue(&I, StoreNode); 4774 DAG.setRoot(StoreNode); 4775 } 4776 4777 void SelectionDAGBuilder::visitMaskedStore(const CallInst &I, 4778 bool IsCompressing) { 4779 SDLoc sdl = getCurSDLoc(); 4780 4781 auto getMaskedStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0, 4782 Align &Alignment) { 4783 // llvm.masked.store.*(Src0, Ptr, alignment, Mask) 4784 Src0 = I.getArgOperand(0); 4785 Ptr = I.getArgOperand(1); 4786 Alignment = cast<ConstantInt>(I.getArgOperand(2))->getAlignValue(); 4787 Mask = I.getArgOperand(3); 4788 }; 4789 auto getCompressingStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0, 4790 Align &Alignment) { 4791 // llvm.masked.compressstore.*(Src0, Ptr, Mask) 4792 Src0 = I.getArgOperand(0); 4793 Ptr = I.getArgOperand(1); 4794 Mask = I.getArgOperand(2); 4795 Alignment = I.getParamAlign(1).valueOrOne(); 4796 }; 4797 4798 Value *PtrOperand, *MaskOperand, *Src0Operand; 4799 Align Alignment; 4800 if (IsCompressing) 4801 getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment); 4802 else 4803 getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment); 4804 4805 SDValue Ptr = getValue(PtrOperand); 4806 SDValue Src0 = getValue(Src0Operand); 4807 SDValue Mask = getValue(MaskOperand); 4808 SDValue Offset = DAG.getUNDEF(Ptr.getValueType()); 4809 4810 EVT VT = Src0.getValueType(); 4811 4812 auto MMOFlags = MachineMemOperand::MOStore; 4813 if (I.hasMetadata(LLVMContext::MD_nontemporal)) 4814 MMOFlags |= MachineMemOperand::MONonTemporal; 4815 4816 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( 4817 MachinePointerInfo(PtrOperand), MMOFlags, 4818 LocationSize::beforeOrAfterPointer(), Alignment, I.getAAMetadata()); 4819 4820 const auto &TLI = DAG.getTargetLoweringInfo(); 4821 const auto &TTI = 4822 TLI.getTargetMachine().getTargetTransformInfo(*I.getFunction()); 4823 SDValue StoreNode = 4824 !IsCompressing && 4825 TTI.hasConditionalLoadStoreForType(I.getArgOperand(0)->getType()) 4826 ? TLI.visitMaskedStore(DAG, sdl, getMemoryRoot(), MMO, Ptr, Src0, 4827 Mask) 4828 : DAG.getMaskedStore(getMemoryRoot(), sdl, Src0, Ptr, Offset, Mask, 4829 VT, MMO, ISD::UNINDEXED, /*Truncating=*/false, 4830 IsCompressing); 4831 DAG.setRoot(StoreNode); 4832 setValue(&I, StoreNode); 4833 } 4834 4835 // Get a uniform base for the Gather/Scatter intrinsic. 4836 // The first argument of the Gather/Scatter intrinsic is a vector of pointers. 4837 // We try to represent it as a base pointer + vector of indices. 4838 // Usually, the vector of pointers comes from a 'getelementptr' instruction. 4839 // The first operand of the GEP may be a single pointer or a vector of pointers 4840 // Example: 4841 // %gep.ptr = getelementptr i32, <8 x i32*> %vptr, <8 x i32> %ind 4842 // or 4843 // %gep.ptr = getelementptr i32, i32* %ptr, <8 x i32> %ind 4844 // %res = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.ptr, .. 4845 // 4846 // When the first GEP operand is a single pointer - it is the uniform base we 4847 // are looking for. 
If the first operand of the GEP is a splat vector - we 4848 // extract the splat value and use it as a uniform base. 4849 // In all other cases the function returns 'false'. 4850 static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index, 4851 ISD::MemIndexType &IndexType, SDValue &Scale, 4852 SelectionDAGBuilder *SDB, const BasicBlock *CurBB, 4853 uint64_t ElemSize) { 4854 SelectionDAG& DAG = SDB->DAG; 4855 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4856 const DataLayout &DL = DAG.getDataLayout(); 4857 4858 assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type"); 4859 4860 // Handle splat constant pointer. 4861 if (auto *C = dyn_cast<Constant>(Ptr)) { 4862 C = C->getSplatValue(); 4863 if (!C) 4864 return false; 4865 4866 Base = SDB->getValue(C); 4867 4868 ElementCount NumElts = cast<VectorType>(Ptr->getType())->getElementCount(); 4869 EVT VT = EVT::getVectorVT(*DAG.getContext(), TLI.getPointerTy(DL), NumElts); 4870 Index = DAG.getConstant(0, SDB->getCurSDLoc(), VT); 4871 IndexType = ISD::SIGNED_SCALED; 4872 Scale = DAG.getTargetConstant(1, SDB->getCurSDLoc(), TLI.getPointerTy(DL)); 4873 return true; 4874 } 4875 4876 const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr); 4877 if (!GEP || GEP->getParent() != CurBB) 4878 return false; 4879 4880 if (GEP->getNumOperands() != 2) 4881 return false; 4882 4883 const Value *BasePtr = GEP->getPointerOperand(); 4884 const Value *IndexVal = GEP->getOperand(GEP->getNumOperands() - 1); 4885 4886 // Make sure the base is scalar and the index is a vector. 4887 if (BasePtr->getType()->isVectorTy() || !IndexVal->getType()->isVectorTy()) 4888 return false; 4889 4890 TypeSize ScaleVal = DL.getTypeAllocSize(GEP->getResultElementType()); 4891 if (ScaleVal.isScalable()) 4892 return false; 4893 4894 // Target may not support the required addressing mode.
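// On X86, for example, the hardware-scaled addressing used by gathers and
// scatters only supports scale factors of 1, 2, 4 and 8, so larger element
// strides are rejected here (illustrative; the exact rule is whatever the
// target's isLegalScaleForGatherScatter hook accepts).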
4895 if (ScaleVal != 1 && 4896 !TLI.isLegalScaleForGatherScatter(ScaleVal.getFixedValue(), ElemSize)) 4897 return false; 4898 4899 Base = SDB->getValue(BasePtr); 4900 Index = SDB->getValue(IndexVal); 4901 IndexType = ISD::SIGNED_SCALED; 4902 4903 Scale = 4904 DAG.getTargetConstant(ScaleVal, SDB->getCurSDLoc(), TLI.getPointerTy(DL)); 4905 return true; 4906 } 4907 4908 void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) { 4909 SDLoc sdl = getCurSDLoc(); 4910 4911 // llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask) 4912 const Value *Ptr = I.getArgOperand(1); 4913 SDValue Src0 = getValue(I.getArgOperand(0)); 4914 SDValue Mask = getValue(I.getArgOperand(3)); 4915 EVT VT = Src0.getValueType(); 4916 Align Alignment = cast<ConstantInt>(I.getArgOperand(2)) 4917 ->getMaybeAlignValue() 4918 .value_or(DAG.getEVTAlign(VT.getScalarType())); 4919 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4920 4921 SDValue Base; 4922 SDValue Index; 4923 ISD::MemIndexType IndexType; 4924 SDValue Scale; 4925 bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this, 4926 I.getParent(), VT.getScalarStoreSize()); 4927 4928 unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace(); 4929 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( 4930 MachinePointerInfo(AS), MachineMemOperand::MOStore, 4931 LocationSize::beforeOrAfterPointer(), Alignment, I.getAAMetadata()); 4932 if (!UniformBase) { 4933 Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout())); 4934 Index = getValue(Ptr); 4935 IndexType = ISD::SIGNED_SCALED; 4936 Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout())); 4937 } 4938 4939 EVT IdxVT = Index.getValueType(); 4940 EVT EltTy = IdxVT.getVectorElementType(); 4941 if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) { 4942 EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy); 4943 Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index); 4944 } 4945 4946 SDValue Ops[] = { getMemoryRoot(), Src0, Mask, Base, Index, Scale }; 4947 SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl, 4948 Ops, MMO, IndexType, false); 4949 DAG.setRoot(Scatter); 4950 setValue(&I, Scatter); 4951 } 4952 4953 void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) { 4954 SDLoc sdl = getCurSDLoc(); 4955 4956 auto getMaskedLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0, 4957 Align &Alignment) { 4958 // @llvm.masked.load.*(Ptr, alignment, Mask, Src0) 4959 Ptr = I.getArgOperand(0); 4960 Alignment = cast<ConstantInt>(I.getArgOperand(1))->getAlignValue(); 4961 Mask = I.getArgOperand(2); 4962 Src0 = I.getArgOperand(3); 4963 }; 4964 auto getExpandingLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0, 4965 Align &Alignment) { 4966 // @llvm.masked.expandload.*(Ptr, Mask, Src0) 4967 Ptr = I.getArgOperand(0); 4968 Alignment = I.getParamAlign(0).valueOrOne(); 4969 Mask = I.getArgOperand(1); 4970 Src0 = I.getArgOperand(2); 4971 }; 4972 4973 Value *PtrOperand, *MaskOperand, *Src0Operand; 4974 Align Alignment; 4975 if (IsExpanding) 4976 getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment); 4977 else 4978 getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment); 4979 4980 SDValue Ptr = getValue(PtrOperand); 4981 SDValue Src0 = getValue(Src0Operand); 4982 SDValue Mask = getValue(MaskOperand); 4983 SDValue Offset = DAG.getUNDEF(Ptr.getValueType()); 4984 4985 EVT VT = Src0.getValueType(); 4986 AAMDNodes AAInfo = I.getAAMetadata(); 4987 const MDNode *Ranges = 
getRangeMetadata(I); 4988 4989 // Do not serialize masked loads of constant memory with anything. 4990 MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo); 4991 bool AddToChain = !AA || !AA->pointsToConstantMemory(ML); 4992 4993 SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode(); 4994 4995 auto MMOFlags = MachineMemOperand::MOLoad; 4996 if (I.hasMetadata(LLVMContext::MD_nontemporal)) 4997 MMOFlags |= MachineMemOperand::MONonTemporal; 4998 4999 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( 5000 MachinePointerInfo(PtrOperand), MMOFlags, 5001 LocationSize::beforeOrAfterPointer(), Alignment, AAInfo, Ranges); 5002 5003 const auto &TLI = DAG.getTargetLoweringInfo(); 5004 const auto &TTI = 5005 TLI.getTargetMachine().getTargetTransformInfo(*I.getFunction()); 5006 // The Load/Res may point to different values and both of them are output 5007 // variables. 5008 SDValue Load; 5009 SDValue Res; 5010 if (!IsExpanding && 5011 TTI.hasConditionalLoadStoreForType(Src0Operand->getType())) 5012 Res = TLI.visitMaskedLoad(DAG, sdl, InChain, MMO, Load, Ptr, Src0, Mask); 5013 else 5014 Res = Load = 5015 DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Offset, Mask, Src0, VT, MMO, 5016 ISD::UNINDEXED, ISD::NON_EXTLOAD, IsExpanding); 5017 if (AddToChain) 5018 PendingLoads.push_back(Load.getValue(1)); 5019 setValue(&I, Res); 5020 } 5021 5022 void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) { 5023 SDLoc sdl = getCurSDLoc(); 5024 5025 // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0) 5026 const Value *Ptr = I.getArgOperand(0); 5027 SDValue Src0 = getValue(I.getArgOperand(3)); 5028 SDValue Mask = getValue(I.getArgOperand(2)); 5029 5030 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5031 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 5032 Align Alignment = cast<ConstantInt>(I.getArgOperand(1)) 5033 ->getMaybeAlignValue() 5034 .value_or(DAG.getEVTAlign(VT.getScalarType())); 5035 5036 const MDNode *Ranges = getRangeMetadata(I); 5037 5038 SDValue Root = DAG.getRoot(); 5039 SDValue Base; 5040 SDValue Index; 5041 ISD::MemIndexType IndexType; 5042 SDValue Scale; 5043 bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this, 5044 I.getParent(), VT.getScalarStoreSize()); 5045 unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace(); 5046 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( 5047 MachinePointerInfo(AS), MachineMemOperand::MOLoad, 5048 LocationSize::beforeOrAfterPointer(), Alignment, I.getAAMetadata(), 5049 Ranges); 5050 5051 if (!UniformBase) { 5052 Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout())); 5053 Index = getValue(Ptr); 5054 IndexType = ISD::SIGNED_SCALED; 5055 Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout())); 5056 } 5057 5058 EVT IdxVT = Index.getValueType(); 5059 EVT EltTy = IdxVT.getVectorElementType(); 5060 if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) { 5061 EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy); 5062 Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index); 5063 } 5064 5065 SDValue Ops[] = { Root, Src0, Mask, Base, Index, Scale }; 5066 SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl, 5067 Ops, MMO, IndexType, ISD::NON_EXTLOAD); 5068 5069 PendingLoads.push_back(Gather.getValue(1)); 5070 setValue(&I, Gather); 5071 } 5072 5073 void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) { 5074 SDLoc dl = getCurSDLoc(); 5075 AtomicOrdering SuccessOrdering = 
I.getSuccessOrdering(); 5076 AtomicOrdering FailureOrdering = I.getFailureOrdering(); 5077 SyncScope::ID SSID = I.getSyncScopeID(); 5078 5079 SDValue InChain = getRoot(); 5080 5081 MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType(); 5082 SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other); 5083 5084 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5085 auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout()); 5086 5087 MachineFunction &MF = DAG.getMachineFunction(); 5088 MachineMemOperand *MMO = MF.getMachineMemOperand( 5089 MachinePointerInfo(I.getPointerOperand()), Flags, 5090 LocationSize::precise(MemVT.getStoreSize()), DAG.getEVTAlign(MemVT), 5091 AAMDNodes(), nullptr, SSID, SuccessOrdering, FailureOrdering); 5092 5093 SDValue L = DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, 5094 dl, MemVT, VTs, InChain, 5095 getValue(I.getPointerOperand()), 5096 getValue(I.getCompareOperand()), 5097 getValue(I.getNewValOperand()), MMO); 5098 5099 SDValue OutChain = L.getValue(2); 5100 5101 setValue(&I, L); 5102 DAG.setRoot(OutChain); 5103 } 5104 5105 void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) { 5106 SDLoc dl = getCurSDLoc(); 5107 ISD::NodeType NT; 5108 switch (I.getOperation()) { 5109 default: llvm_unreachable("Unknown atomicrmw operation"); 5110 case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break; 5111 case AtomicRMWInst::Add: NT = ISD::ATOMIC_LOAD_ADD; break; 5112 case AtomicRMWInst::Sub: NT = ISD::ATOMIC_LOAD_SUB; break; 5113 case AtomicRMWInst::And: NT = ISD::ATOMIC_LOAD_AND; break; 5114 case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break; 5115 case AtomicRMWInst::Or: NT = ISD::ATOMIC_LOAD_OR; break; 5116 case AtomicRMWInst::Xor: NT = ISD::ATOMIC_LOAD_XOR; break; 5117 case AtomicRMWInst::Max: NT = ISD::ATOMIC_LOAD_MAX; break; 5118 case AtomicRMWInst::Min: NT = ISD::ATOMIC_LOAD_MIN; break; 5119 case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break; 5120 case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break; 5121 case AtomicRMWInst::FAdd: NT = ISD::ATOMIC_LOAD_FADD; break; 5122 case AtomicRMWInst::FSub: NT = ISD::ATOMIC_LOAD_FSUB; break; 5123 case AtomicRMWInst::FMax: NT = ISD::ATOMIC_LOAD_FMAX; break; 5124 case AtomicRMWInst::FMin: NT = ISD::ATOMIC_LOAD_FMIN; break; 5125 case AtomicRMWInst::UIncWrap: 5126 NT = ISD::ATOMIC_LOAD_UINC_WRAP; 5127 break; 5128 case AtomicRMWInst::UDecWrap: 5129 NT = ISD::ATOMIC_LOAD_UDEC_WRAP; 5130 break; 5131 case AtomicRMWInst::USubCond: 5132 NT = ISD::ATOMIC_LOAD_USUB_COND; 5133 break; 5134 case AtomicRMWInst::USubSat: 5135 NT = ISD::ATOMIC_LOAD_USUB_SAT; 5136 break; 5137 } 5138 AtomicOrdering Ordering = I.getOrdering(); 5139 SyncScope::ID SSID = I.getSyncScopeID(); 5140 5141 SDValue InChain = getRoot(); 5142 5143 auto MemVT = getValue(I.getValOperand()).getSimpleValueType(); 5144 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5145 auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout()); 5146 5147 MachineFunction &MF = DAG.getMachineFunction(); 5148 MachineMemOperand *MMO = MF.getMachineMemOperand( 5149 MachinePointerInfo(I.getPointerOperand()), Flags, 5150 LocationSize::precise(MemVT.getStoreSize()), DAG.getEVTAlign(MemVT), 5151 AAMDNodes(), nullptr, SSID, Ordering); 5152 5153 SDValue L = 5154 DAG.getAtomic(NT, dl, MemVT, InChain, 5155 getValue(I.getPointerOperand()), getValue(I.getValOperand()), 5156 MMO); 5157 5158 SDValue OutChain = L.getValue(1); 5159 5160 setValue(&I, L); 5161 DAG.setRoot(OutChain); 5162 } 5163 5164 void 
SelectionDAGBuilder::visitFence(const FenceInst &I) { 5165 SDLoc dl = getCurSDLoc(); 5166 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5167 SDValue Ops[3]; 5168 Ops[0] = getRoot(); 5169 Ops[1] = DAG.getTargetConstant((unsigned)I.getOrdering(), dl, 5170 TLI.getFenceOperandTy(DAG.getDataLayout())); 5171 Ops[2] = DAG.getTargetConstant(I.getSyncScopeID(), dl, 5172 TLI.getFenceOperandTy(DAG.getDataLayout())); 5173 SDValue N = DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops); 5174 setValue(&I, N); 5175 DAG.setRoot(N); 5176 } 5177 5178 void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) { 5179 SDLoc dl = getCurSDLoc(); 5180 AtomicOrdering Order = I.getOrdering(); 5181 SyncScope::ID SSID = I.getSyncScopeID(); 5182 5183 SDValue InChain = getRoot(); 5184 5185 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5186 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 5187 EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType()); 5188 5189 if (!TLI.supportsUnalignedAtomics() && 5190 I.getAlign().value() < MemVT.getSizeInBits() / 8) 5191 report_fatal_error("Cannot generate unaligned atomic load"); 5192 5193 auto Flags = TLI.getLoadMemOperandFlags(I, DAG.getDataLayout(), AC, LibInfo); 5194 5195 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( 5196 MachinePointerInfo(I.getPointerOperand()), Flags, 5197 LocationSize::precise(MemVT.getStoreSize()), I.getAlign(), AAMDNodes(), 5198 nullptr, SSID, Order); 5199 5200 InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG); 5201 5202 SDValue Ptr = getValue(I.getPointerOperand()); 5203 SDValue L = DAG.getAtomic(ISD::ATOMIC_LOAD, dl, MemVT, MemVT, InChain, 5204 Ptr, MMO); 5205 5206 SDValue OutChain = L.getValue(1); 5207 if (MemVT != VT) 5208 L = DAG.getPtrExtOrTrunc(L, dl, VT); 5209 5210 setValue(&I, L); 5211 DAG.setRoot(OutChain); 5212 } 5213 5214 void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) { 5215 SDLoc dl = getCurSDLoc(); 5216 5217 AtomicOrdering Ordering = I.getOrdering(); 5218 SyncScope::ID SSID = I.getSyncScopeID(); 5219 5220 SDValue InChain = getRoot(); 5221 5222 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5223 EVT MemVT = 5224 TLI.getMemValueType(DAG.getDataLayout(), I.getValueOperand()->getType()); 5225 5226 if (!TLI.supportsUnalignedAtomics() && 5227 I.getAlign().value() < MemVT.getSizeInBits() / 8) 5228 report_fatal_error("Cannot generate unaligned atomic store"); 5229 5230 auto Flags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout()); 5231 5232 MachineFunction &MF = DAG.getMachineFunction(); 5233 MachineMemOperand *MMO = MF.getMachineMemOperand( 5234 MachinePointerInfo(I.getPointerOperand()), Flags, 5235 LocationSize::precise(MemVT.getStoreSize()), I.getAlign(), AAMDNodes(), 5236 nullptr, SSID, Ordering); 5237 5238 SDValue Val = getValue(I.getValueOperand()); 5239 if (Val.getValueType() != MemVT) 5240 Val = DAG.getPtrExtOrTrunc(Val, dl, MemVT); 5241 SDValue Ptr = getValue(I.getPointerOperand()); 5242 5243 SDValue OutChain = 5244 DAG.getAtomic(ISD::ATOMIC_STORE, dl, MemVT, InChain, Val, Ptr, MMO); 5245 5246 setValue(&I, OutChain); 5247 DAG.setRoot(OutChain); 5248 } 5249 5250 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC 5251 /// node. 5252 void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I, 5253 unsigned Intrinsic) { 5254 // Ignore the callsite's attributes. A specific call site may be marked with 5255 // readnone, but the lowering code will expect the chain based on the 5256 // definition. 
5257 const Function *F = I.getCalledFunction(); 5258 bool HasChain = !F->doesNotAccessMemory(); 5259 bool OnlyLoad = 5260 HasChain && F->onlyReadsMemory() && F->willReturn() && F->doesNotThrow(); 5261 5262 // Build the operand list. 5263 SmallVector<SDValue, 8> Ops; 5264 if (HasChain) { // If this intrinsic has side-effects, chainify it. 5265 if (OnlyLoad) { 5266 // We don't need to serialize loads against other loads. 5267 Ops.push_back(DAG.getRoot()); 5268 } else { 5269 Ops.push_back(getRoot()); 5270 } 5271 } 5272 5273 // Info is set by getTgtMemIntrinsic 5274 TargetLowering::IntrinsicInfo Info; 5275 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 5276 bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I, 5277 DAG.getMachineFunction(), 5278 Intrinsic); 5279 5280 // Add the intrinsic ID as an integer operand if it's not a target intrinsic. 5281 if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID || 5282 Info.opc == ISD::INTRINSIC_W_CHAIN) 5283 Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(), 5284 TLI.getPointerTy(DAG.getDataLayout()))); 5285 5286 // Add all operands of the call to the operand list. 5287 for (unsigned i = 0, e = I.arg_size(); i != e; ++i) { 5288 const Value *Arg = I.getArgOperand(i); 5289 if (!I.paramHasAttr(i, Attribute::ImmArg)) { 5290 Ops.push_back(getValue(Arg)); 5291 continue; 5292 } 5293 5294 // Use TargetConstant instead of a regular constant for immarg. 5295 EVT VT = TLI.getValueType(DAG.getDataLayout(), Arg->getType(), true); 5296 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Arg)) { 5297 assert(CI->getBitWidth() <= 64 && 5298 "large intrinsic immediates not handled"); 5299 Ops.push_back(DAG.getTargetConstant(*CI, SDLoc(), VT)); 5300 } else { 5301 Ops.push_back( 5302 DAG.getTargetConstantFP(*cast<ConstantFP>(Arg), SDLoc(), VT)); 5303 } 5304 } 5305 5306 SmallVector<EVT, 4> ValueVTs; 5307 ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs); 5308 5309 if (HasChain) 5310 ValueVTs.push_back(MVT::Other); 5311 5312 SDVTList VTs = DAG.getVTList(ValueVTs); 5313 5314 // Propagate fast-math-flags from IR to node(s). 5315 SDNodeFlags Flags; 5316 if (auto *FPMO = dyn_cast<FPMathOperator>(&I)) 5317 Flags.copyFMF(*FPMO); 5318 SelectionDAG::FlagInserter FlagsInserter(DAG, Flags); 5319 5320 // Create the node. 5321 SDValue Result; 5322 5323 if (auto Bundle = I.getOperandBundle(LLVMContext::OB_convergencectrl)) { 5324 auto *Token = Bundle->Inputs[0].get(); 5325 SDValue ConvControlToken = getValue(Token); 5326 assert(Ops.back().getValueType() != MVT::Glue && 5327 "Did not expect another glue node here."); 5328 ConvControlToken = 5329 DAG.getNode(ISD::CONVERGENCECTRL_GLUE, {}, MVT::Glue, ConvControlToken); 5330 Ops.push_back(ConvControlToken); 5331 } 5332 5333 // In some cases, custom collection of operands from CallInst I may be needed. 5334 TLI.CollectTargetIntrinsicOperands(I, Ops, DAG); 5335 if (IsTgtIntrinsic) { 5336 // This is a target intrinsic that touches memory 5337 // 5338 // TODO: We currently just fall back to address space 0 if getTgtMemIntrinsic 5339 // didn't yield anything useful.
5340 MachinePointerInfo MPI; 5341 if (Info.ptrVal) 5342 MPI = MachinePointerInfo(Info.ptrVal, Info.offset); 5343 else if (Info.fallbackAddressSpace) 5344 MPI = MachinePointerInfo(*Info.fallbackAddressSpace); 5345 Result = DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(), VTs, Ops, 5346 Info.memVT, MPI, Info.align, Info.flags, 5347 Info.size, I.getAAMetadata()); 5348 } else if (!HasChain) { 5349 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops); 5350 } else if (!I.getType()->isVoidTy()) { 5351 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops); 5352 } else { 5353 Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops); 5354 } 5355 5356 if (HasChain) { 5357 SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1); 5358 if (OnlyLoad) 5359 PendingLoads.push_back(Chain); 5360 else 5361 DAG.setRoot(Chain); 5362 } 5363 5364 if (!I.getType()->isVoidTy()) { 5365 if (!isa<VectorType>(I.getType())) 5366 Result = lowerRangeToAssertZExt(DAG, I, Result); 5367 5368 MaybeAlign Alignment = I.getRetAlign(); 5369 5370 // Insert `assertalign` node if there's an alignment. 5371 if (InsertAssertAlign && Alignment) { 5372 Result = 5373 DAG.getAssertAlign(getCurSDLoc(), Result, Alignment.valueOrOne()); 5374 } 5375 } 5376 5377 setValue(&I, Result); 5378 } 5379 5380 /// GetSignificand - Get the significand and build it into a floating-point 5381 /// number with exponent of 1: 5382 /// 5383 /// Op = (Op & 0x007fffff) | 0x3f800000; 5384 /// 5385 /// where Op is the i32 bit pattern of the floating-point value. 5386 static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl) { 5387 SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op, 5388 DAG.getConstant(0x007fffff, dl, MVT::i32)); 5389 SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1, 5390 DAG.getConstant(0x3f800000, dl, MVT::i32)); 5391 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2); 5392 } 5393 5394 /// GetExponent - Get the exponent: 5395 /// 5396 /// (float)(int)(((Op & 0x7f800000) >> 23) - 127); 5397 /// 5398 /// where Op is the i32 bit pattern of the floating-point value. 5399 static SDValue GetExponent(SelectionDAG &DAG, SDValue Op, 5400 const TargetLowering &TLI, const SDLoc &dl) { 5401 SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op, 5402 DAG.getConstant(0x7f800000, dl, MVT::i32)); 5403 SDValue t1 = DAG.getNode( 5404 ISD::SRL, dl, MVT::i32, t0, 5405 DAG.getConstant(23, dl, 5406 TLI.getShiftAmountTy(MVT::i32, DAG.getDataLayout()))); 5407 SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1, 5408 DAG.getConstant(127, dl, MVT::i32)); 5409 return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2); 5410 } 5411 5412 /// getF32Constant - Get a 32-bit floating-point constant. 5413 static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt, 5414 const SDLoc &dl) { 5415 return DAG.getConstantFP(APFloat(APFloat::IEEEsingle(), APInt(32, Flt)), dl, 5416 MVT::f32); 5417 } 5418 5419 static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl, 5420 SelectionDAG &DAG) { 5421 // TODO: What fast-math-flags should be set on the floating-point nodes?
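// The code below evaluates 2^t0 by splitting t0 into integer and
// fractional parts: 2^t0 == 2^IntegerPartOfX * 2^FractionalPartOfX. The
// integer part is folded directly into the exponent field of the IEEE-754
// result (hence the shift by 23 and the integer add at the end), while
// 2^FractionalPartOfX is approximated by a minimax polynomial chosen
// according to LimitFloatPrecision.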
5422 5423 // IntegerPartOfX = (int32_t)t0; 5424 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0); 5425 5426 // FractionalPartOfX = t0 - (float)IntegerPartOfX; 5427 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX); 5428 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1); 5429 5430 // IntegerPartOfX <<= 23; 5431 IntegerPartOfX = 5432 DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX, 5433 DAG.getConstant(23, dl, 5434 DAG.getTargetLoweringInfo().getShiftAmountTy( 5435 MVT::i32, DAG.getDataLayout()))); 5436 5437 SDValue TwoToFractionalPartOfX; 5438 if (LimitFloatPrecision <= 6) { 5439 // For floating-point precision of 6: 5440 // 5441 // TwoToFractionalPartOfX = 5442 // 0.997535578f + 5443 // (0.735607626f + 0.252464424f * x) * x; 5444 // 5445 // error 0.0144103317, which is 6 bits 5446 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 5447 getF32Constant(DAG, 0x3e814304, dl)); 5448 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2, 5449 getF32Constant(DAG, 0x3f3c50c8, dl)); 5450 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 5451 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 5452 getF32Constant(DAG, 0x3f7f5e7e, dl)); 5453 } else if (LimitFloatPrecision <= 12) { 5454 // For floating-point precision of 12: 5455 // 5456 // TwoToFractionalPartOfX = 5457 // 0.999892986f + 5458 // (0.696457318f + 5459 // (0.224338339f + 0.792043434e-1f * x) * x) * x; 5460 // 5461 // error 0.000107046256, which is 13 to 14 bits 5462 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 5463 getF32Constant(DAG, 0x3da235e3, dl)); 5464 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2, 5465 getF32Constant(DAG, 0x3e65b8f3, dl)); 5466 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 5467 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 5468 getF32Constant(DAG, 0x3f324b07, dl)); 5469 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); 5470 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6, 5471 getF32Constant(DAG, 0x3f7ff8fd, dl)); 5472 } else { // LimitFloatPrecision <= 18 5473 // For floating-point precision of 18: 5474 // 5475 // TwoToFractionalPartOfX = 5476 // 0.999999982f + 5477 // (0.693148872f + 5478 // (0.240227044f + 5479 // (0.554906021e-1f + 5480 // (0.961591928e-2f + 5481 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x; 5482 // error 2.47208000*10^(-7), which is better than 18 bits 5483 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 5484 getF32Constant(DAG, 0x3924b03e, dl)); 5485 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2, 5486 getF32Constant(DAG, 0x3ab24b87, dl)); 5487 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 5488 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 5489 getF32Constant(DAG, 0x3c1d8c17, dl)); 5490 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); 5491 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6, 5492 getF32Constant(DAG, 0x3d634a1d, dl)); 5493 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X); 5494 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8, 5495 getF32Constant(DAG, 0x3e75fe14, dl)); 5496 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X); 5497 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10, 5498 getF32Constant(DAG, 0x3f317234, dl)); 5499 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X); 5500 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12, 5501 getF32Constant(DAG, 0x3f800000, dl)); 5502 } 5503 5504 // Add the exponent into the result in integer
domain. 5505 SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX); 5506 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, 5507 DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX)); 5508 } 5509 5510 /// expandExp - Lower an exp intrinsic. Handles the special sequences for 5511 /// limited-precision mode. 5512 static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, 5513 const TargetLowering &TLI, SDNodeFlags Flags) { 5514 if (Op.getValueType() == MVT::f32 && 5515 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) { 5516 5517 // Put the exponent in the right bit position for later addition to the 5518 // final result: 5519 // 5520 // t0 = Op * log2(e) 5521 5522 // TODO: What fast-math-flags should be set here? 5523 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op, 5524 DAG.getConstantFP(numbers::log2ef, dl, MVT::f32)); 5525 return getLimitedPrecisionExp2(t0, dl, DAG); 5526 } 5527 5528 // No special expansion. 5529 return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op, Flags); 5530 } 5531 5532 /// expandLog - Lower a log intrinsic. Handles the special sequences for 5533 /// limited-precision mode. 5534 static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, 5535 const TargetLowering &TLI, SDNodeFlags Flags) { 5536 // TODO: What fast-math-flags should be set on the floating-point nodes? 5537 5538 if (Op.getValueType() == MVT::f32 && 5539 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) { 5540 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op); 5541 5542 // Scale the exponent by log(2). 5543 SDValue Exp = GetExponent(DAG, Op1, TLI, dl); 5544 SDValue LogOfExponent = 5545 DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp, 5546 DAG.getConstantFP(numbers::ln2f, dl, MVT::f32)); 5547 5548 // Get the significand and build it into a floating-point number with 5549 // exponent of 1. 
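// With Op == M * 2^E for a mantissa M in [1,2), ln(Op) == E*ln(2) + ln(M).
// LogOfExponent above is E*ln(2); the polynomials below approximate ln(M)
// over [1,2) at the requested precision.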
5550 SDValue X = GetSignificand(DAG, Op1, dl); 5551 5552 SDValue LogOfMantissa; 5553 if (LimitFloatPrecision <= 6) { 5554 // For floating-point precision of 6: 5555 // 5556 // LogOfMantissa = 5557 // -1.1609546f + 5558 // (1.4034025f - 0.23903021f * x) * x; 5559 // 5560 // error 0.0034276066, which is better than 8 bits 5561 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 5562 getF32Constant(DAG, 0xbe74c456, dl)); 5563 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0, 5564 getF32Constant(DAG, 0x3fb3a2b1, dl)); 5565 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 5566 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2, 5567 getF32Constant(DAG, 0x3f949a29, dl)); 5568 } else if (LimitFloatPrecision <= 12) { 5569 // For floating-point precision of 12: 5570 // 5571 // LogOfMantissa = 5572 // -1.7417939f + 5573 // (2.8212026f + 5574 // (-1.4699568f + 5575 // (0.44717955f - 0.56570851e-1f * x) * x) * x) * x; 5576 // 5577 // error 0.000061011436, which is 14 bits 5578 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 5579 getF32Constant(DAG, 0xbd67b6d6, dl)); 5580 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0, 5581 getF32Constant(DAG, 0x3ee4f4b8, dl)); 5582 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 5583 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2, 5584 getF32Constant(DAG, 0x3fbc278b, dl)); 5585 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 5586 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 5587 getF32Constant(DAG, 0x40348e95, dl)); 5588 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); 5589 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6, 5590 getF32Constant(DAG, 0x3fdef31a, dl)); 5591 } else { // LimitFloatPrecision <= 18 5592 // For floating-point precision of 18: 5593 // 5594 // LogOfMantissa = 5595 // -2.1072184f + 5596 // (4.2372794f + 5597 // (-3.7029485f + 5598 // (2.2781945f + 5599 // (-0.87823314f + 5600 // (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x; 5601 // 5602 // error 0.0000023660568, which is better than 18 bits 5603 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 5604 getF32Constant(DAG, 0xbc91e5ac, dl)); 5605 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0, 5606 getF32Constant(DAG, 0x3e4350aa, dl)); 5607 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 5608 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2, 5609 getF32Constant(DAG, 0x3f60d3e3, dl)); 5610 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 5611 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 5612 getF32Constant(DAG, 0x4011cdf0, dl)); 5613 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); 5614 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6, 5615 getF32Constant(DAG, 0x406cfd1c, dl)); 5616 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X); 5617 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8, 5618 getF32Constant(DAG, 0x408797cb, dl)); 5619 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X); 5620 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10, 5621 getF32Constant(DAG, 0x4006dcab, dl)); 5622 } 5623 5624 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa); 5625 } 5626 5627 // No special expansion. 5628 return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op, Flags); 5629 } 5630 5631 /// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for 5632 /// limited-precision mode.
5633 static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, 5634 const TargetLowering &TLI, SDNodeFlags Flags) { 5635 // TODO: What fast-math-flags should be set on the floating-point nodes? 5636 5637 if (Op.getValueType() == MVT::f32 && 5638 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) { 5639 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op); 5640 5641 // Get the exponent. 5642 SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl); 5643 5644 // Get the significand and build it into a floating-point number with 5645 // exponent of 1. 5646 SDValue X = GetSignificand(DAG, Op1, dl); 5647 5648 // Different possible minimax approximations of significand in 5649 // floating-point for various degrees of accuracy over [1,2]. 5650 SDValue Log2ofMantissa; 5651 if (LimitFloatPrecision <= 6) { 5652 // For floating-point precision of 6: 5653 // 5654 // Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x; 5655 // 5656 // error 0.0049451742, which is more than 7 bits 5657 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 5658 getF32Constant(DAG, 0xbeb08fe0, dl)); 5659 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0, 5660 getF32Constant(DAG, 0x40019463, dl)); 5661 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 5662 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2, 5663 getF32Constant(DAG, 0x3fd6633d, dl)); 5664 } else if (LimitFloatPrecision <= 12) { 5665 // For floating-point precision of 12: 5666 // 5667 // Log2ofMantissa = 5668 // -2.51285454f + 5669 // (4.07009056f + 5670 // (-2.12067489f + 5671 // (.645142248f - 0.816157886e-1f * x) * x) * x) * x; 5672 // 5673 // error 0.0000876136000, which is better than 13 bits 5674 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 5675 getF32Constant(DAG, 0xbda7262e, dl)); 5676 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0, 5677 getF32Constant(DAG, 0x3f25280b, dl)); 5678 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 5679 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2, 5680 getF32Constant(DAG, 0x4007b923, dl)); 5681 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 5682 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 5683 getF32Constant(DAG, 0x40823e2f, dl)); 5684 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); 5685 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6, 5686 getF32Constant(DAG, 0x4020d29c, dl)); 5687 } else { // LimitFloatPrecision <= 18 5688 // For floating-point precision of 18: 5689 // 5690 // Log2ofMantissa = 5691 // -3.0400495f + 5692 // (6.1129976f + 5693 // (-5.3420409f + 5694 // (3.2865683f + 5695 // (-1.2669343f + 5696 // (0.27515199f - 5697 // 0.25691327e-1f * x) * x) * x) * x) * x) * x; 5698 // 5699 // error 0.0000018516, which is better than 18 bits 5700 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 5701 getF32Constant(DAG, 0xbcd2769e, dl)); 5702 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0, 5703 getF32Constant(DAG, 0x3e8ce0b9, dl)); 5704 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 5705 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2, 5706 getF32Constant(DAG, 0x3fa22ae7, dl)); 5707 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 5708 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, 5709 getF32Constant(DAG, 0x40525723, dl)); 5710 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); 5711 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6, 5712 getF32Constant(DAG, 0x40aaf200, dl)); 5713 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X); 5714 
SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8, 5715 getF32Constant(DAG, 0x40c39dad, dl)); 5716 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X); 5717 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10, 5718 getF32Constant(DAG, 0x4042902c, dl)); 5719 } 5720 5721 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa); 5722 } 5723 5724 // No special expansion. 5725 return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op, Flags); 5726 } 5727 5728 /// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for 5729 /// limited-precision mode. 5730 static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, 5731 const TargetLowering &TLI, SDNodeFlags Flags) { 5732 // TODO: What fast-math-flags should be set on the floating-point nodes? 5733 5734 if (Op.getValueType() == MVT::f32 && 5735 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) { 5736 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op); 5737 5738 // Scale the exponent by log10(2) [0.30102999f]. 5739 SDValue Exp = GetExponent(DAG, Op1, TLI, dl); 5740 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp, 5741 getF32Constant(DAG, 0x3e9a209a, dl)); 5742 5743 // Get the significand and build it into a floating-point number with 5744 // exponent of 1. 5745 SDValue X = GetSignificand(DAG, Op1, dl); 5746 5747 SDValue Log10ofMantissa; 5748 if (LimitFloatPrecision <= 6) { 5749 // For floating-point precision of 6: 5750 // 5751 // Log10ofMantissa = 5752 // -0.50419619f + 5753 // (0.60948995f - 0.10380950f * x) * x; 5754 // 5755 // error 0.0014886165, which is 6 bits 5756 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 5757 getF32Constant(DAG, 0xbdd49a13, dl)); 5758 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0, 5759 getF32Constant(DAG, 0x3f1c0789, dl)); 5760 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 5761 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2, 5762 getF32Constant(DAG, 0x3f011300, dl)); 5763 } else if (LimitFloatPrecision <= 12) { 5764 // For floating-point precision of 12: 5765 // 5766 // Log10ofMantissa = 5767 // -0.64831180f + 5768 // (0.91751397f + 5769 // (-0.31664806f + 0.47637168e-1f * x) * x) * x; 5770 // 5771 // error 0.00019228036, which is better than 12 bits 5772 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 5773 getF32Constant(DAG, 0x3d431f31, dl)); 5774 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, 5775 getF32Constant(DAG, 0x3ea21fb2, dl)); 5776 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 5777 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2, 5778 getF32Constant(DAG, 0x3f6ae232, dl)); 5779 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 5780 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4, 5781 getF32Constant(DAG, 0x3f25f7c3, dl)); 5782 } else { // LimitFloatPrecision <= 18 5783 // For floating-point precision of 18: 5784 // 5785 // Log10ofMantissa = 5786 // -0.84299375f + 5787 // (1.5327582f + 5788 // (-1.0688956f + 5789 // (0.49102474f + 5790 // (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x; 5791 // 5792 // error 0.0000037995730, which is better than 18 bits 5793 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X, 5794 getF32Constant(DAG, 0x3c5d51ce, dl)); 5795 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, 5796 getF32Constant(DAG, 0x3e00685a, dl)); 5797 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X); 5798 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2, 5799 getF32Constant(DAG, 0x3efb6798, dl)); 5800 SDValue t4 
= DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); 5801 SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4, 5802 getF32Constant(DAG, 0x3f88d192, dl)); 5803 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); 5804 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6, 5805 getF32Constant(DAG, 0x3fc4316c, dl)); 5806 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X); 5807 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8, 5808 getF32Constant(DAG, 0x3f57ce70, dl)); 5809 } 5810 5811 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa); 5812 } 5813 5814 // No special expansion. 5815 return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op, Flags); 5816 } 5817 5818 /// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for 5819 /// limited-precision mode. 5820 static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, 5821 const TargetLowering &TLI, SDNodeFlags Flags) { 5822 if (Op.getValueType() == MVT::f32 && 5823 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) 5824 return getLimitedPrecisionExp2(Op, dl, DAG); 5825 5826 // No special expansion. 5827 return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op, Flags); 5828 } 5829 5830 /// expandPow - Lower a pow intrinsic. Handles the special sequences for 5831 /// limited-precision mode with x == 10.0f. 5832 static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS, 5833 SelectionDAG &DAG, const TargetLowering &TLI, 5834 SDNodeFlags Flags) { 5835 bool IsExp10 = false; 5836 if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 && 5837 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) { 5838 if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) { 5839 APFloat Ten(10.0f); 5840 IsExp10 = LHSC->isExactlyValue(Ten); 5841 } 5842 } 5843 5844 // TODO: What fast-math-flags should be set on the FMUL node? 5845 if (IsExp10) { 5846 // Put the exponent in the right bit position for later addition to the 5847 // final result: 5848 // 5849 // #define LOG2OF10 3.3219281f 5850 // t0 = Op * LOG2OF10; 5851 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS, 5852 getF32Constant(DAG, 0x40549a78, dl)); 5853 return getLimitedPrecisionExp2(t0, dl, DAG); 5854 } 5855 5856 // No special expansion. 5857 return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS, Flags); 5858 } 5859 5860 /// ExpandPowI - Expand a llvm.powi intrinsic. 5861 static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS, 5862 SelectionDAG &DAG) { 5863 // If RHS is a constant, we can expand this out to a multiplication tree if 5864 // it's beneficial on the target, otherwise we end up lowering to a call to 5865 // __powidf2 (for example). 5866 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) { 5867 unsigned Val = RHSC->getSExtValue(); 5868 5869 // powi(x, 0) -> 1.0 5870 if (Val == 0) 5871 return DAG.getConstantFP(1.0, DL, LHS.getValueType()); 5872 5873 if (DAG.getTargetLoweringInfo().isBeneficialToExpandPowI( 5874 Val, DAG.shouldOptForSize())) { 5875 // Get the exponent as a positive value. 5876 if ((int)Val < 0) 5877 Val = -Val; 5878 // We use the simple binary decomposition method to generate the multiply 5879 // sequence. There are more optimal ways to do this (for example, 5880 // powi(x,15) generates one more multiply than it should), but this has 5881 // the benefit of being both really simple and much better than a libcall.
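// For example, powi(x, 13) with 13 == 0b1101 accumulates
// Res = x * x^4 * x^8 as the set bits are visited, while CurSquare walks
// x, x^2, x^4, x^8 (the final squaring is computed but unused).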
      SDValue Res; // Logically starts equal to 1.0
      SDValue CurSquare = LHS;
      // TODO: Intrinsics should have fast-math-flags that propagate to these
      // nodes.
      while (Val) {
        if (Val & 1) {
          if (Res.getNode())
            Res =
                DAG.getNode(ISD::FMUL, DL, Res.getValueType(), Res, CurSquare);
          else
            Res = CurSquare; // 1.0*CurSquare.
        }

        CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
                                CurSquare, CurSquare);
        Val >>= 1;
      }

      // If the original was negative, invert the result, producing 1/(x*x*x).
      if (RHSC->getSExtValue() < 0)
        Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
                          DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res);
      return Res;
    }
  }

  // Otherwise, expand to a libcall.
  return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
}

static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL,
                            SDValue LHS, SDValue RHS, SDValue Scale,
                            SelectionDAG &DAG, const TargetLowering &TLI) {
  EVT VT = LHS.getValueType();
  bool Signed = Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT;
  bool Saturating = Opcode == ISD::SDIVFIXSAT || Opcode == ISD::UDIVFIXSAT;
  LLVMContext &Ctx = *DAG.getContext();

  // If the type is legal but the operation isn't, this node might survive all
  // the way to operation legalization. If we end up there and we do not have
  // the ability to widen the type (if VT*2 is not legal), we cannot expand
  // the node.

  // Coax the legalizer into expanding the node during type legalization
  // instead by bumping the size by one bit. This will force it to Promote,
  // enabling the early expansion and avoiding the need to expand later.

  // We don't have to do this if Scale is 0; that can always be expanded,
  // unless it's a saturating signed operation. Those can experience true
  // integer division overflow, a case which we must avoid.

  // FIXME: We wouldn't have to do this (or any of the early
  // expansion/promotion) if it was possible to expand a libcall of an
  // illegal type during operation legalization. But it's not, so things
  // get a bit hacky.
  unsigned ScaleInt = Scale->getAsZExtVal();
  if ((ScaleInt > 0 || (Saturating && Signed)) &&
      (TLI.isTypeLegal(VT) ||
       (VT.isVector() && TLI.isTypeLegal(VT.getVectorElementType())))) {
    TargetLowering::LegalizeAction Action = TLI.getFixedPointOperationAction(
        Opcode, VT, ScaleInt);
    if (Action != TargetLowering::Legal && Action != TargetLowering::Custom) {
      EVT PromVT;
      if (VT.isScalarInteger())
        PromVT = EVT::getIntegerVT(Ctx, VT.getSizeInBits() + 1);
      else if (VT.isVector()) {
        PromVT = VT.getVectorElementType();
        PromVT = EVT::getIntegerVT(Ctx, PromVT.getSizeInBits() + 1);
        PromVT = EVT::getVectorVT(Ctx, PromVT, VT.getVectorElementCount());
      } else
        llvm_unreachable("Wrong VT for DIVFIX?");
      LHS = DAG.getExtOrTrunc(Signed, LHS, DL, PromVT);
      RHS = DAG.getExtOrTrunc(Signed, RHS, DL, PromVT);
      EVT ShiftTy = TLI.getShiftAmountTy(PromVT, DAG.getDataLayout());
      // For saturating operations, we need to shift up the LHS to get the
      // proper saturation width, and then shift down again afterwards.
      if (Saturating)
        LHS = DAG.getNode(ISD::SHL, DL, PromVT, LHS,
                          DAG.getConstant(1, DL, ShiftTy));
      SDValue Res = DAG.getNode(Opcode, DL, PromVT, LHS, RHS, Scale);
      if (Saturating)
        Res = DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, PromVT, Res,
                          DAG.getConstant(1, DL, ShiftTy));
      return DAG.getZExtOrTrunc(Res, DL, VT);
    }
  }

  return DAG.getNode(Opcode, DL, VT, LHS, RHS, Scale);
}

// getUnderlyingArgRegs - Find underlying registers used for a truncated,
// bitcasted, or split argument. Returns a list of <Register, size in bits>.
static void
getUnderlyingArgRegs(SmallVectorImpl<std::pair<Register, TypeSize>> &Regs,
                     const SDValue &N) {
  switch (N.getOpcode()) {
  case ISD::CopyFromReg: {
    SDValue Op = N.getOperand(1);
    Regs.emplace_back(cast<RegisterSDNode>(Op)->getReg(),
                      Op.getValueType().getSizeInBits());
    return;
  }
  case ISD::BITCAST:
  case ISD::AssertZext:
  case ISD::AssertSext:
  case ISD::TRUNCATE:
    getUnderlyingArgRegs(Regs, N.getOperand(0));
    return;
  case ISD::BUILD_PAIR:
  case ISD::BUILD_VECTOR:
  case ISD::CONCAT_VECTORS:
    for (SDValue Op : N->op_values())
      getUnderlyingArgRegs(Regs, Op);
    return;
  default:
    return;
  }
}

/// If the DbgValueInst is a dbg_value of a function argument, create the
/// corresponding DBG_VALUE machine instruction for it now. At the end of
/// instruction selection, they will be inserted to the entry BB.
/// We don't currently support this for variadic dbg_values, as they shouldn't
/// appear for function arguments or in the prologue.
bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
    const Value *V, DILocalVariable *Variable, DIExpression *Expr,
    DILocation *DL, FuncArgumentDbgValueKind Kind, const SDValue &N) {
  const Argument *Arg = dyn_cast<Argument>(V);
  if (!Arg)
    return false;

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();

  // Helper to create DBG_INSTR_REFs or DBG_VALUEs, depending on what kind
  // we've been asked to pursue.
  auto MakeVRegDbgValue = [&](Register Reg, DIExpression *FragExpr,
                              bool Indirect) {
    if (Reg.isVirtual() && MF.useDebugInstrRef()) {
      // For VRegs, in instruction referencing mode, create a DBG_INSTR_REF
      // pointing at the VReg, which will be patched up later.
      auto &Inst = TII->get(TargetOpcode::DBG_INSTR_REF);
      SmallVector<MachineOperand, 1> MOs({MachineOperand::CreateReg(
          /* Reg */ Reg, /* isDef */ false, /* isImp */ false,
          /* isKill */ false, /* isDead */ false,
          /* isUndef */ false, /* isEarlyClobber */ false,
          /* SubReg */ 0, /* isDebug */ true)});

      auto *NewDIExpr = FragExpr;
      // We don't have an "Indirect" field in DBG_INSTR_REF, fold that into
      // the DIExpression.
      if (Indirect)
        NewDIExpr = DIExpression::prepend(FragExpr, DIExpression::DerefBefore);
      SmallVector<uint64_t, 2> Ops({dwarf::DW_OP_LLVM_arg, 0});
      NewDIExpr = DIExpression::prependOpcodes(NewDIExpr, Ops);
      return BuildMI(MF, DL, Inst, false, MOs, Variable, NewDIExpr);
    } else {
      // Create a completely standard DBG_VALUE.
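      // Unlike the DBG_INSTR_REF path above, DBG_VALUE carries the
      // indirection as an instruction operand, so the DIExpression can be
      // used unchanged here.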
      auto &Inst = TII->get(TargetOpcode::DBG_VALUE);
      return BuildMI(MF, DL, Inst, Indirect, Reg, Variable, FragExpr);
    }
  };

  if (Kind == FuncArgumentDbgValueKind::Value) {
    // ArgDbgValues are hoisted to the beginning of the entry block. So we
    // should only emit as ArgDbgValue if the dbg.value intrinsic is found in
    // the entry block.
    bool IsInEntryBlock = FuncInfo.MBB == &FuncInfo.MF->front();
    if (!IsInEntryBlock)
      return false;

    // ArgDbgValues are hoisted to the beginning of the entry block. So we
    // should only emit as ArgDbgValue if the dbg.value intrinsic describes a
    // variable that is also a param.
    //
    // Although, if we are at the top of the entry block already, we can still
    // emit using ArgDbgValue. This might catch some situations when the
    // dbg.value refers to an argument that isn't used in the entry block, so
    // any CopyToReg node would be optimized out and the only way to express
    // this DBG_VALUE is by using the physical reg (or FI) as done in this
    // method.
    bool VariableIsFunctionInputArg = Variable->isParameter() &&
                                      !DL->getInlinedAt();
    bool IsInPrologue = SDNodeOrder == LowestSDNodeOrder;
    if (!IsInPrologue && !VariableIsFunctionInputArg)
      return false;

    // Here we assume that a function argument on IR level only can be used to
    // describe one input parameter on source level. If we for example have
    // source code like this
    //
    //    struct A { long x, y; };
    //    void foo(struct A a, long b) {
    //      ...
    //      b = a.x;
    //      ...
    //    }
    //
    // and IR like this
    //
    //  define void @foo(i32 %a1, i32 %a2, i32 %b)  {
    //  entry:
    //    call void @llvm.dbg.value(metadata i32 %a1, "a", DW_OP_LLVM_fragment
    //    call void @llvm.dbg.value(metadata i32 %a2, "a", DW_OP_LLVM_fragment
    //    call void @llvm.dbg.value(metadata i32 %b, "b",
    //    ...
    //    call void @llvm.dbg.value(metadata i32 %a1, "b"
    //    ...
    //
    // then the last dbg.value is describing a parameter "b" using a value
    // that is an argument. But since we have already used %a1 to describe a
    // parameter we should not handle that last dbg.value here (that would
    // result in an incorrect hoisting of the DBG_VALUE to the function
    // entry).
    // Notice that we allow one dbg.value per IR level argument, to
    // accommodate for the situation with fragments above.
    // If there is no node for the value being handled, we return true to skip
    // the normal generation of debug info, as it would kill existing debug
    // info for the parameter in case of duplicates.
    if (VariableIsFunctionInputArg) {
      unsigned ArgNo = Arg->getArgNo();
      if (ArgNo >= FuncInfo.DescribedArgs.size())
        FuncInfo.DescribedArgs.resize(ArgNo + 1, false);
      else if (!IsInPrologue && FuncInfo.DescribedArgs.test(ArgNo))
        return !NodeMap[V].getNode();
      FuncInfo.DescribedArgs.set(ArgNo);
    }
  }

  bool IsIndirect = false;
  std::optional<MachineOperand> Op;
  // Some arguments' frame index is recorded during argument lowering.
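  // getArgumentFrameIndex uses a sentinel of INT_MAX to signal that no frame
  // index was recorded, hence the check below.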
  int FI = FuncInfo.getArgumentFrameIndex(Arg);
  if (FI != std::numeric_limits<int>::max())
    Op = MachineOperand::CreateFI(FI);

  SmallVector<std::pair<Register, TypeSize>, 8> ArgRegsAndSizes;
  if (!Op && N.getNode()) {
    getUnderlyingArgRegs(ArgRegsAndSizes, N);
    Register Reg;
    if (ArgRegsAndSizes.size() == 1)
      Reg = ArgRegsAndSizes.front().first;

    if (Reg && Reg.isVirtual()) {
      MachineRegisterInfo &RegInfo = MF.getRegInfo();
      Register PR = RegInfo.getLiveInPhysReg(Reg);
      if (PR)
        Reg = PR;
    }
    if (Reg) {
      Op = MachineOperand::CreateReg(Reg, false);
      IsIndirect = Kind != FuncArgumentDbgValueKind::Value;
    }
  }

  if (!Op && N.getNode()) {
    // Check if frame index is available.
    SDValue LCandidate = peekThroughBitcasts(N);
    if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(LCandidate.getNode()))
      if (FrameIndexSDNode *FINode =
              dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
        Op = MachineOperand::CreateFI(FINode->getIndex());
  }

  if (!Op) {
    // Create a DBG_VALUE for each decomposed value in ArgRegs to cover Reg
    auto splitMultiRegDbgValue = [&](ArrayRef<std::pair<Register, TypeSize>>
                                         SplitRegs) {
      unsigned Offset = 0;
      for (const auto &RegAndSize : SplitRegs) {
        // If the expression is already a fragment, the current register
        // offset+size might extend beyond the fragment. In this case, only
        // the register bits that are inside the fragment are relevant.
        int RegFragmentSizeInBits = RegAndSize.second;
        if (auto ExprFragmentInfo = Expr->getFragmentInfo()) {
          uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
          // The register is entirely outside the expression fragment,
          // so is irrelevant for debug info.
          if (Offset >= ExprFragmentSizeInBits)
            break;
          // The register is partially outside the expression fragment, only
          // the low bits within the fragment are relevant for debug info.
          if (Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
            RegFragmentSizeInBits = ExprFragmentSizeInBits - Offset;
          }
        }

        auto FragmentExpr = DIExpression::createFragmentExpression(
            Expr, Offset, RegFragmentSizeInBits);
        Offset += RegAndSize.second;
        // If a valid fragment expression cannot be created, the variable's
        // correct value cannot be determined and so it is set as Undef.
        if (!FragmentExpr) {
          SDDbgValue *SDV = DAG.getConstantDbgValue(
              Variable, Expr, UndefValue::get(V->getType()), DL, SDNodeOrder);
          DAG.AddDbgValue(SDV, false);
          continue;
        }
        MachineInstr *NewMI =
            MakeVRegDbgValue(RegAndSize.first, *FragmentExpr,
                             Kind != FuncArgumentDbgValueKind::Value);
        FuncInfo.ArgDbgValues.push_back(NewMI);
      }
    };

    // Check if ValueMap has reg number.
    DenseMap<const Value *, Register>::const_iterator
        VMI = FuncInfo.ValueMap.find(V);
    if (VMI != FuncInfo.ValueMap.end()) {
      const auto &TLI = DAG.getTargetLoweringInfo();
      RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second,
                       V->getType(), std::nullopt);
      if (RFV.occupiesMultipleRegs()) {
        splitMultiRegDbgValue(RFV.getRegsAndSizes());
        return true;
      }

      Op = MachineOperand::CreateReg(VMI->second, false);
      IsIndirect = Kind != FuncArgumentDbgValueKind::Value;
    } else if (ArgRegsAndSizes.size() > 1) {
      // This was split due to the calling convention, and no virtual register
      // mapping exists for the value.
      splitMultiRegDbgValue(ArgRegsAndSizes);
      return true;
    }
  }

  if (!Op)
    return false;

  assert(Variable->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  MachineInstr *NewMI = nullptr;

  if (Op->isReg())
    NewMI = MakeVRegDbgValue(Op->getReg(), Expr, IsIndirect);
  else
    NewMI = BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), true, *Op,
                    Variable, Expr);

  // Otherwise, use ArgDbgValues.
  FuncInfo.ArgDbgValues.push_back(NewMI);
  return true;
}

/// Return the appropriate SDDbgValue based on N.
SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N,
                                             DILocalVariable *Variable,
                                             DIExpression *Expr,
                                             const DebugLoc &dl,
                                             unsigned DbgSDNodeOrder) {
  if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
    // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe
    // stack slot locations.
    //
    // Consider "int x = 0; int *px = &x;". There are two kinds of interesting
    // debug values here after optimization:
    //
    //   dbg.value(i32* %px, !"int *px", !DIExpression()), and
    //   dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
    //
    // Both describe the direct values of their associated variables.
    return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
                                     /*IsIndirect*/ false, dl, DbgSDNodeOrder);
  }
  return DAG.getDbgValue(Variable, Expr, N.getNode(), N.getResNo(),
                         /*IsIndirect*/ false, dl, DbgSDNodeOrder);
}

static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic) {
  switch (Intrinsic) {
  case Intrinsic::smul_fix:
    return ISD::SMULFIX;
  case Intrinsic::umul_fix:
    return ISD::UMULFIX;
  case Intrinsic::smul_fix_sat:
    return ISD::SMULFIXSAT;
  case Intrinsic::umul_fix_sat:
    return ISD::UMULFIXSAT;
  case Intrinsic::sdiv_fix:
    return ISD::SDIVFIX;
  case Intrinsic::udiv_fix:
    return ISD::UDIVFIX;
  case Intrinsic::sdiv_fix_sat:
    return ISD::SDIVFIXSAT;
  case Intrinsic::udiv_fix_sat:
    return ISD::UDIVFIXSAT;
  default:
    llvm_unreachable("Unhandled fixed point intrinsic");
  }
}

void SelectionDAGBuilder::lowerCallToExternalSymbol(const CallInst &I,
                                                    const char *FunctionName) {
  assert(FunctionName && "FunctionName must not be nullptr");
  SDValue Callee = DAG.getExternalSymbol(
      FunctionName,
      DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));
  LowerCallTo(I, Callee, I.isTailCall(), I.isMustTailCall());
}

/// Given a @llvm.call.preallocated.setup, return the corresponding
/// preallocated call.
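///
/// Illustrative IR shape (names hypothetical):
///
///   %t = call token @llvm.call.preallocated.setup(i32 1)
///   %a = call ptr @llvm.call.preallocated.arg(token %t, i32 0)
///            preallocated(i32)
///   call void @f(ptr preallocated(i32) %a) ["preallocated"(token %t)]
///
/// The one user of the setup token that is not a call_preallocated_arg is the
/// preallocated call itself (@f here), which is what this helper returns.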
static const CallBase *FindPreallocatedCall(const Value *PreallocatedSetup) {
  assert(cast<CallBase>(PreallocatedSetup)
                 ->getCalledFunction()
                 ->getIntrinsicID() == Intrinsic::call_preallocated_setup &&
         "expected call_preallocated_setup Value");
  for (const auto *U : PreallocatedSetup->users()) {
    auto *UseCall = cast<CallBase>(U);
    const Function *Fn = UseCall->getCalledFunction();
    if (!Fn || Fn->getIntrinsicID() != Intrinsic::call_preallocated_arg) {
      return UseCall;
    }
  }
  llvm_unreachable("expected corresponding call to preallocated setup/arg");
}

/// If DI is a debug value with an EntryValue expression, lower it using the
/// corresponding physical register of the associated Argument value
/// (guaranteed to exist by the verifier).
bool SelectionDAGBuilder::visitEntryValueDbgValue(
    ArrayRef<const Value *> Values, DILocalVariable *Variable,
    DIExpression *Expr, DebugLoc DbgLoc) {
  if (!Expr->isEntryValue() || !hasSingleElement(Values))
    return false;

  // These properties are guaranteed by the verifier.
  const Argument *Arg = cast<Argument>(Values[0]);
  assert(Arg->hasAttribute(Attribute::AttrKind::SwiftAsync));

  auto ArgIt = FuncInfo.ValueMap.find(Arg);
  if (ArgIt == FuncInfo.ValueMap.end()) {
    LLVM_DEBUG(
        dbgs() << "Dropping dbg.value: expression is entry_value but "
                  "couldn't find an associated register for the Argument\n");
    return true;
  }
  Register ArgVReg = ArgIt->getSecond();

  for (auto [PhysReg, VirtReg] : FuncInfo.RegInfo->liveins())
    if (ArgVReg == VirtReg || ArgVReg == PhysReg) {
      SDDbgValue *SDV = DAG.getVRegDbgValue(
          Variable, Expr, PhysReg, false /*IsIndirect*/, DbgLoc, SDNodeOrder);
      DAG.AddDbgValue(SDV, false /*treat as dbg.declare byval parameter*/);
      return true;
    }
  LLVM_DEBUG(dbgs() << "Dropping dbg.value: expression is entry_value but "
                       "couldn't find a physical register\n");
  return true;
}

/// Lower a call to one of the experimental convergence control intrinsics.
void SelectionDAGBuilder::visitConvergenceControl(const CallInst &I,
                                                  unsigned Intrinsic) {
  SDLoc sdl = getCurSDLoc();
  switch (Intrinsic) {
  case Intrinsic::experimental_convergence_anchor:
    setValue(&I, DAG.getNode(ISD::CONVERGENCECTRL_ANCHOR, sdl, MVT::Untyped));
    break;
  case Intrinsic::experimental_convergence_entry:
    setValue(&I, DAG.getNode(ISD::CONVERGENCECTRL_ENTRY, sdl, MVT::Untyped));
    break;
  case Intrinsic::experimental_convergence_loop: {
    auto Bundle = I.getOperandBundle(LLVMContext::OB_convergencectrl);
    auto *Token = Bundle->Inputs[0].get();
    setValue(&I, DAG.getNode(ISD::CONVERGENCECTRL_LOOP, sdl, MVT::Untyped,
                             getValue(Token)));
    break;
  }
  }
}

void SelectionDAGBuilder::visitVectorHistogram(const CallInst &I,
                                               unsigned IntrinsicID) {
  // For now, we're only lowering an 'add' histogram.
  // We can add others later, e.g. saturating adds, min/max.
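  //
  // Rough semantics, for orientation (operand order as read below): for
  //   histogram.add(<N x ptr> %ptrs, %inc, <N x i1> %mask)
  // every active lane adds %inc to the bucket addressed by its lane of %ptrs,
  // and lanes that alias the same address accumulate rather than race.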
  assert(IntrinsicID == Intrinsic::experimental_vector_histogram_add &&
         "Tried to lower unsupported histogram type");
  SDLoc sdl = getCurSDLoc();
  Value *Ptr = I.getOperand(0);
  SDValue Inc = getValue(I.getOperand(1));
  SDValue Mask = getValue(I.getOperand(2));

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  DataLayout TargetDL = DAG.getDataLayout();
  EVT VT = Inc.getValueType();
  Align Alignment = DAG.getEVTAlign(VT);

  const MDNode *Ranges = getRangeMetadata(I);

  SDValue Root = DAG.getRoot();
  SDValue Base;
  SDValue Index;
  ISD::MemIndexType IndexType;
  SDValue Scale;
  bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
                                    I.getParent(), VT.getScalarStoreSize());

  unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();

  MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
      MachinePointerInfo(AS),
      MachineMemOperand::MOLoad | MachineMemOperand::MOStore,
      MemoryLocation::UnknownSize, Alignment, I.getAAMetadata(), Ranges);

  if (!UniformBase) {
    Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
    Index = getValue(Ptr);
    IndexType = ISD::SIGNED_SCALED;
    Scale =
        DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
  }

  EVT IdxVT = Index.getValueType();
  EVT EltTy = IdxVT.getVectorElementType();
  if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
    EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
    Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
  }

  SDValue ID = DAG.getTargetConstant(IntrinsicID, sdl, MVT::i32);

  SDValue Ops[] = {Root, Inc, Mask, Base, Index, Scale, ID};
  SDValue Histogram = DAG.getMaskedHistogram(DAG.getVTList(MVT::Other), VT, sdl,
                                             Ops, MMO, IndexType);

  setValue(&I, Histogram);
  DAG.setRoot(Histogram);
}

/// Lower the call to the specified intrinsic function.
void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
                                             unsigned Intrinsic) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDLoc sdl = getCurSDLoc();
  DebugLoc dl = getCurDebugLoc();
  SDValue Res;

  SDNodeFlags Flags;
  if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
    Flags.copyFMF(*FPOp);

  switch (Intrinsic) {
  default:
    // By default, turn this into a target intrinsic node.
    visitTargetIntrinsic(I, Intrinsic);
    return;
  case Intrinsic::vscale: {
    EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
    setValue(&I, DAG.getVScale(sdl, VT, APInt(VT.getSizeInBits(), 1)));
    return;
  }
  case Intrinsic::vastart:  visitVAStart(I); return;
  case Intrinsic::vaend:    visitVAEnd(I); return;
  case Intrinsic::vacopy:   visitVACopy(I); return;
  case Intrinsic::returnaddress:
    setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl,
                             TLI.getValueType(DAG.getDataLayout(), I.getType()),
                             getValue(I.getArgOperand(0))));
    return;
  case Intrinsic::addressofreturnaddress:
    setValue(&I,
             DAG.getNode(ISD::ADDROFRETURNADDR, sdl,
                         TLI.getValueType(DAG.getDataLayout(), I.getType())));
    return;
  case Intrinsic::sponentry:
    setValue(&I,
             DAG.getNode(ISD::SPONENTRY, sdl,
                         TLI.getValueType(DAG.getDataLayout(), I.getType())));
    return;
  case Intrinsic::frameaddress:
    setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl,
                             TLI.getFrameIndexTy(DAG.getDataLayout()),
                             getValue(I.getArgOperand(0))));
    return;
  case Intrinsic::read_volatile_register:
  case Intrinsic::read_register: {
    Value *Reg = I.getArgOperand(0);
    SDValue Chain = getRoot();
    SDValue RegName =
        DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
    EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
    Res = DAG.getNode(ISD::READ_REGISTER, sdl,
                      DAG.getVTList(VT, MVT::Other), Chain, RegName);
    setValue(&I, Res);
    DAG.setRoot(Res.getValue(1));
    return;
  }
  case Intrinsic::write_register: {
    Value *Reg = I.getArgOperand(0);
    Value *RegValue = I.getArgOperand(1);
    SDValue Chain = getRoot();
    SDValue RegName =
        DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
    DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
                            RegName, getValue(RegValue)));
    return;
  }
  case Intrinsic::memcpy: {
    const auto &MCI = cast<MemCpyInst>(I);
    SDValue Op1 = getValue(I.getArgOperand(0));
    SDValue Op2 = getValue(I.getArgOperand(1));
    SDValue Op3 = getValue(I.getArgOperand(2));
    // @llvm.memcpy defines 0 and 1 to both mean no alignment.
    Align DstAlign = MCI.getDestAlign().valueOrOne();
    Align SrcAlign = MCI.getSourceAlign().valueOrOne();
    Align Alignment = std::min(DstAlign, SrcAlign);
    bool isVol = MCI.isVolatile();
    // FIXME: Support passing different dest/src alignments to the memcpy DAG
    // node.
    SDValue Root = isVol ? getRoot() : getMemoryRoot();
    SDValue MC = DAG.getMemcpy(Root, sdl, Op1, Op2, Op3, Alignment, isVol,
                               /* AlwaysInline */ false, &I, std::nullopt,
                               MachinePointerInfo(I.getArgOperand(0)),
                               MachinePointerInfo(I.getArgOperand(1)),
                               I.getAAMetadata(), AA);
    updateDAGForMaybeTailCall(MC);
    return;
  }
  case Intrinsic::memcpy_inline: {
    const auto &MCI = cast<MemCpyInlineInst>(I);
    SDValue Dst = getValue(I.getArgOperand(0));
    SDValue Src = getValue(I.getArgOperand(1));
    SDValue Size = getValue(I.getArgOperand(2));
    assert(isa<ConstantSDNode>(Size) && "memcpy_inline needs constant size");
    // @llvm.memcpy.inline defines 0 and 1 to both mean no alignment.
    Align DstAlign = MCI.getDestAlign().valueOrOne();
    Align SrcAlign = MCI.getSourceAlign().valueOrOne();
    Align Alignment = std::min(DstAlign, SrcAlign);
    bool isVol = MCI.isVolatile();
    // FIXME: Support passing different dest/src alignments to the memcpy DAG
    // node.
    SDValue MC = DAG.getMemcpy(getRoot(), sdl, Dst, Src, Size, Alignment, isVol,
                               /* AlwaysInline */ true, &I, std::nullopt,
                               MachinePointerInfo(I.getArgOperand(0)),
                               MachinePointerInfo(I.getArgOperand(1)),
                               I.getAAMetadata(), AA);
    updateDAGForMaybeTailCall(MC);
    return;
  }
  case Intrinsic::memset: {
    const auto &MSI = cast<MemSetInst>(I);
    SDValue Op1 = getValue(I.getArgOperand(0));
    SDValue Op2 = getValue(I.getArgOperand(1));
    SDValue Op3 = getValue(I.getArgOperand(2));
    // @llvm.memset defines 0 and 1 to both mean no alignment.
    Align Alignment = MSI.getDestAlign().valueOrOne();
    bool isVol = MSI.isVolatile();
    SDValue Root = isVol ? getRoot() : getMemoryRoot();
    SDValue MS = DAG.getMemset(
        Root, sdl, Op1, Op2, Op3, Alignment, isVol, /* AlwaysInline */ false,
        &I, MachinePointerInfo(I.getArgOperand(0)), I.getAAMetadata());
    updateDAGForMaybeTailCall(MS);
    return;
  }
  case Intrinsic::memset_inline: {
    const auto &MSII = cast<MemSetInlineInst>(I);
    SDValue Dst = getValue(I.getArgOperand(0));
    SDValue Value = getValue(I.getArgOperand(1));
    SDValue Size = getValue(I.getArgOperand(2));
    assert(isa<ConstantSDNode>(Size) && "memset_inline needs constant size");
    // @llvm.memset defines 0 and 1 to both mean no alignment.
    Align DstAlign = MSII.getDestAlign().valueOrOne();
    bool isVol = MSII.isVolatile();
    SDValue Root = isVol ? getRoot() : getMemoryRoot();
    SDValue MC = DAG.getMemset(Root, sdl, Dst, Value, Size, DstAlign, isVol,
                               /* AlwaysInline */ true, &I,
                               MachinePointerInfo(I.getArgOperand(0)),
                               I.getAAMetadata());
    updateDAGForMaybeTailCall(MC);
    return;
  }
  case Intrinsic::memmove: {
    const auto &MMI = cast<MemMoveInst>(I);
    SDValue Op1 = getValue(I.getArgOperand(0));
    SDValue Op2 = getValue(I.getArgOperand(1));
    SDValue Op3 = getValue(I.getArgOperand(2));
    // @llvm.memmove defines 0 and 1 to both mean no alignment.
    Align DstAlign = MMI.getDestAlign().valueOrOne();
    Align SrcAlign = MMI.getSourceAlign().valueOrOne();
    Align Alignment = std::min(DstAlign, SrcAlign);
    bool isVol = MMI.isVolatile();
    // FIXME: Support passing different dest/src alignments to the memmove DAG
    // node.
    SDValue Root = isVol ? getRoot() : getMemoryRoot();
    SDValue MM = DAG.getMemmove(Root, sdl, Op1, Op2, Op3, Alignment, isVol, &I,
                                /* OverrideTailCall */ std::nullopt,
                                MachinePointerInfo(I.getArgOperand(0)),
                                MachinePointerInfo(I.getArgOperand(1)),
                                I.getAAMetadata(), AA);
    updateDAGForMaybeTailCall(MM);
    return;
  }
  case Intrinsic::memcpy_element_unordered_atomic: {
    const AtomicMemCpyInst &MI = cast<AtomicMemCpyInst>(I);
    SDValue Dst = getValue(MI.getRawDest());
    SDValue Src = getValue(MI.getRawSource());
    SDValue Length = getValue(MI.getLength());

    Type *LengthTy = MI.getLength()->getType();
    unsigned ElemSz = MI.getElementSizeInBytes();
    bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
    SDValue MC =
        DAG.getAtomicMemcpy(getRoot(), sdl, Dst, Src, Length, LengthTy, ElemSz,
                            isTC, MachinePointerInfo(MI.getRawDest()),
                            MachinePointerInfo(MI.getRawSource()));
    updateDAGForMaybeTailCall(MC);
    return;
  }
  case Intrinsic::memmove_element_unordered_atomic: {
    auto &MI = cast<AtomicMemMoveInst>(I);
    SDValue Dst = getValue(MI.getRawDest());
    SDValue Src = getValue(MI.getRawSource());
    SDValue Length = getValue(MI.getLength());

    Type *LengthTy = MI.getLength()->getType();
    unsigned ElemSz = MI.getElementSizeInBytes();
    bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
    SDValue MC =
        DAG.getAtomicMemmove(getRoot(), sdl, Dst, Src, Length, LengthTy,
                             ElemSz, isTC, MachinePointerInfo(MI.getRawDest()),
                             MachinePointerInfo(MI.getRawSource()));
    updateDAGForMaybeTailCall(MC);
    return;
  }
  case Intrinsic::memset_element_unordered_atomic: {
    auto &MI = cast<AtomicMemSetInst>(I);
    SDValue Dst = getValue(MI.getRawDest());
    SDValue Val = getValue(MI.getValue());
    SDValue Length = getValue(MI.getLength());

    Type *LengthTy = MI.getLength()->getType();
    unsigned ElemSz = MI.getElementSizeInBytes();
    bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
    SDValue MC =
        DAG.getAtomicMemset(getRoot(), sdl, Dst, Val, Length, LengthTy, ElemSz,
                            isTC, MachinePointerInfo(MI.getRawDest()));
    updateDAGForMaybeTailCall(MC);
    return;
  }
  case Intrinsic::call_preallocated_setup: {
    const CallBase *PreallocatedCall = FindPreallocatedCall(&I);
    SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
    SDValue Res = DAG.getNode(ISD::PREALLOCATED_SETUP, sdl, MVT::Other,
                              getRoot(), SrcValue);
    setValue(&I, Res);
    DAG.setRoot(Res);
    return;
  }
  case Intrinsic::call_preallocated_arg: {
    const CallBase *PreallocatedCall = FindPreallocatedCall(I.getOperand(0));
    SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
    SDValue Ops[3];
    Ops[0] = getRoot();
    Ops[1] = SrcValue;
    Ops[2] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(1)), sdl,
                                   MVT::i32); // arg index
    SDValue Res = DAG.getNode(
        ISD::PREALLOCATED_ARG, sdl,
        DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Ops);
    setValue(&I, Res);
    DAG.setRoot(Res.getValue(1));
    return;
  }
  case Intrinsic::dbg_declare: {
    const auto &DI = cast<DbgDeclareInst>(I);
    // Debug intrinsics are handled separately in assignment tracking mode.
    // Some intrinsics are handled right after Argument lowering.
    if (AssignmentTrackingEnabled ||
        FuncInfo.PreprocessedDbgDeclares.count(&DI))
      return;
    LLVM_DEBUG(dbgs() << "SelectionDAG visiting dbg_declare: " << DI << "\n");
    DILocalVariable *Variable = DI.getVariable();
    DIExpression *Expression = DI.getExpression();
    dropDanglingDebugInfo(Variable, Expression);
    // Assume dbg.declare cannot currently use DIArgList, i.e.
    // it is non-variadic.
    assert(!DI.hasArgList() && "Only dbg.value should currently use DIArgList");
    handleDebugDeclare(DI.getVariableLocationOp(0), Variable, Expression,
                       DI.getDebugLoc());
    return;
  }
  case Intrinsic::dbg_label: {
    const DbgLabelInst &DI = cast<DbgLabelInst>(I);
    DILabel *Label = DI.getLabel();
    assert(Label && "Missing label");

    SDDbgLabel *SDV;
    SDV = DAG.getDbgLabel(Label, dl, SDNodeOrder);
    DAG.AddDbgLabel(SDV);
    return;
  }
  case Intrinsic::dbg_assign: {
    // Debug intrinsics are handled separately in assignment tracking mode.
    if (AssignmentTrackingEnabled)
      return;
    // If assignment tracking hasn't been enabled then fall through and treat
    // the dbg.assign as a dbg.value.
    [[fallthrough]];
  }
  case Intrinsic::dbg_value: {
    // Debug intrinsics are handled separately in assignment tracking mode.
    if (AssignmentTrackingEnabled)
      return;
    const DbgValueInst &DI = cast<DbgValueInst>(I);
    assert(DI.getVariable() && "Missing variable");

    DILocalVariable *Variable = DI.getVariable();
    DIExpression *Expression = DI.getExpression();
    dropDanglingDebugInfo(Variable, Expression);

    if (DI.isKillLocation()) {
      handleKillDebugValue(Variable, Expression, DI.getDebugLoc(), SDNodeOrder);
      return;
    }

    SmallVector<Value *, 4> Values(DI.getValues());
    if (Values.empty())
      return;

    bool IsVariadic = DI.hasArgList();
    if (!handleDebugValue(Values, Variable, Expression, DI.getDebugLoc(),
                          SDNodeOrder, IsVariadic))
      addDanglingDebugInfo(Values, Variable, Expression, IsVariadic,
                           DI.getDebugLoc(), SDNodeOrder);
    return;
  }

  case Intrinsic::eh_typeid_for: {
    // Find the type id for the given typeinfo.
    GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0));
    unsigned TypeID = DAG.getMachineFunction().getTypeIDFor(GV);
    Res = DAG.getConstant(TypeID, sdl, MVT::i32);
    setValue(&I, Res);
    return;
  }

  case Intrinsic::eh_return_i32:
  case Intrinsic::eh_return_i64:
    DAG.getMachineFunction().setCallsEHReturn(true);
    DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl,
                            MVT::Other,
                            getControlRoot(),
                            getValue(I.getArgOperand(0)),
                            getValue(I.getArgOperand(1))));
    return;
  case Intrinsic::eh_unwind_init:
    DAG.getMachineFunction().setCallsUnwindInit(true);
    return;
  case Intrinsic::eh_dwarf_cfa:
    setValue(&I, DAG.getNode(ISD::EH_DWARF_CFA, sdl,
                             TLI.getPointerTy(DAG.getDataLayout()),
                             getValue(I.getArgOperand(0))));
    return;
  case Intrinsic::eh_sjlj_callsite: {
    ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(0));
    assert(FuncInfo.getCurrentCallSite() == 0 && "Overlapping call sites!");

    FuncInfo.setCurrentCallSite(CI->getZExtValue());
    return;
  }
  case Intrinsic::eh_sjlj_functioncontext: {
    // Get and store the index of the function context.
    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    AllocaInst *FnCtx =
        cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
    int FI = FuncInfo.StaticAllocaMap[FnCtx];
    MFI.setFunctionContextIndex(FI);
    return;
  }
  case Intrinsic::eh_sjlj_setjmp: {
    SDValue Ops[2];
    Ops[0] = getRoot();
    Ops[1] = getValue(I.getArgOperand(0));
    SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
                             DAG.getVTList(MVT::i32, MVT::Other), Ops);
    setValue(&I, Op.getValue(0));
    DAG.setRoot(Op.getValue(1));
    return;
  }
  case Intrinsic::eh_sjlj_longjmp:
    DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
                            getRoot(), getValue(I.getArgOperand(0))));
    return;
  case Intrinsic::eh_sjlj_setup_dispatch:
    DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_SETUP_DISPATCH, sdl, MVT::Other,
                            getRoot()));
    return;
  case Intrinsic::masked_gather:
    visitMaskedGather(I);
    return;
  case Intrinsic::masked_load:
    visitMaskedLoad(I);
    return;
  case Intrinsic::masked_scatter:
    visitMaskedScatter(I);
    return;
  case Intrinsic::masked_store:
    visitMaskedStore(I);
    return;
  case Intrinsic::masked_expandload:
    visitMaskedLoad(I, true /* IsExpanding */);
    return;
  case Intrinsic::masked_compressstore:
    visitMaskedStore(I, true /* IsCompressing */);
    return;
  case Intrinsic::powi:
    setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
                            getValue(I.getArgOperand(1)), DAG));
    return;
  case Intrinsic::log:
    setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
    return;
  case Intrinsic::log2:
    setValue(&I,
             expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
    return;
  case Intrinsic::log10:
    setValue(&I,
             expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
    return;
  case Intrinsic::exp:
    setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
    return;
  case Intrinsic::exp2:
    setValue(&I,
             expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
    return;
  case Intrinsic::pow:
    setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
                           getValue(I.getArgOperand(1)), DAG, TLI, Flags));
    return;
  case Intrinsic::sqrt:
  case Intrinsic::fabs:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::tan:
  case Intrinsic::asin:
  case Intrinsic::acos:
  case Intrinsic::atan:
  case Intrinsic::sinh:
  case Intrinsic::cosh:
  case Intrinsic::tanh:
  case Intrinsic::exp10:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::canonicalize: {
    unsigned Opcode;
    // clang-format off
    switch (Intrinsic) {
    default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
    case Intrinsic::sqrt:      Opcode = ISD::FSQRT;      break;
    case Intrinsic::fabs:      Opcode = ISD::FABS;       break;
    case Intrinsic::sin:       Opcode = ISD::FSIN;       break;
    case Intrinsic::cos:       Opcode = ISD::FCOS;       break;
    case Intrinsic::tan:       Opcode = ISD::FTAN;       break;
    case Intrinsic::asin:      Opcode = ISD::FASIN;      break;
    case Intrinsic::acos:      Opcode = ISD::FACOS;      break;
    case Intrinsic::atan:      Opcode = ISD::FATAN;      break;
    case Intrinsic::sinh:      Opcode = ISD::FSINH;      break;
    case Intrinsic::cosh:      Opcode = ISD::FCOSH;      break;
    case Intrinsic::tanh:      Opcode = ISD::FTANH;      break;
    case Intrinsic::exp10:     Opcode = ISD::FEXP10;     break;
    case Intrinsic::floor:     Opcode = ISD::FFLOOR;     break;
    case Intrinsic::ceil:      Opcode = ISD::FCEIL;      break;
    case Intrinsic::trunc:     Opcode = ISD::FTRUNC;     break;
    case Intrinsic::rint:      Opcode = ISD::FRINT;      break;
    case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
    case Intrinsic::round:     Opcode = ISD::FROUND;     break;
    case Intrinsic::roundeven: Opcode = ISD::FROUNDEVEN; break;
    case Intrinsic::canonicalize: Opcode = ISD::FCANONICALIZE; break;
    }
    // clang-format on

    setValue(&I, DAG.getNode(Opcode, sdl,
                             getValue(I.getArgOperand(0)).getValueType(),
                             getValue(I.getArgOperand(0)), Flags));
    return;
  }
  case Intrinsic::lround:
  case Intrinsic::llround:
  case Intrinsic::lrint:
  case Intrinsic::llrint: {
    unsigned Opcode;
    // clang-format off
    switch (Intrinsic) {
    default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
    case Intrinsic::lround:  Opcode = ISD::LROUND;  break;
    case Intrinsic::llround: Opcode = ISD::LLROUND; break;
    case Intrinsic::lrint:   Opcode = ISD::LRINT;   break;
    case Intrinsic::llrint:  Opcode = ISD::LLRINT;  break;
    }
    // clang-format on

    EVT RetVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
    setValue(&I, DAG.getNode(Opcode, sdl, RetVT,
                             getValue(I.getArgOperand(0))));
    return;
  }
  case Intrinsic::minnum:
    setValue(&I, DAG.getNode(ISD::FMINNUM, sdl,
                             getValue(I.getArgOperand(0)).getValueType(),
                             getValue(I.getArgOperand(0)),
                             getValue(I.getArgOperand(1)), Flags));
    return;
  case Intrinsic::maxnum:
    setValue(&I, DAG.getNode(ISD::FMAXNUM, sdl,
                             getValue(I.getArgOperand(0)).getValueType(),
                             getValue(I.getArgOperand(0)),
                             getValue(I.getArgOperand(1)), Flags));
    return;
  case Intrinsic::minimum:
    setValue(&I, DAG.getNode(ISD::FMINIMUM, sdl,
                             getValue(I.getArgOperand(0)).getValueType(),
                             getValue(I.getArgOperand(0)),
                             getValue(I.getArgOperand(1)), Flags));
    return;
  case Intrinsic::maximum:
    setValue(&I, DAG.getNode(ISD::FMAXIMUM, sdl,
                             getValue(I.getArgOperand(0)).getValueType(),
                             getValue(I.getArgOperand(0)),
                             getValue(I.getArgOperand(1)), Flags));
    return;
  case Intrinsic::minimumnum:
    setValue(&I, DAG.getNode(ISD::FMINIMUMNUM, sdl,
                             getValue(I.getArgOperand(0)).getValueType(),
                             getValue(I.getArgOperand(0)),
                             getValue(I.getArgOperand(1)), Flags));
    return;
  case Intrinsic::maximumnum:
    setValue(&I, DAG.getNode(ISD::FMAXIMUMNUM, sdl,
                             getValue(I.getArgOperand(0)).getValueType(),
                             getValue(I.getArgOperand(0)),
                             getValue(I.getArgOperand(1)), Flags));
    return;
  case Intrinsic::copysign:
    setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
                             getValue(I.getArgOperand(0)).getValueType(),
                             getValue(I.getArgOperand(0)),
                             getValue(I.getArgOperand(1)), Flags));
    return;
  case Intrinsic::ldexp:
    setValue(&I, DAG.getNode(ISD::FLDEXP, sdl,
                             getValue(I.getArgOperand(0)).getValueType(),
                             getValue(I.getArgOperand(0)),
                             getValue(I.getArgOperand(1)), Flags));
    return;
  case Intrinsic::frexp: {
    SmallVector<EVT, 2> ValueVTs;
    ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
    SDVTList VTs = DAG.getVTList(ValueVTs);
    setValue(&I,
             DAG.getNode(ISD::FFREXP, sdl, VTs, getValue(I.getArgOperand(0))));
    return;
  }
  case Intrinsic::arithmetic_fence: {
    setValue(&I, DAG.getNode(ISD::ARITH_FENCE, sdl,
                             getValue(I.getArgOperand(0)).getValueType(),
                             getValue(I.getArgOperand(0)), Flags));
    return;
  }
  case Intrinsic::fma:
    setValue(&I, DAG.getNode(
                     ISD::FMA, sdl, getValue(I.getArgOperand(0)).getValueType(),
                     getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)),
                     getValue(I.getArgOperand(2)), Flags));
    return;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:
#include "llvm/IR/ConstrainedOps.def"
    visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I));
    return;
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
#include "llvm/IR/VPIntrinsics.def"
    visitVectorPredicationIntrinsic(cast<VPIntrinsic>(I));
    return;
  case Intrinsic::fptrunc_round: {
    // Get the last argument, the metadata, and convert it to an integer in
    // the call.
    Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(1))->getMetadata();
    std::optional<RoundingMode> RoundMode =
        convertStrToRoundingMode(cast<MDString>(MD)->getString());

    EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());

    // Propagate fast-math-flags from IR to node(s).
    SDNodeFlags Flags;
    Flags.copyFMF(*cast<FPMathOperator>(&I));
    SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);

    SDValue Result;
    Result = DAG.getNode(
        ISD::FPTRUNC_ROUND, sdl, VT, getValue(I.getArgOperand(0)),
        DAG.getTargetConstant((int)*RoundMode, sdl, MVT::i32));
    setValue(&I, Result);

    return;
  }
  case Intrinsic::fmuladd: {
    EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
    if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
        TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
      setValue(&I, DAG.getNode(ISD::FMA, sdl,
                               getValue(I.getArgOperand(0)).getValueType(),
                               getValue(I.getArgOperand(0)),
                               getValue(I.getArgOperand(1)),
                               getValue(I.getArgOperand(2)), Flags));
    } else {
      // TODO: Intrinsic calls should have fast-math-flags.
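      // Fall back to an explicit, unfused multiply and add; unlike the FMA
      // form above, this pair may round twice, which llvm.fmuladd permits.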
      SDValue Mul = DAG.getNode(
          ISD::FMUL, sdl, getValue(I.getArgOperand(0)).getValueType(),
          getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)), Flags);
      SDValue Add = DAG.getNode(ISD::FADD, sdl,
                                getValue(I.getArgOperand(0)).getValueType(),
                                Mul, getValue(I.getArgOperand(2)), Flags);
      setValue(&I, Add);
    }
    return;
  }
  case Intrinsic::convert_to_fp16:
    setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16,
                             DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16,
                                         getValue(I.getArgOperand(0)),
                                         DAG.getTargetConstant(0, sdl,
                                                               MVT::i32))));
    return;
  case Intrinsic::convert_from_fp16:
    setValue(&I, DAG.getNode(ISD::FP_EXTEND, sdl,
                             TLI.getValueType(DAG.getDataLayout(), I.getType()),
                             DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
                                         getValue(I.getArgOperand(0)))));
    return;
  case Intrinsic::fptosi_sat: {
    EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
    setValue(&I, DAG.getNode(ISD::FP_TO_SINT_SAT, sdl, VT,
                             getValue(I.getArgOperand(0)),
                             DAG.getValueType(VT.getScalarType())));
    return;
  }
  case Intrinsic::fptoui_sat: {
    EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
    setValue(&I, DAG.getNode(ISD::FP_TO_UINT_SAT, sdl, VT,
                             getValue(I.getArgOperand(0)),
                             DAG.getValueType(VT.getScalarType())));
    return;
  }
  case Intrinsic::set_rounding:
    Res = DAG.getNode(ISD::SET_ROUNDING, sdl, MVT::Other,
                      {getRoot(), getValue(I.getArgOperand(0))});
    setValue(&I, Res);
    DAG.setRoot(Res.getValue(0));
    return;
  case Intrinsic::is_fpclass: {
    const DataLayout DLayout = DAG.getDataLayout();
    EVT DestVT = TLI.getValueType(DLayout, I.getType());
    EVT ArgVT = TLI.getValueType(DLayout, I.getArgOperand(0)->getType());
    FPClassTest Test = static_cast<FPClassTest>(
        cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
    MachineFunction &MF = DAG.getMachineFunction();
    const Function &F = MF.getFunction();
    SDValue Op = getValue(I.getArgOperand(0));
    SDNodeFlags Flags;
    Flags.setNoFPExcept(
        !F.getAttributes().hasFnAttr(llvm::Attribute::StrictFP));
    // If ISD::IS_FPCLASS should be expanded, do it right now, because the
    // expansion can use illegal types. Making expansion early allows
    // legalizing these types prior to selection.
    if (!TLI.isOperationLegal(ISD::IS_FPCLASS, ArgVT) &&
        !TLI.isOperationCustom(ISD::IS_FPCLASS, ArgVT)) {
      SDValue Result = TLI.expandIS_FPCLASS(DestVT, Op, Test, Flags, sdl, DAG);
      setValue(&I, Result);
      return;
    }

    SDValue Check = DAG.getTargetConstant(Test, sdl, MVT::i32);
    SDValue V = DAG.getNode(ISD::IS_FPCLASS, sdl, DestVT, {Op, Check}, Flags);
    setValue(&I, V);
    return;
  }
  case Intrinsic::get_fpenv: {
    const DataLayout DLayout = DAG.getDataLayout();
    EVT EnvVT = TLI.getValueType(DLayout, I.getType());
    Align TempAlign = DAG.getEVTAlign(EnvVT);
    SDValue Chain = getRoot();
    // Use GET_FPENV if it is legal or custom. Otherwise use a memory-based
    // node and temporary storage on the stack.
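    // The memory-based path below writes the environment into a stack
    // temporary via a GET_FPENV_MEM node, then reloads it as a plain value.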
    if (TLI.isOperationLegalOrCustom(ISD::GET_FPENV, EnvVT)) {
      Res = DAG.getNode(
          ISD::GET_FPENV, sdl,
          DAG.getVTList(TLI.getValueType(DAG.getDataLayout(), I.getType()),
                        MVT::Other),
          Chain);
    } else {
      SDValue Temp = DAG.CreateStackTemporary(EnvVT, TempAlign.value());
      int SPFI = cast<FrameIndexSDNode>(Temp.getNode())->getIndex();
      auto MPI =
          MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
      MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
          MPI, MachineMemOperand::MOStore, LocationSize::beforeOrAfterPointer(),
          TempAlign);
      Chain = DAG.getGetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
      Res = DAG.getLoad(EnvVT, sdl, Chain, Temp, MPI);
    }
    setValue(&I, Res);
    DAG.setRoot(Res.getValue(1));
    return;
  }
  case Intrinsic::set_fpenv: {
    const DataLayout DLayout = DAG.getDataLayout();
    SDValue Env = getValue(I.getArgOperand(0));
    EVT EnvVT = Env.getValueType();
    Align TempAlign = DAG.getEVTAlign(EnvVT);
    SDValue Chain = getRoot();
    // If SET_FPENV is custom or legal, use it. Otherwise load the environment
    // from memory.
    if (TLI.isOperationLegalOrCustom(ISD::SET_FPENV, EnvVT)) {
      Chain = DAG.getNode(ISD::SET_FPENV, sdl, MVT::Other, Chain, Env);
    } else {
      // Allocate space on the stack, copy the environment bits into it and
      // use this memory in SET_FPENV_MEM.
      SDValue Temp = DAG.CreateStackTemporary(EnvVT, TempAlign.value());
      int SPFI = cast<FrameIndexSDNode>(Temp.getNode())->getIndex();
      auto MPI =
          MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
      Chain = DAG.getStore(Chain, sdl, Env, Temp, MPI, TempAlign,
                           MachineMemOperand::MOStore);
      MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
          MPI, MachineMemOperand::MOLoad, LocationSize::beforeOrAfterPointer(),
          TempAlign);
      Chain = DAG.getSetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
    }
    DAG.setRoot(Chain);
    return;
  }
  case Intrinsic::reset_fpenv:
    DAG.setRoot(DAG.getNode(ISD::RESET_FPENV, sdl, MVT::Other, getRoot()));
    return;
  case Intrinsic::get_fpmode:
    Res = DAG.getNode(
        ISD::GET_FPMODE, sdl,
        DAG.getVTList(TLI.getValueType(DAG.getDataLayout(), I.getType()),
                      MVT::Other),
        DAG.getRoot());
    setValue(&I, Res);
    DAG.setRoot(Res.getValue(1));
    return;
  case Intrinsic::set_fpmode:
    Res = DAG.getNode(ISD::SET_FPMODE, sdl, MVT::Other, {DAG.getRoot()},
                      getValue(I.getArgOperand(0)));
    DAG.setRoot(Res);
    return;
  case Intrinsic::reset_fpmode: {
    Res = DAG.getNode(ISD::RESET_FPMODE, sdl, MVT::Other, getRoot());
    DAG.setRoot(Res);
    return;
  }
  case Intrinsic::pcmarker: {
    SDValue Tmp = getValue(I.getArgOperand(0));
    DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
    return;
  }
  case Intrinsic::readcyclecounter: {
    SDValue Op = getRoot();
    Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
                      DAG.getVTList(MVT::i64, MVT::Other), Op);
    setValue(&I, Res);
    DAG.setRoot(Res.getValue(1));
    return;
  }
  case Intrinsic::readsteadycounter: {
    SDValue Op = getRoot();
    Res = DAG.getNode(ISD::READSTEADYCOUNTER, sdl,
                      DAG.getVTList(MVT::i64, MVT::Other), Op);
    setValue(&I, Res);
    DAG.setRoot(Res.getValue(1));
    return;
  }
  case Intrinsic::bitreverse:
    setValue(&I, DAG.getNode(ISD::BITREVERSE, sdl,
                             getValue(I.getArgOperand(0)).getValueType(),
                             getValue(I.getArgOperand(0))));
    return;
  case Intrinsic::bswap:
    setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
                             getValue(I.getArgOperand(0)).getValueType(),
                             getValue(I.getArgOperand(0))));
    return;
  case Intrinsic::cttz: {
    SDValue Arg = getValue(I.getArgOperand(0));
    ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
    EVT Ty = Arg.getValueType();
    setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
                             sdl, Ty, Arg));
    return;
  }
  case Intrinsic::ctlz: {
    SDValue Arg = getValue(I.getArgOperand(0));
    ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
    EVT Ty = Arg.getValueType();
    setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
                             sdl, Ty, Arg));
    return;
  }
  case Intrinsic::ctpop: {
    SDValue Arg = getValue(I.getArgOperand(0));
    EVT Ty = Arg.getValueType();
    setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
    return;
  }
  case Intrinsic::fshl:
  case Intrinsic::fshr: {
    bool IsFSHL = Intrinsic == Intrinsic::fshl;
    SDValue X = getValue(I.getArgOperand(0));
    SDValue Y = getValue(I.getArgOperand(1));
    SDValue Z = getValue(I.getArgOperand(2));
    EVT VT = X.getValueType();

    if (X == Y) {
      auto RotateOpcode = IsFSHL ? ISD::ROTL : ISD::ROTR;
      setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, Z));
    } else {
      auto FunnelOpcode = IsFSHL ? ISD::FSHL : ISD::FSHR;
      setValue(&I, DAG.getNode(FunnelOpcode, sdl, VT, X, Y, Z));
    }
    return;
  }
  case Intrinsic::sadd_sat: {
    SDValue Op1 = getValue(I.getArgOperand(0));
    SDValue Op2 = getValue(I.getArgOperand(1));
    setValue(&I, DAG.getNode(ISD::SADDSAT, sdl, Op1.getValueType(), Op1, Op2));
    return;
  }
  case Intrinsic::uadd_sat: {
    SDValue Op1 = getValue(I.getArgOperand(0));
    SDValue Op2 = getValue(I.getArgOperand(1));
    setValue(&I, DAG.getNode(ISD::UADDSAT, sdl, Op1.getValueType(), Op1, Op2));
    return;
  }
  case Intrinsic::ssub_sat: {
    SDValue Op1 = getValue(I.getArgOperand(0));
    SDValue Op2 = getValue(I.getArgOperand(1));
    setValue(&I, DAG.getNode(ISD::SSUBSAT, sdl, Op1.getValueType(), Op1, Op2));
    return;
  }
  case Intrinsic::usub_sat: {
    SDValue Op1 = getValue(I.getArgOperand(0));
    SDValue Op2 = getValue(I.getArgOperand(1));
    setValue(&I, DAG.getNode(ISD::USUBSAT, sdl, Op1.getValueType(), Op1, Op2));
    return;
  }
  case Intrinsic::sshl_sat: {
    SDValue Op1 = getValue(I.getArgOperand(0));
    SDValue Op2 = getValue(I.getArgOperand(1));
    setValue(&I, DAG.getNode(ISD::SSHLSAT, sdl, Op1.getValueType(), Op1, Op2));
    return;
  }
  case Intrinsic::ushl_sat: {
    SDValue Op1 = getValue(I.getArgOperand(0));
    SDValue Op2 = getValue(I.getArgOperand(1));
    setValue(&I, DAG.getNode(ISD::USHLSAT, sdl, Op1.getValueType(), Op1, Op2));
    return;
  }
  case Intrinsic::smul_fix:
  case Intrinsic::umul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix_sat: {
    SDValue Op1 = getValue(I.getArgOperand(0));
    SDValue Op2 = getValue(I.getArgOperand(1));
    SDValue Op3 = getValue(I.getArgOperand(2));
    setValue(&I, DAG.getNode(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
                             Op1.getValueType(), Op1, Op2, Op3));
    return;
  }
  case Intrinsic::sdiv_fix:
  case Intrinsic::udiv_fix:
  case Intrinsic::sdiv_fix_sat:
  case Intrinsic::udiv_fix_sat: {
    SDValue Op1 = getValue(I.getArgOperand(0));
    SDValue Op2 = getValue(I.getArgOperand(1));
    SDValue Op3 = getValue(I.getArgOperand(2));
    setValue(&I, expandDivFix(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
                              Op1, Op2, Op3, DAG, TLI));
    return;
  }
  case Intrinsic::smax: {
    SDValue Op1 = getValue(I.getArgOperand(0));
    SDValue Op2 = getValue(I.getArgOperand(1));
    setValue(&I, DAG.getNode(ISD::SMAX, sdl, Op1.getValueType(), Op1, Op2));
    return;
  }
  case Intrinsic::smin: {
    SDValue Op1 = getValue(I.getArgOperand(0));
    SDValue Op2 = getValue(I.getArgOperand(1));
    setValue(&I, DAG.getNode(ISD::SMIN, sdl, Op1.getValueType(), Op1, Op2));
    return;
  }
  case Intrinsic::umax: {
    SDValue Op1 = getValue(I.getArgOperand(0));
    SDValue Op2 = getValue(I.getArgOperand(1));
    setValue(&I, DAG.getNode(ISD::UMAX, sdl, Op1.getValueType(), Op1, Op2));
    return;
  }
  case Intrinsic::umin: {
    SDValue Op1 = getValue(I.getArgOperand(0));
    SDValue Op2 = getValue(I.getArgOperand(1));
    setValue(&I, DAG.getNode(ISD::UMIN, sdl, Op1.getValueType(), Op1, Op2));
    return;
  }
  case Intrinsic::abs: {
    // TODO: Preserve "int min is poison" arg in SDAG?
    SDValue Op1 = getValue(I.getArgOperand(0));
    setValue(&I, DAG.getNode(ISD::ABS, sdl, Op1.getValueType(), Op1));
    return;
  }
  case Intrinsic::scmp: {
    SDValue Op1 = getValue(I.getArgOperand(0));
    SDValue Op2 = getValue(I.getArgOperand(1));
    EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
    setValue(&I, DAG.getNode(ISD::SCMP, sdl, DestVT, Op1, Op2));
    break;
  }
  case Intrinsic::ucmp: {
    SDValue Op1 = getValue(I.getArgOperand(0));
    SDValue Op2 = getValue(I.getArgOperand(1));
    EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
    setValue(&I, DAG.getNode(ISD::UCMP, sdl, DestVT, Op1, Op2));
    break;
  }
  case Intrinsic::stacksave: {
    SDValue Op = getRoot();
    EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
    Res = DAG.getNode(ISD::STACKSAVE, sdl, DAG.getVTList(VT, MVT::Other), Op);
    setValue(&I, Res);
    DAG.setRoot(Res.getValue(1));
    return;
  }
  case Intrinsic::stackrestore:
    Res = getValue(I.getArgOperand(0));
    DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
    return;
  case Intrinsic::get_dynamic_area_offset: {
    SDValue Op = getRoot();
    EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout());
    EVT ResTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
    // The result type for @llvm.get.dynamic.area.offset should match the
    // target's PtrTy.
7336 if (PtrTy.getFixedSizeInBits() < ResTy.getFixedSizeInBits()) 7337 report_fatal_error("Wrong result type for @llvm.get.dynamic.area.offset" 7338 " intrinsic!"); 7339 Res = DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl, DAG.getVTList(ResTy), 7340 Op); 7341 DAG.setRoot(Op); 7342 setValue(&I, Res); 7343 return; 7344 } 7345 case Intrinsic::stackguard: { 7346 MachineFunction &MF = DAG.getMachineFunction(); 7347 const Module &M = *MF.getFunction().getParent(); 7348 EVT PtrTy = TLI.getValueType(DAG.getDataLayout(), I.getType()); 7349 SDValue Chain = getRoot(); 7350 if (TLI.useLoadStackGuardNode()) { 7351 Res = getLoadStackGuard(DAG, sdl, Chain); 7352 Res = DAG.getPtrExtOrTrunc(Res, sdl, PtrTy); 7353 } else { 7354 const Value *Global = TLI.getSDagStackGuard(M); 7355 Align Align = DAG.getDataLayout().getPrefTypeAlign(Global->getType()); 7356 Res = DAG.getLoad(PtrTy, sdl, Chain, getValue(Global), 7357 MachinePointerInfo(Global, 0), Align, 7358 MachineMemOperand::MOVolatile); 7359 } 7360 if (TLI.useStackGuardXorFP()) 7361 Res = TLI.emitStackGuardXorFP(DAG, Res, sdl); 7362 DAG.setRoot(Chain); 7363 setValue(&I, Res); 7364 return; 7365 } 7366 case Intrinsic::stackprotector: { 7367 // Emit code into the DAG to store the stack guard onto the stack. 7368 MachineFunction &MF = DAG.getMachineFunction(); 7369 MachineFrameInfo &MFI = MF.getFrameInfo(); 7370 SDValue Src, Chain = getRoot(); 7371 7372 if (TLI.useLoadStackGuardNode()) 7373 Src = getLoadStackGuard(DAG, sdl, Chain); 7374 else 7375 Src = getValue(I.getArgOperand(0)); // The guard's value. 7376 7377 AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1)); 7378 7379 int FI = FuncInfo.StaticAllocaMap[Slot]; 7380 MFI.setStackProtectorIndex(FI); 7381 EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout()); 7382 7383 SDValue FIN = DAG.getFrameIndex(FI, PtrTy); 7384 7385 // Store the stack protector onto the stack. 7386 Res = DAG.getStore( 7387 Chain, sdl, Src, FIN, 7388 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), 7389 MaybeAlign(), MachineMemOperand::MOVolatile); 7390 setValue(&I, Res); 7391 DAG.setRoot(Res); 7392 return; 7393 } 7394 case Intrinsic::objectsize: 7395 llvm_unreachable("llvm.objectsize.* should have been lowered already"); 7396 7397 case Intrinsic::is_constant: 7398 llvm_unreachable("llvm.is.constant.* should have been lowered already"); 7399 7400 case Intrinsic::annotation: 7401 case Intrinsic::ptr_annotation: 7402 case Intrinsic::launder_invariant_group: 7403 case Intrinsic::strip_invariant_group: 7404 // Drop the intrinsic, but forward the value 7405 setValue(&I, getValue(I.getOperand(0))); 7406 return; 7407 7408 case Intrinsic::assume: 7409 case Intrinsic::experimental_noalias_scope_decl: 7410 case Intrinsic::var_annotation: 7411 case Intrinsic::sideeffect: 7412 // Discard annotate attributes, noalias scope declarations, assumptions, and 7413 // artificial side-effects. 7414 return; 7415 7416 case Intrinsic::codeview_annotation: { 7417 // Emit a label associated with this metadata. 
7418 MachineFunction &MF = DAG.getMachineFunction(); 7419 MCSymbol *Label = MF.getContext().createTempSymbol("annotation", true); 7420 Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(0))->getMetadata(); 7421 MF.addCodeViewAnnotation(Label, cast<MDNode>(MD)); 7422 Res = DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl, getRoot(), Label); 7423 DAG.setRoot(Res); 7424 return; 7425 } 7426 7427 case Intrinsic::init_trampoline: { 7428 const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts()); 7429 7430 SDValue Ops[6]; 7431 Ops[0] = getRoot(); 7432 Ops[1] = getValue(I.getArgOperand(0)); 7433 Ops[2] = getValue(I.getArgOperand(1)); 7434 Ops[3] = getValue(I.getArgOperand(2)); 7435 Ops[4] = DAG.getSrcValue(I.getArgOperand(0)); 7436 Ops[5] = DAG.getSrcValue(F); 7437 7438 Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops); 7439 7440 DAG.setRoot(Res); 7441 return; 7442 } 7443 case Intrinsic::adjust_trampoline: 7444 setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl, 7445 TLI.getPointerTy(DAG.getDataLayout()), 7446 getValue(I.getArgOperand(0)))); 7447 return; 7448 case Intrinsic::gcroot: { 7449 assert(DAG.getMachineFunction().getFunction().hasGC() && 7450 "only valid in functions with gc specified, enforced by Verifier"); 7451 assert(GFI && "implied by previous"); 7452 const Value *Alloca = I.getArgOperand(0)->stripPointerCasts(); 7453 const Constant *TypeMap = cast<Constant>(I.getArgOperand(1)); 7454 7455 FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode()); 7456 GFI->addStackRoot(FI->getIndex(), TypeMap); 7457 return; 7458 } 7459 case Intrinsic::gcread: 7460 case Intrinsic::gcwrite: 7461 llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!"); 7462 case Intrinsic::get_rounding: 7463 Res = DAG.getNode(ISD::GET_ROUNDING, sdl, {MVT::i32, MVT::Other}, getRoot()); 7464 setValue(&I, Res); 7465 DAG.setRoot(Res.getValue(1)); 7466 return; 7467 7468 case Intrinsic::expect: 7469 // Just replace __builtin_expect(exp, c) with EXP. 
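// A minimal illustration (hypothetical IR, not from any test):
//   %v = call i64 @llvm.expect.i64(i64 %x, i64 1)
// lowers to the SDValue already computed for %x; the expected value is only
// a hint, and has normally been consumed (e.g. for branch weights) by
// earlier IR passes.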
7470 setValue(&I, getValue(I.getArgOperand(0))); 7471 return; 7472 7473 case Intrinsic::ubsantrap: 7474 case Intrinsic::debugtrap: 7475 case Intrinsic::trap: { 7476 StringRef TrapFuncName = 7477 I.getAttributes().getFnAttr("trap-func-name").getValueAsString(); 7478 if (TrapFuncName.empty()) { 7479 switch (Intrinsic) { 7480 case Intrinsic::trap: 7481 DAG.setRoot(DAG.getNode(ISD::TRAP, sdl, MVT::Other, getRoot())); 7482 break; 7483 case Intrinsic::debugtrap: 7484 DAG.setRoot(DAG.getNode(ISD::DEBUGTRAP, sdl, MVT::Other, getRoot())); 7485 break; 7486 case Intrinsic::ubsantrap: 7487 DAG.setRoot(DAG.getNode( 7488 ISD::UBSANTRAP, sdl, MVT::Other, getRoot(), 7489 DAG.getTargetConstant( 7490 cast<ConstantInt>(I.getArgOperand(0))->getZExtValue(), sdl, 7491 MVT::i32))); 7492 break; 7493 default: llvm_unreachable("unknown trap intrinsic"); 7494 } 7495 DAG.addNoMergeSiteInfo(DAG.getRoot().getNode(), 7496 I.hasFnAttr(Attribute::NoMerge)); 7497 return; 7498 } 7499 TargetLowering::ArgListTy Args; 7500 if (Intrinsic == Intrinsic::ubsantrap) { 7501 Args.push_back(TargetLoweringBase::ArgListEntry()); 7502 Args[0].Val = I.getArgOperand(0); 7503 Args[0].Node = getValue(Args[0].Val); 7504 Args[0].Ty = Args[0].Val->getType(); 7505 } 7506 7507 TargetLowering::CallLoweringInfo CLI(DAG); 7508 CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee( 7509 CallingConv::C, I.getType(), 7510 DAG.getExternalSymbol(TrapFuncName.data(), 7511 TLI.getPointerTy(DAG.getDataLayout())), 7512 std::move(Args)); 7513 CLI.NoMerge = I.hasFnAttr(Attribute::NoMerge); 7514 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI); 7515 DAG.setRoot(Result.second); 7516 return; 7517 } 7518 7519 case Intrinsic::allow_runtime_check: 7520 case Intrinsic::allow_ubsan_check: 7521 setValue(&I, getValue(ConstantInt::getTrue(I.getType()))); 7522 return; 7523 7524 case Intrinsic::uadd_with_overflow: 7525 case Intrinsic::sadd_with_overflow: 7526 case Intrinsic::usub_with_overflow: 7527 case Intrinsic::ssub_with_overflow: 7528 case Intrinsic::umul_with_overflow: 7529 case Intrinsic::smul_with_overflow: { 7530 ISD::NodeType Op; 7531 switch (Intrinsic) { 7532 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 7533 case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break; 7534 case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break; 7535 case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break; 7536 case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break; 7537 case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break; 7538 case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break; 7539 } 7540 SDValue Op1 = getValue(I.getArgOperand(0)); 7541 SDValue Op2 = getValue(I.getArgOperand(1)); 7542 7543 EVT ResultVT = Op1.getValueType(); 7544 EVT OverflowVT = MVT::i1; 7545 if (ResultVT.isVector()) 7546 OverflowVT = EVT::getVectorVT( 7547 *Context, OverflowVT, ResultVT.getVectorElementCount()); 7548 7549 SDVTList VTs = DAG.getVTList(ResultVT, OverflowVT); 7550 setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2)); 7551 return; 7552 } 7553 case Intrinsic::prefetch: { 7554 SDValue Ops[5]; 7555 unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue(); 7556 auto Flags = rw == 0 ? 
MachineMemOperand::MOLoad : MachineMemOperand::MOStore; 7557 Ops[0] = DAG.getRoot(); 7558 Ops[1] = getValue(I.getArgOperand(0)); 7559 Ops[2] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(1)), sdl, 7560 MVT::i32); 7561 Ops[3] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(2)), sdl, 7562 MVT::i32); 7563 Ops[4] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(3)), sdl, 7564 MVT::i32); 7565 SDValue Result = DAG.getMemIntrinsicNode( 7566 ISD::PREFETCH, sdl, DAG.getVTList(MVT::Other), Ops, 7567 EVT::getIntegerVT(*Context, 8), MachinePointerInfo(I.getArgOperand(0)), 7568 /* align */ std::nullopt, Flags); 7569 7570 // Chain the prefetch in parallel with any pending loads, to stay out of 7571 // the way of later optimizations. 7572 PendingLoads.push_back(Result); 7573 Result = getRoot(); 7574 DAG.setRoot(Result); 7575 return; 7576 } 7577 case Intrinsic::lifetime_start: 7578 case Intrinsic::lifetime_end: { 7579 bool IsStart = (Intrinsic == Intrinsic::lifetime_start); 7580 // Stack coloring is not enabled in O0; discard region information. 7581 if (TM.getOptLevel() == CodeGenOptLevel::None) 7582 return; 7583 7584 const int64_t ObjectSize = 7585 cast<ConstantInt>(I.getArgOperand(0))->getSExtValue(); 7586 Value *const ObjectPtr = I.getArgOperand(1); 7587 SmallVector<const Value *, 4> Allocas; 7588 getUnderlyingObjects(ObjectPtr, Allocas); 7589 7590 for (const Value *Alloca : Allocas) { 7591 const AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(Alloca); 7592 7593 // Could not find an Alloca. 7594 if (!LifetimeObject) 7595 continue; 7596 7597 // First check that the Alloca is static, otherwise it won't have a 7598 // valid frame index. 7599 auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject); 7600 if (SI == FuncInfo.StaticAllocaMap.end()) 7601 return; 7602 7603 const int FrameIndex = SI->second; 7604 int64_t Offset; 7605 if (GetPointerBaseWithConstantOffset( 7606 ObjectPtr, Offset, DAG.getDataLayout()) != LifetimeObject) 7607 Offset = -1; // Cannot determine offset from alloca to lifetime object. 7608 Res = DAG.getLifetimeNode(IsStart, sdl, getRoot(), FrameIndex, ObjectSize, 7609 Offset); 7610 DAG.setRoot(Res); 7611 } 7612 return; 7613 } 7614 case Intrinsic::pseudoprobe: { 7615 auto Guid = cast<ConstantInt>(I.getArgOperand(0))->getZExtValue(); 7616 auto Index = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue(); 7617 auto Attr = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue(); 7618 Res = DAG.getPseudoProbeNode(sdl, getRoot(), Guid, Index, Attr); 7619 DAG.setRoot(Res); 7620 return; 7621 } 7622 case Intrinsic::invariant_start: 7623 // Discard region information. 7624 setValue(&I, 7625 DAG.getUNDEF(TLI.getValueType(DAG.getDataLayout(), I.getType()))); 7626 return; 7627 case Intrinsic::invariant_end: 7628 // Discard region information.
7629 return; 7630 case Intrinsic::clear_cache: { 7631 SDValue InputChain = DAG.getRoot(); 7632 SDValue StartVal = getValue(I.getArgOperand(0)); 7633 SDValue EndVal = getValue(I.getArgOperand(1)); 7634 Res = DAG.getNode(ISD::CLEAR_CACHE, sdl, DAG.getVTList(MVT::Other), 7635 {InputChain, StartVal, EndVal}); 7636 setValue(&I, Res); 7637 DAG.setRoot(Res); 7638 return; 7639 } 7640 case Intrinsic::donothing: 7641 case Intrinsic::seh_try_begin: 7642 case Intrinsic::seh_scope_begin: 7643 case Intrinsic::seh_try_end: 7644 case Intrinsic::seh_scope_end: 7645 // ignore 7646 return; 7647 case Intrinsic::experimental_stackmap: 7648 visitStackmap(I); 7649 return; 7650 case Intrinsic::experimental_patchpoint_void: 7651 case Intrinsic::experimental_patchpoint: 7652 visitPatchpoint(I); 7653 return; 7654 case Intrinsic::experimental_gc_statepoint: 7655 LowerStatepoint(cast<GCStatepointInst>(I)); 7656 return; 7657 case Intrinsic::experimental_gc_result: 7658 visitGCResult(cast<GCResultInst>(I)); 7659 return; 7660 case Intrinsic::experimental_gc_relocate: 7661 visitGCRelocate(cast<GCRelocateInst>(I)); 7662 return; 7663 case Intrinsic::instrprof_cover: 7664 llvm_unreachable("instrprof failed to lower a cover"); 7665 case Intrinsic::instrprof_increment: 7666 llvm_unreachable("instrprof failed to lower an increment"); 7667 case Intrinsic::instrprof_timestamp: 7668 llvm_unreachable("instrprof failed to lower a timestamp"); 7669 case Intrinsic::instrprof_value_profile: 7670 llvm_unreachable("instrprof failed to lower a value profiling call"); 7671 case Intrinsic::instrprof_mcdc_parameters: 7672 llvm_unreachable("instrprof failed to lower mcdc parameters"); 7673 case Intrinsic::instrprof_mcdc_tvbitmap_update: 7674 llvm_unreachable("instrprof failed to lower an mcdc tvbitmap update"); 7675 case Intrinsic::localescape: { 7676 MachineFunction &MF = DAG.getMachineFunction(); 7677 const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo(); 7678 7679 // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission 7680 // is the same on all targets. 7681 for (unsigned Idx = 0, E = I.arg_size(); Idx < E; ++Idx) { 7682 Value *Arg = I.getArgOperand(Idx)->stripPointerCasts(); 7683 if (isa<ConstantPointerNull>(Arg)) 7684 continue; // Skip null pointers. They represent a hole in index space. 7685 AllocaInst *Slot = cast<AllocaInst>(Arg); 7686 assert(FuncInfo.StaticAllocaMap.count(Slot) && 7687 "can only escape static allocas"); 7688 int FI = FuncInfo.StaticAllocaMap[Slot]; 7689 MCSymbol *FrameAllocSym = MF.getContext().getOrCreateFrameAllocSymbol( 7690 GlobalValue::dropLLVMManglingEscape(MF.getName()), Idx); 7691 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl, 7692 TII->get(TargetOpcode::LOCAL_ESCAPE)) 7693 .addSym(FrameAllocSym) 7694 .addFrameIndex(FI); 7695 } 7696 7697 return; 7698 } 7699 7700 case Intrinsic::localrecover: { 7701 // i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx) 7702 MachineFunction &MF = DAG.getMachineFunction(); 7703 7704 // Get the symbol that defines the frame offset. 
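// (Sketch of the computation below: the result is %fp plus the frame
// offset that the matching llvm.localescape recorded for index %idx of
// function %fn via a LOCAL_ESCAPE label.)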
7705 auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts()); 7706 auto *Idx = cast<ConstantInt>(I.getArgOperand(2)); 7707 unsigned IdxVal = 7708 unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max())); 7709 MCSymbol *FrameAllocSym = MF.getContext().getOrCreateFrameAllocSymbol( 7710 GlobalValue::dropLLVMManglingEscape(Fn->getName()), IdxVal); 7711 7712 Value *FP = I.getArgOperand(1); 7713 SDValue FPVal = getValue(FP); 7714 EVT PtrVT = FPVal.getValueType(); 7715 7716 // Create an MCSymbol for the label to avoid any target lowering 7717 // that would make this PC relative. 7718 SDValue OffsetSym = DAG.getMCSymbol(FrameAllocSym, PtrVT); 7719 SDValue OffsetVal = 7720 DAG.getNode(ISD::LOCAL_RECOVER, sdl, PtrVT, OffsetSym); 7721 7722 // Add the offset to the FP. 7723 SDValue Add = DAG.getMemBasePlusOffset(FPVal, OffsetVal, sdl); 7724 setValue(&I, Add); 7725 7726 return; 7727 } 7728 7729 case Intrinsic::fake_use: { 7730 Value *V = I.getArgOperand(0); 7731 SDValue Ops[2]; 7732 // For Values not declared or previously used in this basic block, the 7733 // NodeMap will not have an entry, and `getValue` will assert if V has no 7734 // valid register value. 7735 auto FakeUseValue = [&]() -> SDValue { 7736 SDValue &N = NodeMap[V]; 7737 if (N.getNode()) 7738 return N; 7739 7740 // If there's a virtual register allocated and initialized for this 7741 // value, use it. 7742 if (SDValue copyFromReg = getCopyFromRegs(V, V->getType())) 7743 return copyFromReg; 7744 // FIXME: Do we want to preserve constants? It seems pointless. 7745 if (isa<Constant>(V)) 7746 return getValue(V); 7747 return SDValue(); 7748 }(); 7749 // Do not translate a fake use with an undef operand, or any other empty 7750 // SDValue. 7751 if (!FakeUseValue || FakeUseValue.isUndef()) 7752 return; 7753 Ops[0] = getRoot(); 7754 Ops[1] = FakeUseValue; 7757 DAG.setRoot(DAG.getNode(ISD::FAKE_USE, sdl, MVT::Other, Ops)); 7758 return; 7759 } 7760 7761 case Intrinsic::eh_exceptionpointer: 7762 case Intrinsic::eh_exceptioncode: { 7763 // Get the exception pointer vreg, copy from it, and resize it to fit. 7764 const auto *CPI = cast<CatchPadInst>(I.getArgOperand(0)); 7765 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout()); 7766 const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT); 7767 Register VReg = FuncInfo.getCatchPadExceptionPointerVReg(CPI, PtrRC); 7768 SDValue N = DAG.getCopyFromReg(DAG.getEntryNode(), sdl, VReg, PtrVT); 7769 if (Intrinsic == Intrinsic::eh_exceptioncode) 7770 N = DAG.getZExtOrTrunc(N, sdl, MVT::i32); 7771 setValue(&I, N); 7772 return; 7773 } 7774 case Intrinsic::xray_customevent: { 7775 // Here we want to make sure that the intrinsic behaves as if it has a 7776 // specific calling convention. 7777 const auto &Triple = DAG.getTarget().getTargetTriple(); 7778 if (!Triple.isAArch64(64) && Triple.getArch() != Triple::x86_64) 7779 return; 7780 7781 SmallVector<SDValue, 8> Ops; 7782 7783 // We want to say that we always want the arguments in registers.
7784 SDValue LogEntryVal = getValue(I.getArgOperand(0)); 7785 SDValue StrSizeVal = getValue(I.getArgOperand(1)); 7786 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 7787 SDValue Chain = getRoot(); 7788 Ops.push_back(LogEntryVal); 7789 Ops.push_back(StrSizeVal); 7790 Ops.push_back(Chain); 7791 7792 // We need to enforce the calling convention for the callsite, so that 7793 // argument ordering is enforced correctly, and that register allocation can 7794 // see that some registers may be assumed clobbered and have to preserve 7795 // them across calls to the intrinsic. 7796 MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL, 7797 sdl, NodeTys, Ops); 7798 SDValue patchableNode = SDValue(MN, 0); 7799 DAG.setRoot(patchableNode); 7800 setValue(&I, patchableNode); 7801 return; 7802 } 7803 case Intrinsic::xray_typedevent: { 7804 // Here we want to make sure that the intrinsic behaves as if it has a 7805 // specific calling convention. 7806 const auto &Triple = DAG.getTarget().getTargetTriple(); 7807 if (!Triple.isAArch64(64) && Triple.getArch() != Triple::x86_64) 7808 return; 7809 7810 SmallVector<SDValue, 8> Ops; 7811 7812 // We want to say that we always want the arguments in registers. 7813 // It's unclear to me how manipulating the selection DAG here forces callers 7814 // to provide arguments in registers instead of on the stack. 7815 SDValue LogTypeId = getValue(I.getArgOperand(0)); 7816 SDValue LogEntryVal = getValue(I.getArgOperand(1)); 7817 SDValue StrSizeVal = getValue(I.getArgOperand(2)); 7818 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 7819 SDValue Chain = getRoot(); 7820 Ops.push_back(LogTypeId); 7821 Ops.push_back(LogEntryVal); 7822 Ops.push_back(StrSizeVal); 7823 Ops.push_back(Chain); 7824 7825 // We need to enforce the calling convention for the callsite, so that 7826 // argument ordering is enforced correctly, and that register allocation can 7827 // see that some registers may be assumed clobbered and have to preserve 7828 // them across calls to the intrinsic. 
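// (Roughly speaking, the PATCHABLE_TYPED_EVENT_CALL pseudo built below is
// later expanded by the XRay machinery into a patchable sled; the exact
// expansion is target-specific.)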
7829 MachineSDNode *MN = DAG.getMachineNode( 7830 TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, sdl, NodeTys, Ops); 7831 SDValue patchableNode = SDValue(MN, 0); 7832 DAG.setRoot(patchableNode); 7833 setValue(&I, patchableNode); 7834 return; 7835 } 7836 case Intrinsic::experimental_deoptimize: 7837 LowerDeoptimizeCall(&I); 7838 return; 7839 case Intrinsic::stepvector: 7840 visitStepVector(I); 7841 return; 7842 case Intrinsic::vector_reduce_fadd: 7843 case Intrinsic::vector_reduce_fmul: 7844 case Intrinsic::vector_reduce_add: 7845 case Intrinsic::vector_reduce_mul: 7846 case Intrinsic::vector_reduce_and: 7847 case Intrinsic::vector_reduce_or: 7848 case Intrinsic::vector_reduce_xor: 7849 case Intrinsic::vector_reduce_smax: 7850 case Intrinsic::vector_reduce_smin: 7851 case Intrinsic::vector_reduce_umax: 7852 case Intrinsic::vector_reduce_umin: 7853 case Intrinsic::vector_reduce_fmax: 7854 case Intrinsic::vector_reduce_fmin: 7855 case Intrinsic::vector_reduce_fmaximum: 7856 case Intrinsic::vector_reduce_fminimum: 7857 visitVectorReduce(I, Intrinsic); 7858 return; 7859 7860 case Intrinsic::icall_branch_funnel: { 7861 SmallVector<SDValue, 16> Ops; 7862 Ops.push_back(getValue(I.getArgOperand(0))); 7863 7864 int64_t Offset; 7865 auto *Base = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset( 7866 I.getArgOperand(1), Offset, DAG.getDataLayout())); 7867 if (!Base) 7868 report_fatal_error( 7869 "llvm.icall.branch.funnel operand must be a GlobalValue"); 7870 Ops.push_back(DAG.getTargetGlobalAddress(Base, sdl, MVT::i64, 0)); 7871 7872 struct BranchFunnelTarget { 7873 int64_t Offset; 7874 SDValue Target; 7875 }; 7876 SmallVector<BranchFunnelTarget, 8> Targets; 7877 7878 for (unsigned Op = 1, N = I.arg_size(); Op != N; Op += 2) { 7879 auto *ElemBase = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset( 7880 I.getArgOperand(Op), Offset, DAG.getDataLayout())); 7881 if (ElemBase != Base) 7882 report_fatal_error("all llvm.icall.branch.funnel operands must refer " 7883 "to the same GlobalValue"); 7884 7885 SDValue Val = getValue(I.getArgOperand(Op + 1)); 7886 auto *GA = dyn_cast<GlobalAddressSDNode>(Val); 7887 if (!GA) 7888 report_fatal_error( 7889 "llvm.icall.branch.funnel operand must be a GlobalValue"); 7890 Targets.push_back({Offset, DAG.getTargetGlobalAddress( 7891 GA->getGlobal(), sdl, Val.getValueType(), 7892 GA->getOffset())}); 7893 } 7894 llvm::sort(Targets, 7895 [](const BranchFunnelTarget &T1, const BranchFunnelTarget &T2) { 7896 return T1.Offset < T2.Offset; 7897 }); 7898 7899 for (auto &T : Targets) { 7900 Ops.push_back(DAG.getTargetConstant(T.Offset, sdl, MVT::i32)); 7901 Ops.push_back(T.Target); 7902 } 7903 7904 Ops.push_back(DAG.getRoot()); // Chain 7905 SDValue N(DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL, sdl, 7906 MVT::Other, Ops), 7907 0); 7908 DAG.setRoot(N); 7909 setValue(&I, N); 7910 HasTailCall = true; 7911 return; 7912 } 7913 7914 case Intrinsic::wasm_landingpad_index: 7915 // Information this intrinsic contained has been transferred to 7916 // MachineFunction in SelectionDAGISel::PrepareEHLandingPad. We can safely 7917 // delete it now.
7918 return; 7919 7920 case Intrinsic::aarch64_settag: 7921 case Intrinsic::aarch64_settag_zero: { 7922 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo(); 7923 bool ZeroMemory = Intrinsic == Intrinsic::aarch64_settag_zero; 7924 SDValue Val = TSI.EmitTargetCodeForSetTag( 7925 DAG, sdl, getRoot(), getValue(I.getArgOperand(0)), 7926 getValue(I.getArgOperand(1)), MachinePointerInfo(I.getArgOperand(0)), 7927 ZeroMemory); 7928 DAG.setRoot(Val); 7929 setValue(&I, Val); 7930 return; 7931 } 7932 case Intrinsic::amdgcn_cs_chain: { 7933 assert(I.arg_size() == 5 && "Additional args not supported yet"); 7934 assert(cast<ConstantInt>(I.getOperand(4))->isZero() && 7935 "Non-zero flags not supported yet"); 7936 7937 // At this point we don't care if it's amdgpu_cs_chain or 7938 // amdgpu_cs_chain_preserve. 7939 CallingConv::ID CC = CallingConv::AMDGPU_CS_Chain; 7940 7941 Type *RetTy = I.getType(); 7942 assert(RetTy->isVoidTy() && "Should not return"); 7943 7944 SDValue Callee = getValue(I.getOperand(0)); 7945 7946 // We only have 2 actual args: one for the SGPRs and one for the VGPRs. 7947 // We'll also tack on the value of the EXEC mask at the end. 7948 TargetLowering::ArgListTy Args; 7949 Args.reserve(3); 7950 7951 for (unsigned Idx : {2, 3, 1}) { 7952 TargetLowering::ArgListEntry Arg; 7953 Arg.Node = getValue(I.getOperand(Idx)); 7954 Arg.Ty = I.getOperand(Idx)->getType(); 7955 Arg.setAttributes(&I, Idx); 7956 Args.push_back(Arg); 7957 } 7958 7959 assert(Args[0].IsInReg && "SGPR args should be marked inreg"); 7960 assert(!Args[1].IsInReg && "VGPR args should not be marked inreg"); 7961 Args[2].IsInReg = true; // EXEC should be inreg 7962 7963 TargetLowering::CallLoweringInfo CLI(DAG); 7964 CLI.setDebugLoc(getCurSDLoc()) 7965 .setChain(getRoot()) 7966 .setCallee(CC, RetTy, Callee, std::move(Args)) 7967 .setNoReturn(true) 7968 .setTailCall(true) 7969 .setConvergent(I.isConvergent()); 7970 CLI.CB = &I; 7971 std::pair<SDValue, SDValue> Result = 7972 lowerInvokable(CLI, /*EHPadBB*/ nullptr); 7973 (void)Result; 7974 assert(!Result.first.getNode() && !Result.second.getNode() && 7975 "Should've lowered as tail call"); 7976 7977 HasTailCall = true; 7978 return; 7979 } 7980 case Intrinsic::ptrmask: { 7981 SDValue Ptr = getValue(I.getOperand(0)); 7982 SDValue Mask = getValue(I.getOperand(1)); 7983 7984 // On arm64_32, pointers are 32 bits when stored in memory, but 7985 // zero-extended to 64 bits when in registers. Thus the mask is 32 bits to 7986 // match the index type, but the pointer is 64 bits, so the mask must be 7987 // zero-extended up to 64 bits to match the pointer.
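// A minimal sketch of the lowering (hypothetical IR):
//   %r = call ptr @llvm.ptrmask.p0.i64(ptr %p, i64 -16)
// becomes ISD::AND of the pointer value and the mask, once the mask has
// been extended or truncated to the pointer's in-register type.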
7988 EVT PtrVT = 7989 TLI.getValueType(DAG.getDataLayout(), I.getOperand(0)->getType()); 7990 EVT MemVT = 7991 TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType()); 7992 assert(PtrVT == Ptr.getValueType()); 7993 assert(MemVT == Mask.getValueType()); 7994 if (MemVT != PtrVT) 7995 Mask = DAG.getPtrExtOrTrunc(Mask, sdl, PtrVT); 7996 7997 setValue(&I, DAG.getNode(ISD::AND, sdl, PtrVT, Ptr, Mask)); 7998 return; 7999 } 8000 case Intrinsic::threadlocal_address: { 8001 setValue(&I, getValue(I.getOperand(0))); 8002 return; 8003 } 8004 case Intrinsic::get_active_lane_mask: { 8005 EVT CCVT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 8006 SDValue Index = getValue(I.getOperand(0)); 8007 EVT ElementVT = Index.getValueType(); 8008 8009 if (!TLI.shouldExpandGetActiveLaneMask(CCVT, ElementVT)) { 8010 visitTargetIntrinsic(I, Intrinsic); 8011 return; 8012 } 8013 8014 SDValue TripCount = getValue(I.getOperand(1)); 8015 EVT VecTy = EVT::getVectorVT(*DAG.getContext(), ElementVT, 8016 CCVT.getVectorElementCount()); 8017 8018 SDValue VectorIndex = DAG.getSplat(VecTy, sdl, Index); 8019 SDValue VectorTripCount = DAG.getSplat(VecTy, sdl, TripCount); 8020 SDValue VectorStep = DAG.getStepVector(sdl, VecTy); 8021 SDValue VectorInduction = DAG.getNode( 8022 ISD::UADDSAT, sdl, VecTy, VectorIndex, VectorStep); 8023 SDValue SetCC = DAG.getSetCC(sdl, CCVT, VectorInduction, 8024 VectorTripCount, ISD::CondCode::SETULT); 8025 setValue(&I, SetCC); 8026 return; 8027 } 8028 case Intrinsic::experimental_get_vector_length: { 8029 assert(cast<ConstantInt>(I.getOperand(1))->getSExtValue() > 0 && 8030 "Expected positive VF"); 8031 unsigned VF = cast<ConstantInt>(I.getOperand(1))->getZExtValue(); 8032 bool IsScalable = cast<ConstantInt>(I.getOperand(2))->isOne(); 8033 8034 SDValue Count = getValue(I.getOperand(0)); 8035 EVT CountVT = Count.getValueType(); 8036 8037 if (!TLI.shouldExpandGetVectorLength(CountVT, VF, IsScalable)) { 8038 visitTargetIntrinsic(I, Intrinsic); 8039 return; 8040 } 8041 8042 // Expand to a umin between the trip count and the maximum elements the type 8043 // can hold. 8044 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 8045 8046 // Extend the trip count to at least the result VT. 8047 if (CountVT.bitsLT(VT)) { 8048 Count = DAG.getNode(ISD::ZERO_EXTEND, sdl, VT, Count); 8049 CountVT = VT; 8050 } 8051 8052 SDValue MaxEVL = DAG.getElementCount(sdl, CountVT, 8053 ElementCount::get(VF, IsScalable)); 8054 8055 SDValue UMin = DAG.getNode(ISD::UMIN, sdl, CountVT, Count, MaxEVL); 8056 // Clip to the result type if needed. 
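// (Putting the expansion together, illustratively: for VF = 4, scalable,
// this computes trunc(umin(zext(%cnt), 4 * vscale)).)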
8057 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, sdl, VT, UMin); 8058 8059 setValue(&I, Trunc); 8060 return; 8061 } 8062 case Intrinsic::experimental_vector_partial_reduce_add: { 8063 8064 if (!TLI.shouldExpandPartialReductionIntrinsic(cast<IntrinsicInst>(&I))) { 8065 visitTargetIntrinsic(I, Intrinsic); 8066 return; 8067 } 8068 8069 setValue(&I, DAG.getPartialReduceAdd(sdl, EVT::getEVT(I.getType()), 8070 getValue(I.getOperand(0)), 8071 getValue(I.getOperand(1)))); 8072 return; 8073 } 8074 case Intrinsic::experimental_cttz_elts: { 8075 auto DL = getCurSDLoc(); 8076 SDValue Op = getValue(I.getOperand(0)); 8077 EVT OpVT = Op.getValueType(); 8078 8079 if (!TLI.shouldExpandCttzElements(OpVT)) { 8080 visitTargetIntrinsic(I, Intrinsic); 8081 return; 8082 } 8083 8084 if (OpVT.getScalarType() != MVT::i1) { 8085 // Compare the input vector elements to zero & use to count trailing zeros 8086 SDValue AllZero = DAG.getConstant(0, DL, OpVT); 8087 OpVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, 8088 OpVT.getVectorElementCount()); 8089 Op = DAG.getSetCC(DL, OpVT, Op, AllZero, ISD::SETNE); 8090 } 8091 8092 // If the zero-is-poison flag is set, we can assume the upper limit 8093 // of the result is VF-1. 8094 bool ZeroIsPoison = 8095 !cast<ConstantSDNode>(getValue(I.getOperand(1)))->isZero(); 8096 ConstantRange VScaleRange(1, true); // Dummy value. 8097 if (isa<ScalableVectorType>(I.getOperand(0)->getType())) 8098 VScaleRange = getVScaleRange(I.getCaller(), 64); 8099 unsigned EltWidth = TLI.getBitWidthForCttzElements( 8100 I.getType(), OpVT.getVectorElementCount(), ZeroIsPoison, &VScaleRange); 8101 8102 MVT NewEltTy = MVT::getIntegerVT(EltWidth); 8103 8104 // Create the new vector type & get the vector length 8105 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), NewEltTy, 8106 OpVT.getVectorElementCount()); 8107 8108 SDValue VL = 8109 DAG.getElementCount(DL, NewEltTy, OpVT.getVectorElementCount()); 8110 8111 SDValue StepVec = DAG.getStepVector(DL, NewVT); 8112 SDValue SplatVL = DAG.getSplat(NewVT, DL, VL); 8113 SDValue StepVL = DAG.getNode(ISD::SUB, DL, NewVT, SplatVL, StepVec); 8114 SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, Op); 8115 SDValue And = DAG.getNode(ISD::AND, DL, NewVT, StepVL, Ext); 8116 SDValue Max = DAG.getNode(ISD::VECREDUCE_UMAX, DL, NewEltTy, And); 8117 SDValue Sub = DAG.getNode(ISD::SUB, DL, NewEltTy, VL, Max); 8118 8119 EVT RetTy = TLI.getValueType(DAG.getDataLayout(), I.getType()); 8120 SDValue Ret = DAG.getZExtOrTrunc(Sub, DL, RetTy); 8121 8122 setValue(&I, Ret); 8123 return; 8124 } 8125 case Intrinsic::vector_insert: { 8126 SDValue Vec = getValue(I.getOperand(0)); 8127 SDValue SubVec = getValue(I.getOperand(1)); 8128 SDValue Index = getValue(I.getOperand(2)); 8129 8130 // The intrinsic's index type is i64, but the SDNode requires an index type 8131 // suitable for the target. Convert the index as required. 
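// (The index operand of llvm.vector.insert is required to be constant, so
// re-creating it with getAsZExtVal() below is safe; it would assert on a
// non-constant node.)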
MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout()); 8133 if (Index.getValueType() != VectorIdxTy) 8134 Index = DAG.getVectorIdxConstant(Index->getAsZExtVal(), sdl); 8135 8136 EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 8137 setValue(&I, DAG.getNode(ISD::INSERT_SUBVECTOR, sdl, ResultVT, Vec, SubVec, 8138 Index)); 8139 return; 8140 } 8141 case Intrinsic::vector_extract: { 8142 SDValue Vec = getValue(I.getOperand(0)); 8143 SDValue Index = getValue(I.getOperand(1)); 8144 EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 8145 8146 // The intrinsic's index type is i64, but the SDNode requires an index type 8147 // suitable for the target. Convert the index as required. 8148 MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout()); 8149 if (Index.getValueType() != VectorIdxTy) 8150 Index = DAG.getVectorIdxConstant(Index->getAsZExtVal(), sdl); 8151 8152 setValue(&I, 8153 DAG.getNode(ISD::EXTRACT_SUBVECTOR, sdl, ResultVT, Vec, Index)); 8154 return; 8155 } 8156 case Intrinsic::vector_reverse: 8157 visitVectorReverse(I); 8158 return; 8159 case Intrinsic::vector_splice: 8160 visitVectorSplice(I); 8161 return; 8162 case Intrinsic::callbr_landingpad: 8163 visitCallBrLandingPad(I); 8164 return; 8165 case Intrinsic::vector_interleave2: 8166 visitVectorInterleave(I); 8167 return; 8168 case Intrinsic::vector_deinterleave2: 8169 visitVectorDeinterleave(I); 8170 return; 8171 case Intrinsic::experimental_vector_compress: 8172 setValue(&I, DAG.getNode(ISD::VECTOR_COMPRESS, sdl, 8173 getValue(I.getArgOperand(0)).getValueType(), 8174 getValue(I.getArgOperand(0)), 8175 getValue(I.getArgOperand(1)), 8176 getValue(I.getArgOperand(2)), Flags)); 8177 return; 8178 case Intrinsic::experimental_convergence_anchor: 8179 case Intrinsic::experimental_convergence_entry: 8180 case Intrinsic::experimental_convergence_loop: 8181 visitConvergenceControl(I, Intrinsic); 8182 return; 8183 case Intrinsic::experimental_vector_histogram_add: { 8184 visitVectorHistogram(I, Intrinsic); 8185 return; 8186 } 8187 } 8188 } 8189 8190 void SelectionDAGBuilder::visitConstrainedFPIntrinsic( 8191 const ConstrainedFPIntrinsic &FPI) { 8192 SDLoc sdl = getCurSDLoc(); 8193 8194 // We do not need to serialize constrained FP intrinsics against 8195 // each other or against (nonvolatile) loads, so they can be 8196 // chained like loads. 8197 SDValue Chain = DAG.getRoot(); 8198 SmallVector<SDValue, 4> Opers; 8199 Opers.push_back(Chain); 8200 for (unsigned I = 0, E = FPI.getNonMetadataArgCount(); I != E; ++I) 8201 Opers.push_back(getValue(FPI.getArgOperand(I))); 8202 8203 auto pushOutChain = [this](SDValue Result, fp::ExceptionBehavior EB) { 8204 assert(Result.getNode()->getNumValues() == 2); 8205 8206 // Push node to the appropriate list so that future instructions can be 8207 // chained up correctly. 8208 SDValue OutChain = Result.getValue(1); 8209 switch (EB) { 8210 case fp::ExceptionBehavior::ebIgnore: 8211 // The only reason why ebIgnore nodes still need to be chained is that 8212 // they might depend on the current rounding mode, and therefore must 8213 // not be moved across instructions that may change that mode. 8214 [[fallthrough]]; 8215 case fp::ExceptionBehavior::ebMayTrap: 8216 // These must not be moved across calls or instructions that may change 8217 // floating-point exception masks.
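// (Both pending lists are merged back into the DAG root lazily, so
// independent constrained FP nodes can still be chained in parallel rather
// than strictly serialized.)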
8218 PendingConstrainedFP.push_back(OutChain); 8219 break; 8220 case fp::ExceptionBehavior::ebStrict: 8221 // These must not be moved across calls or instructions that may change 8222 // floating-point exception masks or read floating-point exception flags. 8223 // In addition, they cannot be optimized out even if unused. 8224 PendingConstrainedFPStrict.push_back(OutChain); 8225 break; 8226 } 8227 }; 8228 8229 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 8230 EVT VT = TLI.getValueType(DAG.getDataLayout(), FPI.getType()); 8231 SDVTList VTs = DAG.getVTList(VT, MVT::Other); 8232 fp::ExceptionBehavior EB = *FPI.getExceptionBehavior(); 8233 8234 SDNodeFlags Flags; 8235 if (EB == fp::ExceptionBehavior::ebIgnore) 8236 Flags.setNoFPExcept(true); 8237 8238 if (auto *FPOp = dyn_cast<FPMathOperator>(&FPI)) 8239 Flags.copyFMF(*FPOp); 8240 8241 unsigned Opcode; 8242 switch (FPI.getIntrinsicID()) { 8243 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 8244 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \ 8245 case Intrinsic::INTRINSIC: \ 8246 Opcode = ISD::STRICT_##DAGN; \ 8247 break; 8248 #include "llvm/IR/ConstrainedOps.def" 8249 case Intrinsic::experimental_constrained_fmuladd: { 8250 Opcode = ISD::STRICT_FMA; 8251 // Break fmuladd into fmul and fadd. 8252 if (TM.Options.AllowFPOpFusion == FPOpFusion::Strict || 8253 !TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) { 8254 Opers.pop_back(); 8255 SDValue Mul = DAG.getNode(ISD::STRICT_FMUL, sdl, VTs, Opers, Flags); 8256 pushOutChain(Mul, EB); 8257 Opcode = ISD::STRICT_FADD; 8258 Opers.clear(); 8259 Opers.push_back(Mul.getValue(1)); 8260 Opers.push_back(Mul.getValue(0)); 8261 Opers.push_back(getValue(FPI.getArgOperand(2))); 8262 } 8263 break; 8264 } 8265 } 8266 8267 // A few strict DAG nodes carry additional operands that are not 8268 // set up by the default code above. 8269 switch (Opcode) { 8270 default: break; 8271 case ISD::STRICT_FP_ROUND: 8272 Opers.push_back( 8273 DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()))); 8274 break; 8275 case ISD::STRICT_FSETCC: 8276 case ISD::STRICT_FSETCCS: { 8277 auto *FPCmp = dyn_cast<ConstrainedFPCmpIntrinsic>(&FPI); 8278 ISD::CondCode Condition = getFCmpCondCode(FPCmp->getPredicate()); 8279 if (TM.Options.NoNaNsFPMath) 8280 Condition = getFCmpCodeWithoutNaN(Condition); 8281 Opers.push_back(DAG.getCondCode(Condition)); 8282 break; 8283 } 8284 } 8285 8286 SDValue Result = DAG.getNode(Opcode, sdl, VTs, Opers, Flags); 8287 pushOutChain(Result, EB); 8288 8289 SDValue FPResult = Result.getValue(0); 8290 setValue(&FPI, FPResult); 8291 } 8292 8293 static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin) { 8294 std::optional<unsigned> ResOPC; 8295 switch (VPIntrin.getIntrinsicID()) { 8296 case Intrinsic::vp_ctlz: { 8297 bool IsZeroUndef = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne(); 8298 ResOPC = IsZeroUndef ? ISD::VP_CTLZ_ZERO_UNDEF : ISD::VP_CTLZ; 8299 break; 8300 } 8301 case Intrinsic::vp_cttz: { 8302 bool IsZeroUndef = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne(); 8303 ResOPC = IsZeroUndef ? ISD::VP_CTTZ_ZERO_UNDEF : ISD::VP_CTTZ; 8304 break; 8305 } 8306 case Intrinsic::vp_cttz_elts: { 8307 bool IsZeroPoison = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne(); 8308 ResOPC = IsZeroPoison ? 
ISD::VP_CTTZ_ELTS_ZERO_UNDEF : ISD::VP_CTTZ_ELTS; 8309 break; 8310 } 8311 #define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD) \ 8312 case Intrinsic::VPID: \ 8313 ResOPC = ISD::VPSD; \ 8314 break; 8315 #include "llvm/IR/VPIntrinsics.def" 8316 } 8317 8318 if (!ResOPC) 8319 llvm_unreachable( 8320 "Inconsistency: no SDNode available for this VPIntrinsic!"); 8321 8322 if (*ResOPC == ISD::VP_REDUCE_SEQ_FADD || 8323 *ResOPC == ISD::VP_REDUCE_SEQ_FMUL) { 8324 if (VPIntrin.getFastMathFlags().allowReassoc()) 8325 return *ResOPC == ISD::VP_REDUCE_SEQ_FADD ? ISD::VP_REDUCE_FADD 8326 : ISD::VP_REDUCE_FMUL; 8327 } 8328 8329 return *ResOPC; 8330 } 8331 8332 void SelectionDAGBuilder::visitVPLoad( 8333 const VPIntrinsic &VPIntrin, EVT VT, 8334 const SmallVectorImpl<SDValue> &OpValues) { 8335 SDLoc DL = getCurSDLoc(); 8336 Value *PtrOperand = VPIntrin.getArgOperand(0); 8337 MaybeAlign Alignment = VPIntrin.getPointerAlignment(); 8338 AAMDNodes AAInfo = VPIntrin.getAAMetadata(); 8339 const MDNode *Ranges = getRangeMetadata(VPIntrin); 8340 SDValue LD; 8341 // Do not serialize variable-length loads of constant memory with 8342 // anything. 8343 if (!Alignment) 8344 Alignment = DAG.getEVTAlign(VT); 8345 MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo); 8346 bool AddToChain = !AA || !AA->pointsToConstantMemory(ML); 8347 SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode(); 8348 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( 8349 MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad, 8350 LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo, Ranges); 8351 LD = DAG.getLoadVP(VT, DL, InChain, OpValues[0], OpValues[1], OpValues[2], 8352 MMO, false /*IsExpanding */); 8353 if (AddToChain) 8354 PendingLoads.push_back(LD.getValue(1)); 8355 setValue(&VPIntrin, LD); 8356 } 8357 8358 void SelectionDAGBuilder::visitVPGather( 8359 const VPIntrinsic &VPIntrin, EVT VT, 8360 const SmallVectorImpl<SDValue> &OpValues) { 8361 SDLoc DL = getCurSDLoc(); 8362 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 8363 Value *PtrOperand = VPIntrin.getArgOperand(0); 8364 MaybeAlign Alignment = VPIntrin.getPointerAlignment(); 8365 AAMDNodes AAInfo = VPIntrin.getAAMetadata(); 8366 const MDNode *Ranges = getRangeMetadata(VPIntrin); 8367 SDValue LD; 8368 if (!Alignment) 8369 Alignment = DAG.getEVTAlign(VT.getScalarType()); 8370 unsigned AS = 8371 PtrOperand->getType()->getScalarType()->getPointerAddressSpace(); 8372 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( 8373 MachinePointerInfo(AS), MachineMemOperand::MOLoad, 8374 LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo, Ranges); 8375 SDValue Base, Index, Scale; 8376 ISD::MemIndexType IndexType; 8377 bool UniformBase = getUniformBase(PtrOperand, Base, Index, IndexType, Scale, 8378 this, VPIntrin.getParent(), 8379 VT.getScalarStoreSize()); 8380 if (!UniformBase) { 8381 Base = DAG.getConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout())); 8382 Index = getValue(PtrOperand); 8383 IndexType = ISD::SIGNED_SCALED; 8384 Scale = DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout())); 8385 } 8386 EVT IdxVT = Index.getValueType(); 8387 EVT EltTy = IdxVT.getVectorElementType(); 8388 if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) { 8389 EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy); 8390 Index = DAG.getNode(ISD::SIGN_EXTEND, DL, NewIdxVT, Index); 8391 } 8392 LD = DAG.getGatherVP( 8393 DAG.getVTList(VT, MVT::Other), VT, DL, 8394 {DAG.getRoot(), Base, Index, Scale, OpValues[1], OpValues[2]}, MMO, 8395 
IndexType); 8396 PendingLoads.push_back(LD.getValue(1)); 8397 setValue(&VPIntrin, LD); 8398 } 8399 8400 void SelectionDAGBuilder::visitVPStore( 8401 const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) { 8402 SDLoc DL = getCurSDLoc(); 8403 Value *PtrOperand = VPIntrin.getArgOperand(1); 8404 EVT VT = OpValues[0].getValueType(); 8405 MaybeAlign Alignment = VPIntrin.getPointerAlignment(); 8406 AAMDNodes AAInfo = VPIntrin.getAAMetadata(); 8407 SDValue ST; 8408 if (!Alignment) 8409 Alignment = DAG.getEVTAlign(VT); 8410 SDValue Ptr = OpValues[1]; 8411 SDValue Offset = DAG.getUNDEF(Ptr.getValueType()); 8412 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( 8413 MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore, 8414 LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo); 8415 ST = DAG.getStoreVP(getMemoryRoot(), DL, OpValues[0], Ptr, Offset, 8416 OpValues[2], OpValues[3], VT, MMO, ISD::UNINDEXED, 8417 /* IsTruncating */ false, /*IsCompressing*/ false); 8418 DAG.setRoot(ST); 8419 setValue(&VPIntrin, ST); 8420 } 8421 8422 void SelectionDAGBuilder::visitVPScatter( 8423 const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) { 8424 SDLoc DL = getCurSDLoc(); 8425 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 8426 Value *PtrOperand = VPIntrin.getArgOperand(1); 8427 EVT VT = OpValues[0].getValueType(); 8428 MaybeAlign Alignment = VPIntrin.getPointerAlignment(); 8429 AAMDNodes AAInfo = VPIntrin.getAAMetadata(); 8430 SDValue ST; 8431 if (!Alignment) 8432 Alignment = DAG.getEVTAlign(VT.getScalarType()); 8433 unsigned AS = 8434 PtrOperand->getType()->getScalarType()->getPointerAddressSpace(); 8435 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( 8436 MachinePointerInfo(AS), MachineMemOperand::MOStore, 8437 LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo); 8438 SDValue Base, Index, Scale; 8439 ISD::MemIndexType IndexType; 8440 bool UniformBase = getUniformBase(PtrOperand, Base, Index, IndexType, Scale, 8441 this, VPIntrin.getParent(), 8442 VT.getScalarStoreSize()); 8443 if (!UniformBase) { 8444 Base = DAG.getConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout())); 8445 Index = getValue(PtrOperand); 8446 IndexType = ISD::SIGNED_SCALED; 8447 Scale = 8448 DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout())); 8449 } 8450 EVT IdxVT = Index.getValueType(); 8451 EVT EltTy = IdxVT.getVectorElementType(); 8452 if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) { 8453 EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy); 8454 Index = DAG.getNode(ISD::SIGN_EXTEND, DL, NewIdxVT, Index); 8455 } 8456 ST = DAG.getScatterVP(DAG.getVTList(MVT::Other), VT, DL, 8457 {getMemoryRoot(), OpValues[0], Base, Index, Scale, 8458 OpValues[2], OpValues[3]}, 8459 MMO, IndexType); 8460 DAG.setRoot(ST); 8461 setValue(&VPIntrin, ST); 8462 } 8463 8464 void SelectionDAGBuilder::visitVPStridedLoad( 8465 const VPIntrinsic &VPIntrin, EVT VT, 8466 const SmallVectorImpl<SDValue> &OpValues) { 8467 SDLoc DL = getCurSDLoc(); 8468 Value *PtrOperand = VPIntrin.getArgOperand(0); 8469 MaybeAlign Alignment = VPIntrin.getPointerAlignment(); 8470 if (!Alignment) 8471 Alignment = DAG.getEVTAlign(VT.getScalarType()); 8472 AAMDNodes AAInfo = VPIntrin.getAAMetadata(); 8473 const MDNode *Ranges = getRangeMetadata(VPIntrin); 8474 MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo); 8475 bool AddToChain = !AA || !AA->pointsToConstantMemory(ML); 8476 SDValue InChain = AddToChain ? 
DAG.getRoot() : DAG.getEntryNode(); 8477 unsigned AS = PtrOperand->getType()->getPointerAddressSpace(); 8478 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( 8479 MachinePointerInfo(AS), MachineMemOperand::MOLoad, 8480 LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo, Ranges); 8481 8482 SDValue LD = DAG.getStridedLoadVP(VT, DL, InChain, OpValues[0], OpValues[1], 8483 OpValues[2], OpValues[3], MMO, 8484 false /*IsExpanding*/); 8485 8486 if (AddToChain) 8487 PendingLoads.push_back(LD.getValue(1)); 8488 setValue(&VPIntrin, LD); 8489 } 8490 8491 void SelectionDAGBuilder::visitVPStridedStore( 8492 const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) { 8493 SDLoc DL = getCurSDLoc(); 8494 Value *PtrOperand = VPIntrin.getArgOperand(1); 8495 EVT VT = OpValues[0].getValueType(); 8496 MaybeAlign Alignment = VPIntrin.getPointerAlignment(); 8497 if (!Alignment) 8498 Alignment = DAG.getEVTAlign(VT.getScalarType()); 8499 AAMDNodes AAInfo = VPIntrin.getAAMetadata(); 8500 unsigned AS = PtrOperand->getType()->getPointerAddressSpace(); 8501 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( 8502 MachinePointerInfo(AS), MachineMemOperand::MOStore, 8503 LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo); 8504 8505 SDValue ST = DAG.getStridedStoreVP( 8506 getMemoryRoot(), DL, OpValues[0], OpValues[1], 8507 DAG.getUNDEF(OpValues[1].getValueType()), OpValues[2], OpValues[3], 8508 OpValues[4], VT, MMO, ISD::UNINDEXED, /*IsTruncating*/ false, 8509 /*IsCompressing*/ false); 8510 8511 DAG.setRoot(ST); 8512 setValue(&VPIntrin, ST); 8513 } 8514 8515 void SelectionDAGBuilder::visitVPCmp(const VPCmpIntrinsic &VPIntrin) { 8516 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 8517 SDLoc DL = getCurSDLoc(); 8518 8519 ISD::CondCode Condition; 8520 CmpInst::Predicate CondCode = VPIntrin.getPredicate(); 8521 bool IsFP = VPIntrin.getOperand(0)->getType()->isFPOrFPVectorTy(); 8522 if (IsFP) { 8523 // FIXME: Regular fcmps are FPMathOperators which may have fast-math (nnan) 8524 // flags, but calls that don't return floating-point types can't be 8525 // FPMathOperators, like vp.fcmp. This affects constrained fcmp too. 
8526 Condition = getFCmpCondCode(CondCode); 8527 if (TM.Options.NoNaNsFPMath) 8528 Condition = getFCmpCodeWithoutNaN(Condition); 8529 } else { 8530 Condition = getICmpCondCode(CondCode); 8531 } 8532 8533 SDValue Op1 = getValue(VPIntrin.getOperand(0)); 8534 SDValue Op2 = getValue(VPIntrin.getOperand(1)); 8535 // #2 is the condition code 8536 SDValue MaskOp = getValue(VPIntrin.getOperand(3)); 8537 SDValue EVL = getValue(VPIntrin.getOperand(4)); 8538 MVT EVLParamVT = TLI.getVPExplicitVectorLengthTy(); 8539 assert(EVLParamVT.isScalarInteger() && EVLParamVT.bitsGE(MVT::i32) && 8540 "Unexpected target EVL type"); 8541 EVL = DAG.getNode(ISD::ZERO_EXTEND, DL, EVLParamVT, EVL); 8542 8543 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 8544 VPIntrin.getType()); 8545 setValue(&VPIntrin, 8546 DAG.getSetCCVP(DL, DestVT, Op1, Op2, Condition, MaskOp, EVL)); 8547 } 8548 8549 void SelectionDAGBuilder::visitVectorPredicationIntrinsic( 8550 const VPIntrinsic &VPIntrin) { 8551 SDLoc DL = getCurSDLoc(); 8552 unsigned Opcode = getISDForVPIntrinsic(VPIntrin); 8553 8554 auto IID = VPIntrin.getIntrinsicID(); 8555 8556 if (const auto *CmpI = dyn_cast<VPCmpIntrinsic>(&VPIntrin)) 8557 return visitVPCmp(*CmpI); 8558 8559 SmallVector<EVT, 4> ValueVTs; 8560 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 8561 ComputeValueVTs(TLI, DAG.getDataLayout(), VPIntrin.getType(), ValueVTs); 8562 SDVTList VTs = DAG.getVTList(ValueVTs); 8563 8564 auto EVLParamPos = VPIntrinsic::getVectorLengthParamPos(IID); 8565 8566 MVT EVLParamVT = TLI.getVPExplicitVectorLengthTy(); 8567 assert(EVLParamVT.isScalarInteger() && EVLParamVT.bitsGE(MVT::i32) && 8568 "Unexpected target EVL type"); 8569 8570 // Request operands. 8571 SmallVector<SDValue, 7> OpValues; 8572 for (unsigned I = 0; I < VPIntrin.arg_size(); ++I) { 8573 auto Op = getValue(VPIntrin.getArgOperand(I)); 8574 if (I == EVLParamPos) 8575 Op = DAG.getNode(ISD::ZERO_EXTEND, DL, EVLParamVT, Op); 8576 OpValues.push_back(Op); 8577 } 8578 8579 switch (Opcode) { 8580 default: { 8581 SDNodeFlags SDFlags; 8582 if (auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin)) 8583 SDFlags.copyFMF(*FPMO); 8584 SDValue Result = DAG.getNode(Opcode, DL, VTs, OpValues, SDFlags); 8585 setValue(&VPIntrin, Result); 8586 break; 8587 } 8588 case ISD::VP_LOAD: 8589 visitVPLoad(VPIntrin, ValueVTs[0], OpValues); 8590 break; 8591 case ISD::VP_GATHER: 8592 visitVPGather(VPIntrin, ValueVTs[0], OpValues); 8593 break; 8594 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD: 8595 visitVPStridedLoad(VPIntrin, ValueVTs[0], OpValues); 8596 break; 8597 case ISD::VP_STORE: 8598 visitVPStore(VPIntrin, OpValues); 8599 break; 8600 case ISD::VP_SCATTER: 8601 visitVPScatter(VPIntrin, OpValues); 8602 break; 8603 case ISD::EXPERIMENTAL_VP_STRIDED_STORE: 8604 visitVPStridedStore(VPIntrin, OpValues); 8605 break; 8606 case ISD::VP_FMULADD: { 8607 assert(OpValues.size() == 5 && "Unexpected number of operands"); 8608 SDNodeFlags SDFlags; 8609 if (auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin)) 8610 SDFlags.copyFMF(*FPMO); 8611 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict && 8612 TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), ValueVTs[0])) { 8613 setValue(&VPIntrin, DAG.getNode(ISD::VP_FMA, DL, VTs, OpValues, SDFlags)); 8614 } else { 8615 SDValue Mul = DAG.getNode( 8616 ISD::VP_FMUL, DL, VTs, 8617 {OpValues[0], OpValues[1], OpValues[3], OpValues[4]}, SDFlags); 8618 SDValue Add = 8619 DAG.getNode(ISD::VP_FADD, DL, VTs, 8620 {Mul, OpValues[2], OpValues[3], OpValues[4]}, SDFlags); 8621 
setValue(&VPIntrin, Add); 8622 } 8623 break; 8624 } 8625 case ISD::VP_IS_FPCLASS: { 8626 const DataLayout DLayout = DAG.getDataLayout(); 8627 EVT DestVT = TLI.getValueType(DLayout, VPIntrin.getType()); 8628 auto Constant = OpValues[1]->getAsZExtVal(); 8629 SDValue Check = DAG.getTargetConstant(Constant, DL, MVT::i32); 8630 SDValue V = DAG.getNode(ISD::VP_IS_FPCLASS, DL, DestVT, 8631 {OpValues[0], Check, OpValues[2], OpValues[3]}); 8632 setValue(&VPIntrin, V); 8633 return; 8634 } 8635 case ISD::VP_INTTOPTR: { 8636 SDValue N = OpValues[0]; 8637 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), VPIntrin.getType()); 8638 EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), VPIntrin.getType()); 8639 N = DAG.getVPPtrExtOrTrunc(getCurSDLoc(), DestVT, N, OpValues[1], 8640 OpValues[2]); 8641 N = DAG.getVPZExtOrTrunc(getCurSDLoc(), PtrMemVT, N, OpValues[1], 8642 OpValues[2]); 8643 setValue(&VPIntrin, N); 8644 break; 8645 } 8646 case ISD::VP_PTRTOINT: { 8647 SDValue N = OpValues[0]; 8648 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 8649 VPIntrin.getType()); 8650 EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), 8651 VPIntrin.getOperand(0)->getType()); 8652 N = DAG.getVPPtrExtOrTrunc(getCurSDLoc(), PtrMemVT, N, OpValues[1], 8653 OpValues[2]); 8654 N = DAG.getVPZExtOrTrunc(getCurSDLoc(), DestVT, N, OpValues[1], 8655 OpValues[2]); 8656 setValue(&VPIntrin, N); 8657 break; 8658 } 8659 case ISD::VP_ABS: 8660 case ISD::VP_CTLZ: 8661 case ISD::VP_CTLZ_ZERO_UNDEF: 8662 case ISD::VP_CTTZ: 8663 case ISD::VP_CTTZ_ZERO_UNDEF: 8664 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF: 8665 case ISD::VP_CTTZ_ELTS: { 8666 SDValue Result = 8667 DAG.getNode(Opcode, DL, VTs, {OpValues[0], OpValues[2], OpValues[3]}); 8668 setValue(&VPIntrin, Result); 8669 break; 8670 } 8671 } 8672 } 8673 8674 SDValue SelectionDAGBuilder::lowerStartEH(SDValue Chain, 8675 const BasicBlock *EHPadBB, 8676 MCSymbol *&BeginLabel) { 8677 MachineFunction &MF = DAG.getMachineFunction(); 8678 8679 // Insert a label before the invoke call to mark the try range. This can be 8680 // used to detect deletion of the invoke via the MachineModuleInfo. 8681 BeginLabel = MF.getContext().createTempSymbol(); 8682 8683 // For SjLj, keep track of which landing pads go with which invokes 8684 // so as to maintain the ordering of pads in the LSDA. 8685 unsigned CallSiteIndex = FuncInfo.getCurrentCallSite(); 8686 if (CallSiteIndex) { 8687 MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex); 8688 LPadToCallSiteMap[FuncInfo.getMBB(EHPadBB)].push_back(CallSiteIndex); 8689 8690 // Now that the call site is handled, stop tracking it. 8691 FuncInfo.setCurrentCallSite(0); 8692 } 8693 8694 return DAG.getEHLabel(getCurSDLoc(), Chain, BeginLabel); 8695 } 8696 8697 SDValue SelectionDAGBuilder::lowerEndEH(SDValue Chain, const InvokeInst *II, 8698 const BasicBlock *EHPadBB, 8699 MCSymbol *BeginLabel) { 8700 assert(BeginLabel && "BeginLabel should've been set"); 8701 8702 MachineFunction &MF = DAG.getMachineFunction(); 8703 8704 // Insert a label at the end of the invoke call to mark the try range. This 8705 // can be used to detect deletion of the invoke via the MachineModuleInfo. 8706 MCSymbol *EndLabel = MF.getContext().createTempSymbol(); 8707 Chain = DAG.getEHLabel(getCurSDLoc(), Chain, EndLabel); 8708 8709 // Inform MachineModuleInfo of range. 8710 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn()); 8711 // There is a platform (e.g. 
wasm) that uses funclet style IR but does not 8712 // actually use outlined funclets and their LSDA info style. 8713 if (MF.hasEHFunclets() && isFuncletEHPersonality(Pers)) { 8714 assert(II && "II should've been set"); 8715 WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo(); 8716 EHInfo->addIPToStateRange(II, BeginLabel, EndLabel); 8717 } else if (!isScopedEHPersonality(Pers)) { 8718 assert(EHPadBB); 8719 MF.addInvoke(FuncInfo.getMBB(EHPadBB), BeginLabel, EndLabel); 8720 } 8721 8722 return Chain; 8723 } 8724 8725 std::pair<SDValue, SDValue> 8726 SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI, 8727 const BasicBlock *EHPadBB) { 8728 MCSymbol *BeginLabel = nullptr; 8729 8730 if (EHPadBB) { 8731 // Both PendingLoads and PendingExports must be flushed here; 8732 // this call might not return. 8733 (void)getRoot(); 8734 DAG.setRoot(lowerStartEH(getControlRoot(), EHPadBB, BeginLabel)); 8735 CLI.setChain(getRoot()); 8736 } 8737 8738 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 8739 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI); 8740 8741 assert((CLI.IsTailCall || Result.second.getNode()) && 8742 "Non-null chain expected with non-tail call!"); 8743 assert((Result.second.getNode() || !Result.first.getNode()) && 8744 "Null value expected with tail call!"); 8745 8746 if (!Result.second.getNode()) { 8747 // As a special case, a null chain means that a tail call has been emitted 8748 // and the DAG root is already updated. 8749 HasTailCall = true; 8750 8751 // Since there's no actual continuation from this block, nothing can be 8752 // relying on us setting vregs for them. 8753 PendingExports.clear(); 8754 } else { 8755 DAG.setRoot(Result.second); 8756 } 8757 8758 if (EHPadBB) { 8759 DAG.setRoot(lowerEndEH(getRoot(), cast_or_null<InvokeInst>(CLI.CB), EHPadBB, 8760 BeginLabel)); 8761 Result.second = getRoot(); 8762 } 8763 8764 return Result; 8765 } 8766 8767 void SelectionDAGBuilder::LowerCallTo(const CallBase &CB, SDValue Callee, 8768 bool isTailCall, bool isMustTailCall, 8769 const BasicBlock *EHPadBB, 8770 const TargetLowering::PtrAuthInfo *PAI) { 8771 auto &DL = DAG.getDataLayout(); 8772 FunctionType *FTy = CB.getFunctionType(); 8773 Type *RetTy = CB.getType(); 8774 8775 TargetLowering::ArgListTy Args; 8776 Args.reserve(CB.arg_size()); 8777 8778 const Value *SwiftErrorVal = nullptr; 8779 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 8780 8781 if (isTailCall) { 8782 // Avoid emitting tail calls in functions with the disable-tail-calls 8783 // attribute. 8784 auto *Caller = CB.getParent()->getParent(); 8785 if (Caller->getFnAttribute("disable-tail-calls").getValueAsString() == 8786 "true" && !isMustTailCall) 8787 isTailCall = false; 8788 8789 // We can't tail call inside a function with a swifterror argument. Lowering 8790 // does not support this yet. It would have to move into the swifterror 8791 // register before the call. 8792 if (TLI.supportSwiftError() && 8793 Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) 8794 isTailCall = false; 8795 } 8796 8797 for (auto I = CB.arg_begin(), E = CB.arg_end(); I != E; ++I) { 8798 TargetLowering::ArgListEntry Entry; 8799 const Value *V = *I; 8800 8801 // Skip empty types 8802 if (V->getType()->isEmptyTy()) 8803 continue; 8804 8805 SDValue ArgNode = getValue(V); 8806 Entry.Node = ArgNode; Entry.Ty = V->getType(); 8807 8808 Entry.setAttributes(&CB, I - CB.arg_begin()); 8809 8810 // Use swifterror virtual register as input to the call. 
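// (Swifterror values are modeled as dedicated virtual registers rather
// than ordinary SSA values; see SwiftErrorValueTracking.)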
8811 if (Entry.IsSwiftError && TLI.supportSwiftError()) { 8812 SwiftErrorVal = V; 8813 // We find the virtual register for the actual swifterror argument. 8814 // Instead of using the Value, we use the virtual register. 8815 Entry.Node = 8816 DAG.getRegister(SwiftError.getOrCreateVRegUseAt(&CB, FuncInfo.MBB, V), 8817 EVT(TLI.getPointerTy(DL))); 8818 } 8819 8820 Args.push_back(Entry); 8821 8822 // If we have an explicit sret argument that is an Instruction (i.e., it 8823 // might point to function-local memory), we can't meaningfully tail-call. 8824 if (Entry.IsSRet && isa<Instruction>(V)) 8825 isTailCall = false; 8826 } 8827 8828 // If the call site has a cfguardtarget operand bundle, create and add an 8829 // additional ArgListEntry. 8830 if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_cfguardtarget)) { 8831 TargetLowering::ArgListEntry Entry; 8832 Value *V = Bundle->Inputs[0]; 8833 SDValue ArgNode = getValue(V); 8834 Entry.Node = ArgNode; 8835 Entry.Ty = V->getType(); 8836 Entry.IsCFGuardTarget = true; 8837 Args.push_back(Entry); 8838 } 8839 8840 // Check if target-independent constraints permit a tail call here. 8841 // Target-dependent constraints are checked within TLI->LowerCallTo. 8842 if (isTailCall && !isInTailCallPosition(CB, DAG.getTarget())) 8843 isTailCall = false; 8844 8845 // Disable tail calls if there is a swifterror argument. Targets have not 8846 // been updated to support tail calls. 8847 if (TLI.supportSwiftError() && SwiftErrorVal) 8848 isTailCall = false; 8849 8850 ConstantInt *CFIType = nullptr; 8851 if (CB.isIndirectCall()) { 8852 if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_kcfi)) { 8853 if (!TLI.supportKCFIBundles()) 8854 report_fatal_error( 8855 "Target doesn't support calls with kcfi operand bundles."); 8856 CFIType = cast<ConstantInt>(Bundle->Inputs[0]); 8857 assert(CFIType->getType()->isIntegerTy(32) && "Invalid CFI type"); 8858 } 8859 } 8860 8861 SDValue ConvControlToken; 8862 if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_convergencectrl)) { 8863 auto *Token = Bundle->Inputs[0].get(); 8864 ConvControlToken = getValue(Token); 8865 } 8866 8867 TargetLowering::CallLoweringInfo CLI(DAG); 8868 CLI.setDebugLoc(getCurSDLoc()) 8869 .setChain(getRoot()) 8870 .setCallee(RetTy, FTy, Callee, std::move(Args), CB) 8871 .setTailCall(isTailCall) 8872 .setConvergent(CB.isConvergent()) 8873 .setIsPreallocated( 8874 CB.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0) 8875 .setCFIType(CFIType) 8876 .setConvergenceControlToken(ConvControlToken); 8877 8878 // Set the pointer authentication info if we have it. 8879 if (PAI) { 8880 if (!TLI.supportPtrAuthBundles()) 8881 report_fatal_error( 8882 "This target doesn't support calls with ptrauth operand bundles."); 8883 CLI.setPtrAuth(*PAI); 8884 } 8885 8886 std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB); 8887 8888 if (Result.first.getNode()) { 8889 Result.first = lowerRangeToAssertZExt(DAG, CB, Result.first); 8890 setValue(&CB, Result.first); 8891 } 8892 8893 // The last element of CLI.InVals has the SDValue for swifterror return. 8894 // Here we copy it to a virtual register and update SwiftErrorMap for 8895 // book-keeping. 8896 if (SwiftErrorVal && TLI.supportSwiftError()) { 8897 // Get the last element of InVals.
8898 SDValue Src = CLI.InVals.back(); 8899 Register VReg = 8900 SwiftError.getOrCreateVRegDefAt(&CB, FuncInfo.MBB, SwiftErrorVal); 8901 SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src); 8902 DAG.setRoot(CopyNode); 8903 } 8904 } 8905 8906 static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT, 8907 SelectionDAGBuilder &Builder) { 8908 // Check to see if this load can be trivially constant folded, e.g. if the 8909 // input is from a string literal. 8910 if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) { 8911 // Cast pointer to the type we really want to load. 8912 Type *LoadTy = 8913 Type::getIntNTy(PtrVal->getContext(), LoadVT.getScalarSizeInBits()); 8914 if (LoadVT.isVector()) 8915 LoadTy = FixedVectorType::get(LoadTy, LoadVT.getVectorNumElements()); 8916 8917 LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput), 8918 PointerType::getUnqual(LoadTy)); 8919 8920 if (const Constant *LoadCst = 8921 ConstantFoldLoadFromConstPtr(const_cast<Constant *>(LoadInput), 8922 LoadTy, Builder.DAG.getDataLayout())) 8923 return Builder.getValue(LoadCst); 8924 } 8925 8926 // Otherwise, we have to emit the load. If the pointer is to unfoldable but 8927 // still constant memory, the input chain can be the entry node. 8928 SDValue Root; 8929 bool ConstantMemory = false; 8930 8931 // Do not serialize (non-volatile) loads of constant memory with anything. 8932 if (Builder.AA && Builder.AA->pointsToConstantMemory(PtrVal)) { 8933 Root = Builder.DAG.getEntryNode(); 8934 ConstantMemory = true; 8935 } else { 8936 // Do not serialize non-volatile loads against each other. 8937 Root = Builder.DAG.getRoot(); 8938 } 8939 8940 SDValue Ptr = Builder.getValue(PtrVal); 8941 SDValue LoadVal = 8942 Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root, Ptr, 8943 MachinePointerInfo(PtrVal), Align(1)); 8944 8945 if (!ConstantMemory) 8946 Builder.PendingLoads.push_back(LoadVal.getValue(1)); 8947 return LoadVal; 8948 } 8949 8950 /// Record the value for an instruction that produces an integer result, 8951 /// converting the type where necessary. 8952 void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I, 8953 SDValue Value, 8954 bool IsSigned) { 8955 EVT VT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 8956 I.getType(), true); 8957 Value = DAG.getExtOrTrunc(IsSigned, Value, getCurSDLoc(), VT); 8958 setValue(&I, Value); 8959 } 8960 8961 /// See if we can lower a memcmp/bcmp call into an optimized form. If so, return 8962 /// true and lower it. Otherwise return false, and it will be lowered like a 8963 /// normal call. 8964 /// The caller already checked that \p I calls the appropriate LibFunc with a 8965 /// correct prototype. 
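// For example (illustrative only, assuming a target with legal unaligned
// i32 loads): a length-4 memcmp whose result is only compared against zero,
//   if (memcmp(a, b, 4) == 0) ...
// is lowered below to two align-1 i32 loads, a SETNE of the loaded values,
// and a zero-extension of the i1 result to the call's return type.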
8966 bool SelectionDAGBuilder::visitMemCmpBCmpCall(const CallInst &I) { 8967 const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1); 8968 const Value *Size = I.getArgOperand(2); 8969 const ConstantSDNode *CSize = dyn_cast<ConstantSDNode>(getValue(Size)); 8970 if (CSize && CSize->getZExtValue() == 0) { 8971 EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), 8972 I.getType(), true); 8973 setValue(&I, DAG.getConstant(0, getCurSDLoc(), CallVT)); 8974 return true; 8975 } 8976 8977 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo(); 8978 std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForMemcmp( 8979 DAG, getCurSDLoc(), DAG.getRoot(), getValue(LHS), getValue(RHS), 8980 getValue(Size), MachinePointerInfo(LHS), MachinePointerInfo(RHS)); 8981 if (Res.first.getNode()) { 8982 processIntegerCallValue(I, Res.first, true); 8983 PendingLoads.push_back(Res.second); 8984 return true; 8985 } 8986 8987 // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS) != 0 8988 // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS) != 0 8989 if (!CSize || !isOnlyUsedInZeroEqualityComparison(&I)) 8990 return false; 8991 8992 // If the target has a fast compare for the given size, it will return a 8993 // preferred load type for that size. Require that the load VT is legal and 8994 // that the target supports unaligned loads of that type. Otherwise, return 8995 // INVALID. 8996 auto hasFastLoadsAndCompare = [&](unsigned NumBits) { 8997 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 8998 MVT LVT = TLI.hasFastEqualityCompare(NumBits); 8999 if (LVT != MVT::INVALID_SIMPLE_VALUE_TYPE) { 9000 // TODO: Handle 5 byte compare as 4-byte + 1 byte. 9001 // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads. 9002 // TODO: Check alignment of src and dest ptrs. 9003 unsigned DstAS = LHS->getType()->getPointerAddressSpace(); 9004 unsigned SrcAS = RHS->getType()->getPointerAddressSpace(); 9005 if (!TLI.isTypeLegal(LVT) || 9006 !TLI.allowsMisalignedMemoryAccesses(LVT, SrcAS) || 9007 !TLI.allowsMisalignedMemoryAccesses(LVT, DstAS)) 9008 LVT = MVT::INVALID_SIMPLE_VALUE_TYPE; 9009 } 9010 9011 return LVT; 9012 }; 9013 9014 // This turns into unaligned loads. We only do this if the target natively 9015 // supports the MVT we'll be loading or if it is small enough (<= 4) that 9016 // we'll only produce a small number of byte loads. 9017 MVT LoadVT; 9018 unsigned NumBitsToCompare = CSize->getZExtValue() * 8; 9019 switch (NumBitsToCompare) { 9020 default: 9021 return false; 9022 case 16: 9023 LoadVT = MVT::i16; 9024 break; 9025 case 32: 9026 LoadVT = MVT::i32; 9027 break; 9028 case 64: 9029 case 128: 9030 case 256: 9031 LoadVT = hasFastLoadsAndCompare(NumBitsToCompare); 9032 break; 9033 } 9034 9035 if (LoadVT == MVT::INVALID_SIMPLE_VALUE_TYPE) 9036 return false; 9037 9038 SDValue LoadL = getMemCmpLoad(LHS, LoadVT, *this); 9039 SDValue LoadR = getMemCmpLoad(RHS, LoadVT, *this); 9040 9041 // Bitcast to a wide integer type if the loads are vectors. 9042 if (LoadVT.isVector()) { 9043 EVT CmpVT = EVT::getIntegerVT(LHS->getContext(), LoadVT.getSizeInBits()); 9044 LoadL = DAG.getBitcast(CmpVT, LoadL); 9045 LoadR = DAG.getBitcast(CmpVT, LoadR); 9046 } 9047 9048 SDValue Cmp = DAG.getSetCC(getCurSDLoc(), MVT::i1, LoadL, LoadR, ISD::SETNE); 9049 processIntegerCallValue(I, Cmp, false); 9050 return true; 9051 } 9052 9053 /// See if we can lower a memchr call into an optimized form. If so, return 9054 /// true and lower it. 
Otherwise return false, and it will be lowered like a 9055 /// normal call. 9056 /// The caller already checked that \p I calls the appropriate LibFunc with a 9057 /// correct prototype. 9058 bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) { 9059 const Value *Src = I.getArgOperand(0); 9060 const Value *Char = I.getArgOperand(1); 9061 const Value *Length = I.getArgOperand(2); 9062 9063 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo(); 9064 std::pair<SDValue, SDValue> Res = 9065 TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(), 9066 getValue(Src), getValue(Char), getValue(Length), 9067 MachinePointerInfo(Src)); 9068 if (Res.first.getNode()) { 9069 setValue(&I, Res.first); 9070 PendingLoads.push_back(Res.second); 9071 return true; 9072 } 9073 9074 return false; 9075 } 9076 9077 /// See if we can lower a mempcpy call into an optimized form. If so, return 9078 /// true and lower it. Otherwise return false, and it will be lowered like a 9079 /// normal call. 9080 /// The caller already checked that \p I calls the appropriate LibFunc with a 9081 /// correct prototype. 9082 bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) { 9083 SDValue Dst = getValue(I.getArgOperand(0)); 9084 SDValue Src = getValue(I.getArgOperand(1)); 9085 SDValue Size = getValue(I.getArgOperand(2)); 9086 9087 Align DstAlign = DAG.InferPtrAlign(Dst).valueOrOne(); 9088 Align SrcAlign = DAG.InferPtrAlign(Src).valueOrOne(); 9089 // DAG::getMemcpy needs Alignment to be defined. 9090 Align Alignment = std::min(DstAlign, SrcAlign); 9091 9092 SDLoc sdl = getCurSDLoc(); 9093 9094 // In the mempcpy context we need to pass in a false value for isTailCall 9095 // because the return pointer needs to be adjusted by the size of 9096 // the copied memory. 9097 SDValue Root = getMemoryRoot(); 9098 SDValue MC = DAG.getMemcpy( 9099 Root, sdl, Dst, Src, Size, Alignment, false, false, /*CI=*/nullptr, 9100 std::nullopt, MachinePointerInfo(I.getArgOperand(0)), 9101 MachinePointerInfo(I.getArgOperand(1)), I.getAAMetadata()); 9102 assert(MC.getNode() != nullptr && 9103 "** memcpy should not be lowered as TailCall in mempcpy context **"); 9104 DAG.setRoot(MC); 9105 9106 // Check if Size needs to be truncated or extended. 9107 Size = DAG.getSExtOrTrunc(Size, sdl, Dst.getValueType()); 9108 9109 // Adjust return pointer to point just past the last dst byte. 9110 SDValue DstPlusSize = DAG.getNode(ISD::ADD, sdl, Dst.getValueType(), 9111 Dst, Size); 9112 setValue(&I, DstPlusSize); 9113 return true; 9114 } 9115 9116 /// See if we can lower a strcpy call into an optimized form. If so, return 9117 /// true and lower it, otherwise return false and it will be lowered like a 9118 /// normal call. 9119 /// The caller already checked that \p I calls the appropriate LibFunc with a 9120 /// correct prototype. 9121 bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) { 9122 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1); 9123 9124 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo(); 9125 std::pair<SDValue, SDValue> Res = 9126 TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(), 9127 getValue(Arg0), getValue(Arg1), 9128 MachinePointerInfo(Arg0), 9129 MachinePointerInfo(Arg1), isStpcpy); 9130 if (Res.first.getNode()) { 9131 setValue(&I, Res.first); 9132 DAG.setRoot(Res.second); 9133 return true; 9134 } 9135 9136 return false; 9137 } 9138 9139 /// See if we can lower a strcmp call into an optimized form. 
If so, return 9140 /// true and lower it, otherwise return false and it will be lowered like a 9141 /// normal call. 9142 /// The caller already checked that \p I calls the appropriate LibFunc with a 9143 /// correct prototype. 9144 bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) { 9145 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1); 9146 9147 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo(); 9148 std::pair<SDValue, SDValue> Res = 9149 TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(), 9150 getValue(Arg0), getValue(Arg1), 9151 MachinePointerInfo(Arg0), 9152 MachinePointerInfo(Arg1)); 9153 if (Res.first.getNode()) { 9154 processIntegerCallValue(I, Res.first, true); 9155 PendingLoads.push_back(Res.second); 9156 return true; 9157 } 9158 9159 return false; 9160 } 9161 9162 /// See if we can lower a strlen call into an optimized form. If so, return 9163 /// true and lower it, otherwise return false and it will be lowered like a 9164 /// normal call. 9165 /// The caller already checked that \p I calls the appropriate LibFunc with a 9166 /// correct prototype. 9167 bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) { 9168 const Value *Arg0 = I.getArgOperand(0); 9169 9170 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo(); 9171 std::pair<SDValue, SDValue> Res = 9172 TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(), 9173 getValue(Arg0), MachinePointerInfo(Arg0)); 9174 if (Res.first.getNode()) { 9175 processIntegerCallValue(I, Res.first, false); 9176 PendingLoads.push_back(Res.second); 9177 return true; 9178 } 9179 9180 return false; 9181 } 9182 9183 /// See if we can lower a strnlen call into an optimized form. If so, return 9184 /// true and lower it, otherwise return false and it will be lowered like a 9185 /// normal call. 9186 /// The caller already checked that \p I calls the appropriate LibFunc with a 9187 /// correct prototype. 9188 bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) { 9189 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1); 9190 9191 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo(); 9192 std::pair<SDValue, SDValue> Res = 9193 TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(), 9194 getValue(Arg0), getValue(Arg1), 9195 MachinePointerInfo(Arg0)); 9196 if (Res.first.getNode()) { 9197 processIntegerCallValue(I, Res.first, false); 9198 PendingLoads.push_back(Res.second); 9199 return true; 9200 } 9201 9202 return false; 9203 } 9204 9205 /// See if we can lower a unary floating-point operation into an SDNode with 9206 /// the specified Opcode. If so, return true and lower it, otherwise return 9207 /// false and it will be lowered like a normal call. 9208 /// The caller already checked that \p I calls the appropriate LibFunc with a 9209 /// correct prototype. 9210 bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I, 9211 unsigned Opcode) { 9212 // We already checked this call's prototype; verify it doesn't modify errno. 9213 if (!I.onlyReadsMemory()) 9214 return false; 9215 9216 SDNodeFlags Flags; 9217 Flags.copyFMF(cast<FPMathOperator>(I)); 9218 9219 SDValue Tmp = getValue(I.getArgOperand(0)); 9220 setValue(&I, 9221 DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp, Flags)); 9222 return true; 9223 } 9224 9225 /// See if we can lower a binary floating-point operation into an SDNode with 9226 /// the specified Opcode. If so, return true and lower it. 
Otherwise return 9227 /// false, and it will be lowered like a normal call. 9228 /// The caller already checked that \p I calls the appropriate LibFunc with a 9229 /// correct prototype. 9230 bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I, 9231 unsigned Opcode) { 9232 // We already checked this call's prototype; verify it doesn't modify errno. 9233 if (!I.onlyReadsMemory()) 9234 return false; 9235 9236 SDNodeFlags Flags; 9237 Flags.copyFMF(cast<FPMathOperator>(I)); 9238 9239 SDValue Tmp0 = getValue(I.getArgOperand(0)); 9240 SDValue Tmp1 = getValue(I.getArgOperand(1)); 9241 EVT VT = Tmp0.getValueType(); 9242 setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1, Flags)); 9243 return true; 9244 } 9245 9246 void SelectionDAGBuilder::visitCall(const CallInst &I) { 9247 // Handle inline assembly differently. 9248 if (I.isInlineAsm()) { 9249 visitInlineAsm(I); 9250 return; 9251 } 9252 9253 diagnoseDontCall(I); 9254 9255 if (Function *F = I.getCalledFunction()) { 9256 if (F->isDeclaration()) { 9257 // Is this an LLVM intrinsic or a target-specific intrinsic? 9258 unsigned IID = F->getIntrinsicID(); 9259 if (!IID) 9260 if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo()) 9261 IID = II->getIntrinsicID(F); 9262 9263 if (IID) { 9264 visitIntrinsicCall(I, IID); 9265 return; 9266 } 9267 } 9268 9269 // Check for well-known libc/libm calls. If the function is internal, it 9270 // can't be a library call. Don't do the check if marked as nobuiltin for 9271 // some reason or the call site requires strict floating point semantics. 9272 LibFunc Func; 9273 if (!I.isNoBuiltin() && !I.isStrictFP() && !F->hasLocalLinkage() && 9274 F->hasName() && LibInfo->getLibFunc(*F, Func) && 9275 LibInfo->hasOptimizedCodeGen(Func)) { 9276 switch (Func) { 9277 default: break; 9278 case LibFunc_bcmp: 9279 if (visitMemCmpBCmpCall(I)) 9280 return; 9281 break; 9282 case LibFunc_copysign: 9283 case LibFunc_copysignf: 9284 case LibFunc_copysignl: 9285 // We already checked this call's prototype; verify it doesn't modify 9286 // errno. 
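        // (A call that can set errno must be able to write memory, so
        // onlyReadsMemory() rules that out; the same check is done by
        // visitUnaryFloatCall and visitBinaryFloatCall above.)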
9287 if (I.onlyReadsMemory()) { 9288 SDValue LHS = getValue(I.getArgOperand(0)); 9289 SDValue RHS = getValue(I.getArgOperand(1)); 9290 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(), 9291 LHS.getValueType(), LHS, RHS)); 9292 return; 9293 } 9294 break; 9295 case LibFunc_fabs: 9296 case LibFunc_fabsf: 9297 case LibFunc_fabsl: 9298 if (visitUnaryFloatCall(I, ISD::FABS)) 9299 return; 9300 break; 9301 case LibFunc_fmin: 9302 case LibFunc_fminf: 9303 case LibFunc_fminl: 9304 if (visitBinaryFloatCall(I, ISD::FMINNUM)) 9305 return; 9306 break; 9307 case LibFunc_fmax: 9308 case LibFunc_fmaxf: 9309 case LibFunc_fmaxl: 9310 if (visitBinaryFloatCall(I, ISD::FMAXNUM)) 9311 return; 9312 break; 9313 case LibFunc_fminimum_num: 9314 case LibFunc_fminimum_numf: 9315 case LibFunc_fminimum_numl: 9316 if (visitBinaryFloatCall(I, ISD::FMINIMUMNUM)) 9317 return; 9318 break; 9319 case LibFunc_fmaximum_num: 9320 case LibFunc_fmaximum_numf: 9321 case LibFunc_fmaximum_numl: 9322 if (visitBinaryFloatCall(I, ISD::FMAXIMUMNUM)) 9323 return; 9324 break; 9325 case LibFunc_sin: 9326 case LibFunc_sinf: 9327 case LibFunc_sinl: 9328 if (visitUnaryFloatCall(I, ISD::FSIN)) 9329 return; 9330 break; 9331 case LibFunc_cos: 9332 case LibFunc_cosf: 9333 case LibFunc_cosl: 9334 if (visitUnaryFloatCall(I, ISD::FCOS)) 9335 return; 9336 break; 9337 case LibFunc_tan: 9338 case LibFunc_tanf: 9339 case LibFunc_tanl: 9340 if (visitUnaryFloatCall(I, ISD::FTAN)) 9341 return; 9342 break; 9343 case LibFunc_asin: 9344 case LibFunc_asinf: 9345 case LibFunc_asinl: 9346 if (visitUnaryFloatCall(I, ISD::FASIN)) 9347 return; 9348 break; 9349 case LibFunc_acos: 9350 case LibFunc_acosf: 9351 case LibFunc_acosl: 9352 if (visitUnaryFloatCall(I, ISD::FACOS)) 9353 return; 9354 break; 9355 case LibFunc_atan: 9356 case LibFunc_atanf: 9357 case LibFunc_atanl: 9358 if (visitUnaryFloatCall(I, ISD::FATAN)) 9359 return; 9360 break; 9361 case LibFunc_sinh: 9362 case LibFunc_sinhf: 9363 case LibFunc_sinhl: 9364 if (visitUnaryFloatCall(I, ISD::FSINH)) 9365 return; 9366 break; 9367 case LibFunc_cosh: 9368 case LibFunc_coshf: 9369 case LibFunc_coshl: 9370 if (visitUnaryFloatCall(I, ISD::FCOSH)) 9371 return; 9372 break; 9373 case LibFunc_tanh: 9374 case LibFunc_tanhf: 9375 case LibFunc_tanhl: 9376 if (visitUnaryFloatCall(I, ISD::FTANH)) 9377 return; 9378 break; 9379 case LibFunc_sqrt: 9380 case LibFunc_sqrtf: 9381 case LibFunc_sqrtl: 9382 case LibFunc_sqrt_finite: 9383 case LibFunc_sqrtf_finite: 9384 case LibFunc_sqrtl_finite: 9385 if (visitUnaryFloatCall(I, ISD::FSQRT)) 9386 return; 9387 break; 9388 case LibFunc_floor: 9389 case LibFunc_floorf: 9390 case LibFunc_floorl: 9391 if (visitUnaryFloatCall(I, ISD::FFLOOR)) 9392 return; 9393 break; 9394 case LibFunc_nearbyint: 9395 case LibFunc_nearbyintf: 9396 case LibFunc_nearbyintl: 9397 if (visitUnaryFloatCall(I, ISD::FNEARBYINT)) 9398 return; 9399 break; 9400 case LibFunc_ceil: 9401 case LibFunc_ceilf: 9402 case LibFunc_ceill: 9403 if (visitUnaryFloatCall(I, ISD::FCEIL)) 9404 return; 9405 break; 9406 case LibFunc_rint: 9407 case LibFunc_rintf: 9408 case LibFunc_rintl: 9409 if (visitUnaryFloatCall(I, ISD::FRINT)) 9410 return; 9411 break; 9412 case LibFunc_round: 9413 case LibFunc_roundf: 9414 case LibFunc_roundl: 9415 if (visitUnaryFloatCall(I, ISD::FROUND)) 9416 return; 9417 break; 9418 case LibFunc_trunc: 9419 case LibFunc_truncf: 9420 case LibFunc_truncl: 9421 if (visitUnaryFloatCall(I, ISD::FTRUNC)) 9422 return; 9423 break; 9424 case LibFunc_log2: 9425 case LibFunc_log2f: 9426 case LibFunc_log2l: 9427 if 
(visitUnaryFloatCall(I, ISD::FLOG2))
          return;
        break;
      case LibFunc_exp2:
      case LibFunc_exp2f:
      case LibFunc_exp2l:
        if (visitUnaryFloatCall(I, ISD::FEXP2))
          return;
        break;
      case LibFunc_exp10:
      case LibFunc_exp10f:
      case LibFunc_exp10l:
        if (visitUnaryFloatCall(I, ISD::FEXP10))
          return;
        break;
      case LibFunc_ldexp:
      case LibFunc_ldexpf:
      case LibFunc_ldexpl:
        if (visitBinaryFloatCall(I, ISD::FLDEXP))
          return;
        break;
      case LibFunc_memcmp:
        if (visitMemCmpBCmpCall(I))
          return;
        break;
      case LibFunc_mempcpy:
        if (visitMemPCpyCall(I))
          return;
        break;
      case LibFunc_memchr:
        if (visitMemChrCall(I))
          return;
        break;
      case LibFunc_strcpy:
        if (visitStrCpyCall(I, false))
          return;
        break;
      case LibFunc_stpcpy:
        if (visitStrCpyCall(I, true))
          return;
        break;
      case LibFunc_strcmp:
        if (visitStrCmpCall(I))
          return;
        break;
      case LibFunc_strlen:
        if (visitStrLenCall(I))
          return;
        break;
      case LibFunc_strnlen:
        if (visitStrNLenCall(I))
          return;
        break;
      }
    }
  }

  if (I.countOperandBundlesOfType(LLVMContext::OB_ptrauth)) {
    LowerCallSiteWithPtrAuthBundle(cast<CallBase>(I), /*EHPadBB=*/nullptr);
    return;
  }

  // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
  // have to do anything here to lower funclet bundles.
  // CFGuardTarget bundles are lowered in LowerCallTo.
  assert(!I.hasOperandBundlesOtherThan(
             {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
              LLVMContext::OB_cfguardtarget, LLVMContext::OB_preallocated,
              LLVMContext::OB_clang_arc_attachedcall, LLVMContext::OB_kcfi,
              LLVMContext::OB_convergencectrl}) &&
         "Cannot lower calls with arbitrary operand bundles!");

  SDValue Callee = getValue(I.getCalledOperand());

  if (I.hasDeoptState())
    LowerCallSiteWithDeoptBundle(&I, Callee, nullptr);
  else
    // Check if we can potentially perform a tail call. More detailed checking
    // is done within LowerCallTo, after more information about the call is
    // known.
    LowerCallTo(I, Callee, I.isTailCall(), I.isMustTailCall());
}

void SelectionDAGBuilder::LowerCallSiteWithPtrAuthBundle(
    const CallBase &CB, const BasicBlock *EHPadBB) {
  auto PAB = CB.getOperandBundle("ptrauth");
  const Value *CalleeV = CB.getCalledOperand();

  // Gather the call ptrauth data from the operand bundle:
  //   [ i32 <key>, i64 <discriminator> ]
  const auto *Key = cast<ConstantInt>(PAB->Inputs[0]);
  const Value *Discriminator = PAB->Inputs[1];

  assert(Key->getType()->isIntegerTy(32) && "Invalid ptrauth key");
  assert(Discriminator->getType()->isIntegerTy(64) &&
         "Invalid ptrauth discriminator");

  // Look through ptrauth constants to find the raw callee.
  // Do a direct unauthenticated call if we found it and everything matches.
  if (const auto *CalleeCPA = dyn_cast<ConstantPtrAuth>(CalleeV))
    if (CalleeCPA->isKnownCompatibleWith(Key, Discriminator,
                                         DAG.getDataLayout()))
      return LowerCallTo(CB, getValue(CalleeCPA->getPointer()), CB.isTailCall(),
                         CB.isMustTailCall(), EHPadBB);

  // Functions should never be ptrauth-called directly.
  assert(!isa<Function>(CalleeV) && "invalid direct ptrauth call");

  // Otherwise, do an authenticated indirect call.
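  // Illustrative (hypothetical) call site that reaches this point:
  //   call void %fptr() [ "ptrauth"(i32 0, i64 %disc) ]
  // The key and the lowered discriminator are passed to the target through
  // PtrAuthInfo so it can emit an authenticating call sequence.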
  TargetLowering::PtrAuthInfo PAI = {Key->getZExtValue(),
                                     getValue(Discriminator)};

  LowerCallTo(CB, getValue(CalleeV), CB.isTailCall(), CB.isMustTailCall(),
              EHPadBB, &PAI);
}

namespace {

/// AsmOperandInfo - This contains information for each constraint that we are
/// lowering.
class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
public:
  /// CallOperand - If this is the result output operand or a clobber
  /// this is null, otherwise it is the incoming operand to the CallInst.
  /// This gets modified as the asm is processed.
  SDValue CallOperand;

  /// AssignedRegs - If this is a register or register class operand, this
  /// contains the set of registers corresponding to the operand.
  RegsForValue AssignedRegs;

  explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
      : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr, 0) {}

  /// Whether or not this operand accesses memory.
  bool hasMemory(const TargetLowering &TLI) const {
    // Indirect operands access memory.
    if (isIndirect)
      return true;

    for (const auto &Code : Codes)
      if (TLI.getConstraintType(Code) == TargetLowering::C_Memory)
        return true;

    return false;
  }
};

} // end anonymous namespace

/// Make sure that the output operand \p OpInfo and its corresponding input
/// operand \p MatchingOpInfo have compatible constraint types (otherwise error
/// out).
static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo,
                               SDISelAsmOperandInfo &MatchingOpInfo,
                               SelectionDAG &DAG) {
  if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
    return;

  const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
  const auto &TLI = DAG.getTargetLoweringInfo();

  std::pair<unsigned, const TargetRegisterClass *> MatchRC =
      TLI.getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
                                       OpInfo.ConstraintVT);
  std::pair<unsigned, const TargetRegisterClass *> InputRC =
      TLI.getRegForInlineAsmConstraint(TRI, MatchingOpInfo.ConstraintCode,
                                       MatchingOpInfo.ConstraintVT);
  const bool OutOpIsIntOrFP =
      OpInfo.ConstraintVT.isInteger() || OpInfo.ConstraintVT.isFloatingPoint();
  const bool InOpIsIntOrFP = MatchingOpInfo.ConstraintVT.isInteger() ||
                             MatchingOpInfo.ConstraintVT.isFloatingPoint();
  if ((OutOpIsIntOrFP != InOpIsIntOrFP) || (MatchRC.second != InputRC.second)) {
    // FIXME: error out in a more elegant fashion
    report_fatal_error("Unsupported asm: input constraint"
                       " with a matching output constraint of"
                       " incompatible type!");
  }
  MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
}

/// Get a direct memory input to behave well as an indirect operand.
/// This may introduce stores, hence the need for a \p Chain.
/// \return The (possibly updated) chain.
static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location,
                                        SDISelAsmOperandInfo &OpInfo,
                                        SelectionDAG &DAG) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // If we don't have an indirect input, put it in the constpool if we can,
  // otherwise spill it to a stack slot.
  // TODO: This isn't quite right. We need to handle these according to
  // the addressing mode that the constraint wants.
Also, this may take 9622 // an additional register for the computation and we don't want that 9623 // either. 9624 9625 // If the operand is a float, integer, or vector constant, spill to a 9626 // constant pool entry to get its address. 9627 const Value *OpVal = OpInfo.CallOperandVal; 9628 if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) || 9629 isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) { 9630 OpInfo.CallOperand = DAG.getConstantPool( 9631 cast<Constant>(OpVal), TLI.getPointerTy(DAG.getDataLayout())); 9632 return Chain; 9633 } 9634 9635 // Otherwise, create a stack slot and emit a store to it before the asm. 9636 Type *Ty = OpVal->getType(); 9637 auto &DL = DAG.getDataLayout(); 9638 TypeSize TySize = DL.getTypeAllocSize(Ty); 9639 MachineFunction &MF = DAG.getMachineFunction(); 9640 const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering(); 9641 int StackID = 0; 9642 if (TySize.isScalable()) 9643 StackID = TFI->getStackIDForScalableVectors(); 9644 int SSFI = MF.getFrameInfo().CreateStackObject(TySize.getKnownMinValue(), 9645 DL.getPrefTypeAlign(Ty), false, 9646 nullptr, StackID); 9647 SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getFrameIndexTy(DL)); 9648 Chain = DAG.getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot, 9649 MachinePointerInfo::getFixedStack(MF, SSFI), 9650 TLI.getMemValueType(DL, Ty)); 9651 OpInfo.CallOperand = StackSlot; 9652 9653 return Chain; 9654 } 9655 9656 /// GetRegistersForValue - Assign registers (virtual or physical) for the 9657 /// specified operand. We prefer to assign virtual registers, to allow the 9658 /// register allocator to handle the assignment process. However, if the asm 9659 /// uses features that we can't model on machineinstrs, we have SDISel do the 9660 /// allocation. This produces generally horrible, but correct, code. 9661 /// 9662 /// OpInfo describes the operand 9663 /// RefOpInfo describes the matching operand if any, the operand otherwise 9664 static std::optional<unsigned> 9665 getRegistersForValue(SelectionDAG &DAG, const SDLoc &DL, 9666 SDISelAsmOperandInfo &OpInfo, 9667 SDISelAsmOperandInfo &RefOpInfo) { 9668 LLVMContext &Context = *DAG.getContext(); 9669 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 9670 9671 MachineFunction &MF = DAG.getMachineFunction(); 9672 SmallVector<Register, 4> Regs; 9673 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); 9674 9675 // No work to do for memory/address operands. 9676 if (OpInfo.ConstraintType == TargetLowering::C_Memory || 9677 OpInfo.ConstraintType == TargetLowering::C_Address) 9678 return std::nullopt; 9679 9680 // If this is a constraint for a single physreg, or a constraint for a 9681 // register class, find it. 9682 unsigned AssignedReg; 9683 const TargetRegisterClass *RC; 9684 std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint( 9685 &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT); 9686 // RC is unset only on failure. Return immediately. 9687 if (!RC) 9688 return std::nullopt; 9689 9690 // Get the actual register value type. This is important, because the user 9691 // may have asked for (e.g.) the AX register in i32 type. We need to 9692 // remember that AX is actually i16 to get the right extension. 
  const MVT RegVT = *TRI.legalclasstypes_begin(*RC);

  if (OpInfo.ConstraintVT != MVT::Other && RegVT != MVT::Untyped) {
    // If this is an FP operand in an integer register (or vice versa), or more
    // generally if the operand value disagrees with the register class we plan
    // to stick it in, fix the operand type.
    //
    // If this is an input value, the bitcast to the new type is done now.
    // Bitcast for output value is done at the end of visitInlineAsm().
    if ((OpInfo.Type == InlineAsm::isOutput ||
         OpInfo.Type == InlineAsm::isInput) &&
        !TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
      // Try to convert to the first EVT that the reg class contains. If the
      // types are identical size, use a bitcast to convert (e.g. two differing
      // vector types). Note: output bitcast is done at the end of
      // visitInlineAsm().
      if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
        // Exclude indirect inputs while they are unsupported because the code
        // to perform the load is missing and thus OpInfo.CallOperand still
        // refers to the input address rather than the pointed-to value.
        if (OpInfo.Type == InlineAsm::isInput && !OpInfo.isIndirect)
          OpInfo.CallOperand =
              DAG.getNode(ISD::BITCAST, DL, RegVT, OpInfo.CallOperand);
        OpInfo.ConstraintVT = RegVT;
        // If the operand is an FP value and we want it in integer registers,
        // use the corresponding integer type. This turns an f64 value into
        // i64, which can be passed with two i32 values on a 32-bit machine.
      } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
        MVT VT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
        if (OpInfo.Type == InlineAsm::isInput)
          OpInfo.CallOperand =
              DAG.getNode(ISD::BITCAST, DL, VT, OpInfo.CallOperand);
        OpInfo.ConstraintVT = VT;
      }
    }
  }

  // No need to allocate a matching input constraint since the constraint it's
  // matching to has already been allocated.
  if (OpInfo.isMatchingInputConstraint())
    return std::nullopt;

  EVT ValueVT = OpInfo.ConstraintVT;
  if (OpInfo.ConstraintVT == MVT::Other)
    ValueVT = RegVT;

  // Initialize NumRegs.
  unsigned NumRegs = 1;
  if (OpInfo.ConstraintVT != MVT::Other)
    NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT, RegVT);

  // If this is a constraint for a specific physical register, like {r17},
  // assign it now.

  // If this is associated with a specific register, initialize the iterator
  // to the correct place. If virtual, make sure we have enough registers.

  // Initialize the iterator if necessary.
  TargetRegisterClass::iterator I = RC->begin();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Do not check for single registers.
  if (AssignedReg) {
    I = std::find(I, RC->end(), AssignedReg);
    if (I == RC->end()) {
      // RC does not contain the selected register, which indicates a
      // mismatch between the register and the required type/bitwidth.
      return {AssignedReg};
    }
  }

  for (; NumRegs; --NumRegs, ++I) {
    assert(I != RC->end() && "Ran out of registers to allocate!");
    Register R =
        AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
    Regs.push_back(R);
  }

  OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
  return std::nullopt;
}

static unsigned
findMatchingInlineAsmOperand(unsigned OperandNo,
                             const std::vector<SDValue> &AsmNodeOperands) {
  // Scan until we find the definition we already emitted of this operand.
  unsigned CurOp = InlineAsm::Op_FirstOperand;
  for (; OperandNo; --OperandNo) {
    // Advance to the next operand.
    unsigned OpFlag = AsmNodeOperands[CurOp]->getAsZExtVal();
    const InlineAsm::Flag F(OpFlag);
    assert(
        (F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isMemKind()) &&
        "Skipped past definitions?");
    CurOp += F.getNumOperandRegisters() + 1;
  }
  return CurOp;
}

namespace {

class ExtraFlags {
  unsigned Flags = 0;

public:
  explicit ExtraFlags(const CallBase &Call) {
    const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
    if (IA->hasSideEffects())
      Flags |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      Flags |= InlineAsm::Extra_IsAlignStack;
    if (Call.isConvergent())
      Flags |= InlineAsm::Extra_IsConvergent;
    Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
  }

  void update(const TargetLowering::AsmOperandInfo &OpInfo) {
    // Ideally, we would only check against memory constraints. However, the
    // meaning of an Other constraint can be target-specific and we can't
    // easily reason about it. Therefore, be conservative and set MayLoad/
    // MayStore for Other constraints as well.
    if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
        OpInfo.ConstraintType == TargetLowering::C_Other) {
      if (OpInfo.Type == InlineAsm::isInput)
        Flags |= InlineAsm::Extra_MayLoad;
      else if (OpInfo.Type == InlineAsm::isOutput)
        Flags |= InlineAsm::Extra_MayStore;
      else if (OpInfo.Type == InlineAsm::isClobber)
        Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
    }
  }

  unsigned get() const { return Flags; }
};

} // end anonymous namespace

static bool isFunction(SDValue Op) {
  if (Op && Op.getOpcode() == ISD::GlobalAddress) {
    if (auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
      auto Fn = dyn_cast_or_null<Function>(GA->getGlobal());

      // A normal (non-inline-asm) "call dllimport func" instruction forces
      // indirect access by its choice of call opcode, and the symbol is
      // usually printed in its indirect form (e.g. with a "*" prefix)
      // according to that opcode. Inline asm cannot do that today (this is
      // similar to the "data access" case), so we ignore dllimport functions
      // here.
      if (Fn && !Fn->hasDLLImportStorageClass())
        return true;
    }
  }
  return false;
}

/// visitInlineAsm - Handle a call to an InlineAsm object.
void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call,
                                         const BasicBlock *EHPadBB) {
  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());

  /// ConstraintOperands - Information about all of the constraints.
  SmallVector<SDISelAsmOperandInfo, 16> ConstraintOperands;

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(
      DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), Call);

  // First Pass: Calculate HasSideEffects and ExtraFlags (AlignStack,
  // AsmDialect, MayLoad, MayStore).
  bool HasSideEffect = IA->hasSideEffects();
  ExtraFlags ExtraInfo(Call);

  for (auto &T : TargetConstraints) {
    ConstraintOperands.push_back(SDISelAsmOperandInfo(T));
    SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();

    if (OpInfo.CallOperandVal)
      OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);

    if (!HasSideEffect)
      HasSideEffect = OpInfo.hasMemory(TLI);

    // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
    // FIXME: Could we compute this on OpInfo rather than T?

    // Compute the constraint code and ConstraintType to use.
    TLI.ComputeConstraintToUse(T, SDValue());

    if (T.ConstraintType == TargetLowering::C_Immediate &&
        OpInfo.CallOperand && !isa<ConstantSDNode>(OpInfo.CallOperand))
      // We've delayed emitting a diagnostic like the "n" constraint because
      // inlining could cause an integer to show up.
      return emitInlineAsmError(Call, "constraint '" + Twine(T.ConstraintCode) +
                                          "' expects an integer constant "
                                          "expression");

    ExtraInfo.update(T);
  }

  // We won't need to flush pending loads if this asm doesn't touch
  // memory and is nonvolatile.
  SDValue Glue, Chain = (HasSideEffect) ? getRoot() : DAG.getRoot();

  bool EmitEHLabels = isa<InvokeInst>(Call);
  if (EmitEHLabels) {
    assert(EHPadBB && "InvokeInst must have an EHPadBB");
  }
  bool IsCallBr = isa<CallBrInst>(Call);

  if (IsCallBr || EmitEHLabels) {
    // If this is a callbr or invoke we need to flush pending exports since
    // inlineasm_br and invoke are terminators.
    // We need to do this before nodes are glued to the inlineasm_br node.
    Chain = getControlRoot();
  }

  MCSymbol *BeginLabel = nullptr;
  if (EmitEHLabels) {
    Chain = lowerStartEH(Chain, EHPadBB, BeginLabel);
  }

  int OpNo = -1;
  SmallVector<StringRef> AsmStrs;
  IA->collectAsmStrs(AsmStrs);

  // Second pass over the constraints: compute which constraint option to use.
  for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
    if (OpInfo.hasArg() || OpInfo.Type == InlineAsm::isOutput)
      OpNo++;

    // If this is an output operand with a matching input operand, look up the
    // matching input. If their types mismatch, e.g. one is an integer, the
    // other is floating point, or their sizes are different, flag it as an
    // error.
    if (OpInfo.hasMatchingInput()) {
      SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
      patchMatchingInput(OpInfo, Input, DAG);
    }

    // Compute the constraint code and ConstraintType to use.
    TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);

    if ((OpInfo.ConstraintType == TargetLowering::C_Memory &&
         OpInfo.Type == InlineAsm::isClobber) ||
        OpInfo.ConstraintType == TargetLowering::C_Address)
      continue;

    // In the Linux PIC model, there are four cases of value/label addressing:
    //
    //   1: Function call or label jmp inside the module.
    //   2: Data access (such as a global or static variable) inside the module.
    //   3: Function call or label jmp outside the module.
    //   4: Data access (such as a global variable) outside the module.
    //
    // Because the current LLVM inline asm architecture is designed not to
    // "recognize" the asm code, it is hard to treat memory addressing
    // differently for the same value/address used in different instructions.
    // For example, in the PIC model a function call may go through the PLT or
    // be directly PC-relative, while a lea/mov of a function address may use
    // the GOT.
    //
    // Here we try to "recognize" function calls for cases 1 and 3 in inline
    // asm, and adjust the constraint for them.
    //
    // TODO: Current inline asm discourages jumping to labels outside the
    // module, so we don't handle jumps to function labels for now; this needs
    // to be enhanced (especially in the PIC model) if meaningful use cases
    // arise.
    if (OpInfo.isIndirect && isFunction(OpInfo.CallOperand) &&
        TLI.isInlineAsmTargetBranch(AsmStrs, OpNo) &&
        TM.getCodeModel() != CodeModel::Large) {
      OpInfo.isIndirect = false;
      OpInfo.ConstraintType = TargetLowering::C_Address;
    }

    // If this is a memory input, and if the operand is not indirect, do what
    // we need to provide an address for the memory input.
    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
        !OpInfo.isIndirect) {
      assert((OpInfo.isMultipleAlternative ||
              (OpInfo.Type == InlineAsm::isInput)) &&
             "Can only indirectify direct input operands!");

      // Memory operands really want the address of the value.
      Chain = getAddressForMemoryInput(Chain, getCurSDLoc(), OpInfo, DAG);

      // There is no longer a Value* corresponding to this operand.
      OpInfo.CallOperandVal = nullptr;

      // It is now an indirect operand.
      OpInfo.isIndirect = true;
    }
  }

  // AsmNodeOperands - The operands for the ISD::INLINEASM node.
  std::vector<SDValue> AsmNodeOperands;
  AsmNodeOperands.push_back(SDValue()); // reserve space for input chain
  AsmNodeOperands.push_back(DAG.getTargetExternalSymbol(
      IA->getAsmString().c_str(), TLI.getProgramPointerTy(DAG.getDataLayout())));

  // If we have a !srcloc metadata node associated with it, we want to attach
  // this to the ultimately generated inline asm machineinstr. To do this, we
  // pass in the third operand as this (potentially null) inline asm MDNode.
  const MDNode *SrcLoc = Call.getMetadata("srcloc");
  AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));

  // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
  // bits as operand 3.
  AsmNodeOperands.push_back(DAG.getTargetConstant(
      ExtraInfo.get(), getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));

  // Third pass: Loop over operands to prepare DAG-level operands. As part of
  // this, assign virtual and physical registers for inputs and outputs.
  for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
    // Assign Registers.
    SDISelAsmOperandInfo &RefOpInfo =
        OpInfo.isMatchingInputConstraint()
            ? ConstraintOperands[OpInfo.getMatchedOperand()]
            : OpInfo;
    const auto RegError =
        getRegistersForValue(DAG, getCurSDLoc(), OpInfo, RefOpInfo);
    if (RegError) {
      const MachineFunction &MF = DAG.getMachineFunction();
      const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
      const char *RegName = TRI.getName(*RegError);
      emitInlineAsmError(Call, "register '" + Twine(RegName) +
                                   "' allocated for constraint '" +
                                   Twine(OpInfo.ConstraintCode) +
                                   "' does not match required type");
      return;
    }

    auto DetectWriteToReservedRegister = [&]() {
      const MachineFunction &MF = DAG.getMachineFunction();
      const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
      for (unsigned Reg : OpInfo.AssignedRegs.Regs) {
        if (Register::isPhysicalRegister(Reg) &&
            TRI.isInlineAsmReadOnlyReg(MF, Reg)) {
          const char *RegName = TRI.getName(Reg);
          emitInlineAsmError(Call, "write to reserved register '" +
                                       Twine(RegName) + "'");
          return true;
        }
      }
      return false;
    };
    assert((OpInfo.ConstraintType != TargetLowering::C_Address ||
            (OpInfo.Type == InlineAsm::isInput &&
             !OpInfo.isMatchingInputConstraint())) &&
           "Only address as input operand is allowed.");

    switch (OpInfo.Type) {
    case InlineAsm::isOutput:
      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
        const InlineAsm::ConstraintCode ConstraintID =
            TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
        assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
               "Failed to convert memory constraint code to constraint id.");

        // Add information to the INLINEASM node to know about this output.
        InlineAsm::Flag OpFlags(InlineAsm::Kind::Mem, 1);
        OpFlags.setMemConstraint(ConstraintID);
        AsmNodeOperands.push_back(
            DAG.getTargetConstant(OpFlags, getCurSDLoc(), MVT::i32));
        AsmNodeOperands.push_back(OpInfo.CallOperand);
      } else {
        // Otherwise, this outputs to a register (directly for C_Register /
        // C_RegisterClass, and in a target-defined fashion for
        // C_Immediate/C_Other). Find a register that we can use.
        if (OpInfo.AssignedRegs.Regs.empty()) {
          emitInlineAsmError(
              Call, "couldn't allocate output register for constraint '" +
                        Twine(OpInfo.ConstraintCode) + "'");
          return;
        }

        if (DetectWriteToReservedRegister())
          return;

        // Add information to the INLINEASM node to know that this register is
        // set.
        OpInfo.AssignedRegs.AddInlineAsmOperands(
            OpInfo.isEarlyClobber ? InlineAsm::Kind::RegDefEarlyClobber
                                  : InlineAsm::Kind::RegDef,
            false, 0, getCurSDLoc(), DAG, AsmNodeOperands);
      }
      break;

    case InlineAsm::isInput:
    case InlineAsm::isLabel: {
      SDValue InOperandVal = OpInfo.CallOperand;

      if (OpInfo.isMatchingInputConstraint()) {
        // If this is required to match an output register we have already
        // set, just use its register.
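        // Example (illustrative): in asm("..." : "=r"(x) : "0"(y)), the "0"
        // input is tied to output operand 0 and reuses the register(s)
        // already chosen for that output instead of being allocated its own.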
        auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
                                                  AsmNodeOperands);
        InlineAsm::Flag Flag(AsmNodeOperands[CurOp]->getAsZExtVal());
        if (Flag.isRegDefKind() || Flag.isRegDefEarlyClobberKind()) {
          if (OpInfo.isIndirect) {
            // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
            emitInlineAsmError(Call, "inline asm not supported yet: "
                                     "don't know how to handle tied "
                                     "indirect register inputs");
            return;
          }

          SmallVector<Register, 4> Regs;
          MachineFunction &MF = DAG.getMachineFunction();
          MachineRegisterInfo &MRI = MF.getRegInfo();
          const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
          auto *R = cast<RegisterSDNode>(AsmNodeOperands[CurOp + 1]);
          Register TiedReg = R->getReg();
          MVT RegVT = R->getSimpleValueType(0);
          const TargetRegisterClass *RC =
              TiedReg.isVirtual()     ? MRI.getRegClass(TiedReg)
              : RegVT != MVT::Untyped ? TLI.getRegClassFor(RegVT)
                                      : TRI.getMinimalPhysRegClass(TiedReg);
          for (unsigned i = 0, e = Flag.getNumOperandRegisters(); i != e; ++i)
            Regs.push_back(MRI.createVirtualRegister(RC));

          RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.getValueType());

          SDLoc dl = getCurSDLoc();
          // Use the produced MatchedRegs object to copy the input value into
          // the freshly created tied registers.
          MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Glue, &Call);
          MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind::RegUse, true,
                                           OpInfo.getMatchedOperand(), dl, DAG,
                                           AsmNodeOperands);
          break;
        }

        assert(Flag.isMemKind() && "Unknown matching constraint!");
        assert(Flag.getNumOperandRegisters() == 1 &&
               "Unexpected number of operands");
        // Add information to the INLINEASM node to know about this input.
        // See InlineAsm.h isUseOperandTiedToDef.
        Flag.clearMemConstraint();
        Flag.setMatchingOp(OpInfo.getMatchedOperand());
        AsmNodeOperands.push_back(DAG.getTargetConstant(
            Flag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
        AsmNodeOperands.push_back(AsmNodeOperands[CurOp + 1]);
        break;
      }

      // Treat indirect 'X' constraint as memory.
      if (OpInfo.ConstraintType == TargetLowering::C_Other && OpInfo.isIndirect)
        OpInfo.ConstraintType = TargetLowering::C_Memory;

      if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
          OpInfo.ConstraintType == TargetLowering::C_Other) {
        std::vector<SDValue> Ops;
        TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
                                         Ops, DAG);
        if (Ops.empty()) {
          if (OpInfo.ConstraintType == TargetLowering::C_Immediate)
            if (isa<ConstantSDNode>(InOperandVal)) {
              emitInlineAsmError(Call, "value out of range for constraint '" +
                                           Twine(OpInfo.ConstraintCode) + "'");
              return;
            }

          emitInlineAsmError(Call,
                             "invalid operand for inline asm constraint '" +
                                 Twine(OpInfo.ConstraintCode) + "'");
          return;
        }

        // Add information to the INLINEASM node to know about this input.
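        // (The operand is encoded as an InlineAsm::Kind::Imm flag whose count
        // is the number of SDValues lowered for it, followed by those values;
        // e.g. a single target constant for an "i" constraint.)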
10160 InlineAsm::Flag ResOpType(InlineAsm::Kind::Imm, Ops.size()); 10161 AsmNodeOperands.push_back(DAG.getTargetConstant( 10162 ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout()))); 10163 llvm::append_range(AsmNodeOperands, Ops); 10164 break; 10165 } 10166 10167 if (OpInfo.ConstraintType == TargetLowering::C_Memory) { 10168 assert((OpInfo.isIndirect || 10169 OpInfo.ConstraintType != TargetLowering::C_Memory) && 10170 "Operand must be indirect to be a mem!"); 10171 assert(InOperandVal.getValueType() == 10172 TLI.getPointerTy(DAG.getDataLayout()) && 10173 "Memory operands expect pointer values"); 10174 10175 const InlineAsm::ConstraintCode ConstraintID = 10176 TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode); 10177 assert(ConstraintID != InlineAsm::ConstraintCode::Unknown && 10178 "Failed to convert memory constraint code to constraint id."); 10179 10180 // Add information to the INLINEASM node to know about this input. 10181 InlineAsm::Flag ResOpType(InlineAsm::Kind::Mem, 1); 10182 ResOpType.setMemConstraint(ConstraintID); 10183 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType, 10184 getCurSDLoc(), 10185 MVT::i32)); 10186 AsmNodeOperands.push_back(InOperandVal); 10187 break; 10188 } 10189 10190 if (OpInfo.ConstraintType == TargetLowering::C_Address) { 10191 const InlineAsm::ConstraintCode ConstraintID = 10192 TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode); 10193 assert(ConstraintID != InlineAsm::ConstraintCode::Unknown && 10194 "Failed to convert memory constraint code to constraint id."); 10195 10196 InlineAsm::Flag ResOpType(InlineAsm::Kind::Mem, 1); 10197 10198 SDValue AsmOp = InOperandVal; 10199 if (isFunction(InOperandVal)) { 10200 auto *GA = cast<GlobalAddressSDNode>(InOperandVal); 10201 ResOpType = InlineAsm::Flag(InlineAsm::Kind::Func, 1); 10202 AsmOp = DAG.getTargetGlobalAddress(GA->getGlobal(), getCurSDLoc(), 10203 InOperandVal.getValueType(), 10204 GA->getOffset()); 10205 } 10206 10207 // Add information to the INLINEASM node to know about this input. 10208 ResOpType.setMemConstraint(ConstraintID); 10209 10210 AsmNodeOperands.push_back( 10211 DAG.getTargetConstant(ResOpType, getCurSDLoc(), MVT::i32)); 10212 10213 AsmNodeOperands.push_back(AsmOp); 10214 break; 10215 } 10216 10217 if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass && 10218 OpInfo.ConstraintType != TargetLowering::C_Register) { 10219 emitInlineAsmError(Call, "unknown asm constraint '" + 10220 Twine(OpInfo.ConstraintCode) + "'"); 10221 return; 10222 } 10223 10224 // TODO: Support this. 10225 if (OpInfo.isIndirect) { 10226 emitInlineAsmError( 10227 Call, "Don't know how to handle indirect register inputs yet " 10228 "for constraint '" + 10229 Twine(OpInfo.ConstraintCode) + "'"); 10230 return; 10231 } 10232 10233 // Copy the input into the appropriate registers. 10234 if (OpInfo.AssignedRegs.Regs.empty()) { 10235 emitInlineAsmError(Call, 10236 "couldn't allocate input reg for constraint '" + 10237 Twine(OpInfo.ConstraintCode) + "'"); 10238 return; 10239 } 10240 10241 if (DetectWriteToReservedRegister()) 10242 return; 10243 10244 SDLoc dl = getCurSDLoc(); 10245 10246 OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Glue, 10247 &Call); 10248 10249 OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind::RegUse, false, 10250 0, dl, DAG, AsmNodeOperands); 10251 break; 10252 } 10253 case InlineAsm::isClobber: 10254 // Add the clobbered value to the operand list, so that the register 10255 // allocator is aware that the physreg got clobbered. 
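      // Illustrative (hypothetical) example: asm volatile("" ::: "eax")
      // reaches this point with EAX in AssignedRegs, recorded as a
      // Kind::Clobber operand.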
      if (!OpInfo.AssignedRegs.Regs.empty())
        OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind::Clobber,
                                                 false, 0, getCurSDLoc(), DAG,
                                                 AsmNodeOperands);
      break;
    }
  }

  // Finish up input operands. Set the input chain and add the flag last.
  AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
  if (Glue.getNode()) AsmNodeOperands.push_back(Glue);

  unsigned ISDOpc = IsCallBr ? ISD::INLINEASM_BR : ISD::INLINEASM;
  Chain = DAG.getNode(ISDOpc, getCurSDLoc(),
                      DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
  Glue = Chain.getValue(1);

  // Do additional work to generate outputs.

  SmallVector<EVT, 1> ResultVTs;
  SmallVector<SDValue, 1> ResultValues;
  SmallVector<SDValue, 8> OutChains;

  llvm::Type *CallResultType = Call.getType();
  ArrayRef<Type *> ResultTypes;
  if (StructType *StructResult = dyn_cast<StructType>(CallResultType))
    ResultTypes = StructResult->elements();
  else if (!CallResultType->isVoidTy())
    ResultTypes = ArrayRef(CallResultType);

  auto CurResultType = ResultTypes.begin();
  auto handleRegAssign = [&](SDValue V) {
    assert(CurResultType != ResultTypes.end() && "Unexpected value");
    assert((*CurResultType)->isSized() && "Unexpected unsized type");
    EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), *CurResultType);
    ++CurResultType;
    // If the type of the inline asm call site return value is different from,
    // but has the same size as, the type of the asm output, bitcast it. One
    // example of this is for vectors with different width / number of
    // elements. This can happen for register classes that can contain
    // multiple different value types. The preg or vreg allocated may not have
    // the same VT as was expected.
    //
    // This can also happen for a return value that disagrees with the
    // register class it is put in, e.g. a double in a general-purpose
    // register on a 32-bit machine.
    if (ResultVT != V.getValueType() &&
        ResultVT.getSizeInBits() == V.getValueSizeInBits())
      V = DAG.getNode(ISD::BITCAST, getCurSDLoc(), ResultVT, V);
    else if (ResultVT != V.getValueType() && ResultVT.isInteger() &&
             V.getValueType().isInteger()) {
      // If a result value was tied to an input value, the computed result
      // may have a wider width than the expected result. Extract the
      // relevant portion.
      V = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultVT, V);
    }
    assert(ResultVT == V.getValueType() && "Asm result value mismatch!");
    ResultVTs.push_back(ResultVT);
    ResultValues.push_back(V);
  };

  // Deal with output operands.
  for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
    if (OpInfo.Type == InlineAsm::isOutput) {
      SDValue Val;
      // Skip trivial output operands.
      if (OpInfo.AssignedRegs.Regs.empty())
        continue;

      switch (OpInfo.ConstraintType) {
      case TargetLowering::C_Register:
      case TargetLowering::C_RegisterClass:
        Val = OpInfo.AssignedRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
                                                  Chain, &Glue, &Call);
        break;
      case TargetLowering::C_Immediate:
      case TargetLowering::C_Other:
        Val = TLI.LowerAsmOutputForConstraint(Chain, Glue, getCurSDLoc(),
                                              OpInfo, DAG);
        break;
      case TargetLowering::C_Memory:
        break; // Already handled.
      case TargetLowering::C_Address:
        break; // Silence warning.
      case TargetLowering::C_Unknown:
        assert(false && "Unexpected unknown constraint");
      }

      // Indirect outputs manifest as stores. Record the output chains.
      if (OpInfo.isIndirect) {
        const Value *Ptr = OpInfo.CallOperandVal;
        assert(Ptr && "Expected value CallOperandVal for indirect asm operand");
        SDValue Store = DAG.getStore(Chain, getCurSDLoc(), Val, getValue(Ptr),
                                     MachinePointerInfo(Ptr));
        OutChains.push_back(Store);
      } else {
        // Generate CopyFromRegs to associated registers.
        assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
        if (Val.getOpcode() == ISD::MERGE_VALUES) {
          for (const SDValue &V : Val->op_values())
            handleRegAssign(V);
        } else
          handleRegAssign(Val);
      }
    }
  }

  // Set results.
  if (!ResultValues.empty()) {
    assert(CurResultType == ResultTypes.end() &&
           "Mismatch in number of ResultTypes");
    assert(ResultValues.size() == ResultTypes.size() &&
           "Mismatch in number of output operands in asm result");

    SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
                            DAG.getVTList(ResultVTs), ResultValues);
    setValue(&Call, V);
  }

  // Collect store chains.
  if (!OutChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);

  if (EmitEHLabels) {
    Chain = lowerEndEH(Chain, cast<InvokeInst>(&Call), EHPadBB, BeginLabel);
  }

  // Only update the root if the inline assembly has a memory effect.
  if (ResultValues.empty() || HasSideEffect || !OutChains.empty() || IsCallBr ||
      EmitEHLabels)
    DAG.setRoot(Chain);
}

void SelectionDAGBuilder::emitInlineAsmError(const CallBase &Call,
                                             const Twine &Message) {
  LLVMContext &Ctx = *DAG.getContext();
  Ctx.emitError(&Call, Message);

  // Make sure we leave the DAG in a valid state.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SmallVector<EVT, 1> ValueVTs;
  ComputeValueVTs(TLI, DAG.getDataLayout(), Call.getType(), ValueVTs);

  if (ValueVTs.empty())
    return;

  SmallVector<SDValue, 1> Ops;
  for (const EVT &VT : ValueVTs)
    Ops.push_back(DAG.getUNDEF(VT));

  setValue(&Call, DAG.getMergeValues(Ops, getCurSDLoc()));
}

void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(), MVT::Other, getRoot(),
                          getValue(I.getArgOperand(0)),
                          DAG.getSrcValue(I.getArgOperand(0))));
}

void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const DataLayout &DL = DAG.getDataLayout();
  SDValue V = DAG.getVAArg(
      TLI.getMemValueType(DAG.getDataLayout(), I.getType()), getCurSDLoc(),
      getRoot(), getValue(I.getOperand(0)), DAG.getSrcValue(I.getOperand(0)),
      DL.getABITypeAlign(I.getType()).value());
  DAG.setRoot(V.getValue(1));

  if (I.getType()->isPointerTy())
    V = DAG.getPtrExtOrTrunc(
        V, getCurSDLoc(), TLI.getValueType(DAG.getDataLayout(), I.getType()));
  setValue(&I, V);
}

void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(), MVT::Other, getRoot(),
                          getValue(I.getArgOperand(0)),
                          DAG.getSrcValue(I.getArgOperand(0))));
DAG.getSrcValue(I.getArgOperand(0)))); 10436 } 10437 10438 void SelectionDAGBuilder::visitVACopy(const CallInst &I) { 10439 DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(), 10440 MVT::Other, getRoot(), 10441 getValue(I.getArgOperand(0)), 10442 getValue(I.getArgOperand(1)), 10443 DAG.getSrcValue(I.getArgOperand(0)), 10444 DAG.getSrcValue(I.getArgOperand(1)))); 10445 } 10446 10447 SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG, 10448 const Instruction &I, 10449 SDValue Op) { 10450 std::optional<ConstantRange> CR = getRange(I); 10451 10452 if (!CR || CR->isFullSet() || CR->isEmptySet() || CR->isUpperWrapped()) 10453 return Op; 10454 10455 APInt Lo = CR->getUnsignedMin(); 10456 if (!Lo.isMinValue()) 10457 return Op; 10458 10459 APInt Hi = CR->getUnsignedMax(); 10460 unsigned Bits = std::max(Hi.getActiveBits(), 10461 static_cast<unsigned>(IntegerType::MIN_INT_BITS)); 10462 10463 EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), Bits); 10464 10465 SDLoc SL = getCurSDLoc(); 10466 10467 SDValue ZExt = DAG.getNode(ISD::AssertZext, SL, Op.getValueType(), Op, 10468 DAG.getValueType(SmallVT)); 10469 unsigned NumVals = Op.getNode()->getNumValues(); 10470 if (NumVals == 1) 10471 return ZExt; 10472 10473 SmallVector<SDValue, 4> Ops; 10474 10475 Ops.push_back(ZExt); 10476 for (unsigned I = 1; I != NumVals; ++I) 10477 Ops.push_back(Op.getValue(I)); 10478 10479 return DAG.getMergeValues(Ops, SL); 10480 } 10481 10482 /// Populate a CallLoweringInfo (into \p CLI) based on the properties of 10483 /// the call being lowered. 10484 /// 10485 /// This is a helper for lowering intrinsics that follow a target calling 10486 /// convention or require stack pointer adjustment. Only a subset of the 10487 /// intrinsic's operands need to participate in the calling convention. 10488 void SelectionDAGBuilder::populateCallLoweringInfo( 10489 TargetLowering::CallLoweringInfo &CLI, const CallBase *Call, 10490 unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy, 10491 AttributeSet RetAttrs, bool IsPatchPoint) { 10492 TargetLowering::ArgListTy Args; 10493 Args.reserve(NumArgs); 10494 10495 // Populate the argument list. 10496 // Attributes for args start at offset 1, after the return attribute. 10497 for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; 10498 ArgI != ArgE; ++ArgI) { 10499 const Value *V = Call->getOperand(ArgI); 10500 10501 assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic."); 10502 10503 TargetLowering::ArgListEntry Entry; 10504 Entry.Node = getValue(V); 10505 Entry.Ty = V->getType(); 10506 Entry.setAttributes(Call, ArgI); 10507 Args.push_back(Entry); 10508 } 10509 10510 CLI.setDebugLoc(getCurSDLoc()) 10511 .setChain(getRoot()) 10512 .setCallee(Call->getCallingConv(), ReturnTy, Callee, std::move(Args), 10513 RetAttrs) 10514 .setDiscardResult(Call->use_empty()) 10515 .setIsPatchPoint(IsPatchPoint) 10516 .setIsPreallocated( 10517 Call->countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0); 10518 } 10519 10520 /// Add a stack map intrinsic call's live variable operands to a stackmap 10521 /// or patchpoint target node's operand list. 10522 /// 10523 /// Constants are converted to TargetConstants purely as an optimization to 10524 /// avoid constant materialization and register allocation. 10525 /// 10526 /// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not 10527 /// generate address computation nodes, and so FinalizeISel can convert the 10528 /// TargetFrameIndex into a DirectMemRefOp StackMap location.
This avoids 10529 /// address materialization and register allocation, but may also be required 10530 /// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an 10531 /// alloca in the entry block, then the runtime may assume that the alloca's 10532 /// StackMap location can be read immediately after compilation and that the 10533 /// location is valid at any point during execution (this is similar to the 10534 /// assumption made by the llvm.gcroot intrinsic). If the alloca's location were 10535 /// only available in a register, then the runtime would need to trap when 10536 /// execution reaches the StackMap in order to read the alloca's location. 10537 static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx, 10538 const SDLoc &DL, SmallVectorImpl<SDValue> &Ops, 10539 SelectionDAGBuilder &Builder) { 10540 SelectionDAG &DAG = Builder.DAG; 10541 for (unsigned I = StartIdx; I < Call.arg_size(); I++) { 10542 SDValue Op = Builder.getValue(Call.getArgOperand(I)); 10543 10544 // Things on the stack are pointer-typed, meaning that they are already 10545 // legal and can be emitted directly to target nodes. 10546 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) { 10547 Ops.push_back(DAG.getTargetFrameIndex(FI->getIndex(), Op.getValueType())); 10548 } else { 10549 // Otherwise emit a target independent node to be legalised. 10550 Ops.push_back(Builder.getValue(Call.getArgOperand(I))); 10551 } 10552 } 10553 } 10554 10555 /// Lower llvm.experimental.stackmap. 10556 void SelectionDAGBuilder::visitStackmap(const CallInst &CI) { 10557 // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>, 10558 // [live variables...]) 10559 10560 assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value."); 10561 10562 SDValue Chain, InGlue, Callee; 10563 SmallVector<SDValue, 32> Ops; 10564 10565 SDLoc DL = getCurSDLoc(); 10566 Callee = getValue(CI.getCalledOperand()); 10567 10568 // The stackmap intrinsic only records the live variables (the arguments 10569 // passed to it) and emits NOPS (if requested). Unlike the patchpoint 10570 // intrinsic, this won't be lowered to a function call. This means we don't 10571 // have to worry about calling conventions and target specific lowering code. 10572 // Instead we perform the call lowering right here. 10573 // 10574 // chain, flag = CALLSEQ_START(chain, 0, 0) 10575 // chain, flag = STACKMAP(id, nbytes, ..., chain, flag) 10576 // chain, flag = CALLSEQ_END(chain, 0, 0, flag) 10577 // 10578 Chain = DAG.getCALLSEQ_START(getRoot(), 0, 0, DL); 10579 InGlue = Chain.getValue(1); 10580 10581 // Add the STACKMAP operands, starting with DAG house-keeping. 10582 Ops.push_back(Chain); 10583 Ops.push_back(InGlue); 10584 10585 // Add the <id>, <numShadowBytes> operands. 10586 // 10587 // These do not require legalisation, and can be emitted directly to target 10588 // constant nodes. 10589 SDValue ID = getValue(CI.getArgOperand(0)); 10590 assert(ID.getValueType() == MVT::i64); 10591 SDValue IDConst = 10592 DAG.getTargetConstant(ID->getAsZExtVal(), DL, ID.getValueType()); 10593 Ops.push_back(IDConst); 10594 10595 SDValue Shad = getValue(CI.getArgOperand(1)); 10596 assert(Shad.getValueType() == MVT::i32); 10597 SDValue ShadConst = 10598 DAG.getTargetConstant(Shad->getAsZExtVal(), DL, Shad.getValueType()); 10599 Ops.push_back(ShadConst); 10600 10601 // Add the live variables. 10602 addStackMapLiveVars(CI, 2, DL, Ops, *this); 10603 10604 // Create the STACKMAP node. 
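// As an illustrative sketch only (not emitted text), the node built below has the shape: // t: ch,glue = STACKMAP Chain, InGlue, TargetConstant:i64<id>, TargetConstant:i32<numShadowBytes>, <live vars...> // with the operands in exactly the order collected above.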
10605 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 10606 Chain = DAG.getNode(ISD::STACKMAP, DL, NodeTys, Ops); 10607 InGlue = Chain.getValue(1); 10608 10609 Chain = DAG.getCALLSEQ_END(Chain, 0, 0, InGlue, DL); 10610 10611 // Stackmaps don't generate values, so nothing goes into the NodeMap. 10612 10613 // Set the root to the target-lowered call chain. 10614 DAG.setRoot(Chain); 10615 10616 // Inform the Frame Information that we have a stackmap in this function. 10617 FuncInfo.MF->getFrameInfo().setHasStackMap(); 10618 } 10619 10620 /// Lower llvm.experimental.patchpoint directly to its target opcode. 10621 void SelectionDAGBuilder::visitPatchpoint(const CallBase &CB, 10622 const BasicBlock *EHPadBB) { 10623 // <ty> @llvm.experimental.patchpoint.<ty>(i64 <id>, 10624 // i32 <numBytes>, 10625 // i8* <target>, 10626 // i32 <numArgs>, 10627 // [Args...], 10628 // [live variables...]) 10629 10630 CallingConv::ID CC = CB.getCallingConv(); 10631 bool IsAnyRegCC = CC == CallingConv::AnyReg; 10632 bool HasDef = !CB.getType()->isVoidTy(); 10633 SDLoc dl = getCurSDLoc(); 10634 SDValue Callee = getValue(CB.getArgOperand(PatchPointOpers::TargetPos)); 10635 10636 // Handle immediate and symbolic callees. 10637 if (auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee)) 10638 Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl, 10639 /*isTarget=*/true); 10640 else if (auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee)) 10641 Callee = DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(), 10642 SDLoc(SymbolicCallee), 10643 SymbolicCallee->getValueType(0)); 10644 10645 // Get the real number of arguments participating in the call <numArgs> 10646 SDValue NArgVal = getValue(CB.getArgOperand(PatchPointOpers::NArgPos)); 10647 unsigned NumArgs = NArgVal->getAsZExtVal(); 10648 10649 // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs> 10650 // Intrinsics include all meta-operands up to but not including CC. 10651 unsigned NumMetaOpers = PatchPointOpers::CCPos; 10652 assert(CB.arg_size() >= NumMetaOpers + NumArgs && 10653 "Not enough arguments provided to the patchpoint intrinsic"); 10654 10655 // For AnyRegCC the arguments are lowered later on manually. 10656 unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs; 10657 Type *ReturnTy = 10658 IsAnyRegCC ? Type::getVoidTy(*DAG.getContext()) : CB.getType(); 10659 10660 TargetLowering::CallLoweringInfo CLI(DAG); 10661 populateCallLoweringInfo(CLI, &CB, NumMetaOpers, NumCallArgs, Callee, 10662 ReturnTy, CB.getAttributes().getRetAttrs(), true); 10663 std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB); 10664 10665 SDNode *CallEnd = Result.second.getNode(); 10666 if (CallEnd->getOpcode() == ISD::EH_LABEL) 10667 CallEnd = CallEnd->getOperand(0).getNode(); 10668 if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg)) 10669 CallEnd = CallEnd->getOperand(0).getNode(); 10670 10671 /// Get a call instruction from the call sequence chain. 10672 /// Tail calls are not allowed. 10673 assert(CallEnd->getOpcode() == ISD::CALLSEQ_END && 10674 "Expected a callseq node."); 10675 SDNode *Call = CallEnd->getOperand(0).getNode(); 10676 bool HasGlue = Call->getGluedNode(); 10677 10678 // Replace the target specific call node with the patchable intrinsic. 10679 SmallVector<SDValue, 8> Ops; 10680 10681 // Push the chain. 10682 Ops.push_back(*(Call->op_begin())); 10683 10684 // Optionally, push the glue (if any). 10685 if (HasGlue) 10686 Ops.push_back(*(Call->op_end() - 1)); 10687 10688 // Push the register mask info. 
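// The lowered call node's operands are laid out as Chain, Target, {Args}, RegMask, [Glue] (see the "Call Node" comment below), so the register mask is the last operand before the optional glue.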
10689 if (HasGlue) 10690 Ops.push_back(*(Call->op_end() - 2)); 10691 else 10692 Ops.push_back(*(Call->op_end() - 1)); 10693 10694 // Add the <id> and <numBytes> constants. 10695 SDValue IDVal = getValue(CB.getArgOperand(PatchPointOpers::IDPos)); 10696 Ops.push_back(DAG.getTargetConstant(IDVal->getAsZExtVal(), dl, MVT::i64)); 10697 SDValue NBytesVal = getValue(CB.getArgOperand(PatchPointOpers::NBytesPos)); 10698 Ops.push_back(DAG.getTargetConstant(NBytesVal->getAsZExtVal(), dl, MVT::i32)); 10699 10700 // Add the callee. 10701 Ops.push_back(Callee); 10702 10703 // Adjust <numArgs> to account for any arguments that have been passed on the 10704 // stack instead. 10705 // Call Node: Chain, Target, {Args}, RegMask, [Glue] 10706 unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3); 10707 NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs; 10708 Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32)); 10709 10710 // Add the calling convention 10711 Ops.push_back(DAG.getTargetConstant((unsigned)CC, dl, MVT::i32)); 10712 10713 // Add the arguments we omitted previously. The register allocator should 10714 // place these in any free register. 10715 if (IsAnyRegCC) 10716 for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) 10717 Ops.push_back(getValue(CB.getArgOperand(i))); 10718 10719 // Push the arguments from the call instruction. 10720 SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1; 10721 Ops.append(Call->op_begin() + 2, e); 10722 10723 // Push live variables for the stack map. 10724 addStackMapLiveVars(CB, NumMetaOpers + NumArgs, dl, Ops, *this); 10725 10726 SDVTList NodeTys; 10727 if (IsAnyRegCC && HasDef) { 10728 // Create the return types based on the intrinsic definition 10729 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 10730 SmallVector<EVT, 3> ValueVTs; 10731 ComputeValueVTs(TLI, DAG.getDataLayout(), CB.getType(), ValueVTs); 10732 assert(ValueVTs.size() == 1 && "Expected only one return value type."); 10733 10734 // There is always a chain and a glue type at the end 10735 ValueVTs.push_back(MVT::Other); 10736 ValueVTs.push_back(MVT::Glue); 10737 NodeTys = DAG.getVTList(ValueVTs); 10738 } else 10739 NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 10740 10741 // Replace the target specific call node with a PATCHPOINT node. 10742 SDValue PPV = DAG.getNode(ISD::PATCHPOINT, dl, NodeTys, Ops); 10743 10744 // Update the NodeMap. 10745 if (HasDef) { 10746 if (IsAnyRegCC) 10747 setValue(&CB, SDValue(PPV.getNode(), 0)); 10748 else 10749 setValue(&CB, Result.first); 10750 } 10751 10752 // Fixup the consumers of the intrinsic. The chain and glue may be used in the 10753 // call sequence. Furthermore the location of the chain and glue can change 10754 // when the AnyReg calling convention is used and the intrinsic returns a 10755 // value. 10756 if (IsAnyRegCC && HasDef) { 10757 SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)}; 10758 SDValue To[] = {PPV.getValue(1), PPV.getValue(2)}; 10759 DAG.ReplaceAllUsesOfValuesWith(From, To, 2); 10760 } else 10761 DAG.ReplaceAllUsesWith(Call, PPV.getNode()); 10762 DAG.DeleteNode(Call); 10763 10764 // Inform the Frame Information that we have a patchpoint in this function. 
10765 FuncInfo.MF->getFrameInfo().setHasPatchPoint(); 10766 } 10767 10768 void SelectionDAGBuilder::visitVectorReduce(const CallInst &I, 10769 unsigned Intrinsic) { 10770 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 10771 SDValue Op1 = getValue(I.getArgOperand(0)); 10772 SDValue Op2; 10773 if (I.arg_size() > 1) 10774 Op2 = getValue(I.getArgOperand(1)); 10775 SDLoc dl = getCurSDLoc(); 10776 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 10777 SDValue Res; 10778 SDNodeFlags SDFlags; 10779 if (auto *FPMO = dyn_cast<FPMathOperator>(&I)) 10780 SDFlags.copyFMF(*FPMO); 10781 10782 switch (Intrinsic) { 10783 case Intrinsic::vector_reduce_fadd: 10784 if (SDFlags.hasAllowReassociation()) 10785 Res = DAG.getNode(ISD::FADD, dl, VT, Op1, 10786 DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2, SDFlags), 10787 SDFlags); 10788 else 10789 Res = DAG.getNode(ISD::VECREDUCE_SEQ_FADD, dl, VT, Op1, Op2, SDFlags); 10790 break; 10791 case Intrinsic::vector_reduce_fmul: 10792 if (SDFlags.hasAllowReassociation()) 10793 Res = DAG.getNode(ISD::FMUL, dl, VT, Op1, 10794 DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2, SDFlags), 10795 SDFlags); 10796 else 10797 Res = DAG.getNode(ISD::VECREDUCE_SEQ_FMUL, dl, VT, Op1, Op2, SDFlags); 10798 break; 10799 case Intrinsic::vector_reduce_add: 10800 Res = DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1); 10801 break; 10802 case Intrinsic::vector_reduce_mul: 10803 Res = DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1); 10804 break; 10805 case Intrinsic::vector_reduce_and: 10806 Res = DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1); 10807 break; 10808 case Intrinsic::vector_reduce_or: 10809 Res = DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1); 10810 break; 10811 case Intrinsic::vector_reduce_xor: 10812 Res = DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1); 10813 break; 10814 case Intrinsic::vector_reduce_smax: 10815 Res = DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1); 10816 break; 10817 case Intrinsic::vector_reduce_smin: 10818 Res = DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1); 10819 break; 10820 case Intrinsic::vector_reduce_umax: 10821 Res = DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1); 10822 break; 10823 case Intrinsic::vector_reduce_umin: 10824 Res = DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1); 10825 break; 10826 case Intrinsic::vector_reduce_fmax: 10827 Res = DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1, SDFlags); 10828 break; 10829 case Intrinsic::vector_reduce_fmin: 10830 Res = DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1, SDFlags); 10831 break; 10832 case Intrinsic::vector_reduce_fmaximum: 10833 Res = DAG.getNode(ISD::VECREDUCE_FMAXIMUM, dl, VT, Op1, SDFlags); 10834 break; 10835 case Intrinsic::vector_reduce_fminimum: 10836 Res = DAG.getNode(ISD::VECREDUCE_FMINIMUM, dl, VT, Op1, SDFlags); 10837 break; 10838 default: 10839 llvm_unreachable("Unhandled vector reduce intrinsic"); 10840 } 10841 setValue(&I, Res); 10842 } 10843 10844 /// Returns an AttributeList representing the attributes applied to the return 10845 /// value of the given call. 
10846 static AttributeList getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) { 10847 SmallVector<Attribute::AttrKind, 2> Attrs; 10848 if (CLI.RetSExt) 10849 Attrs.push_back(Attribute::SExt); 10850 if (CLI.RetZExt) 10851 Attrs.push_back(Attribute::ZExt); 10852 if (CLI.IsInReg) 10853 Attrs.push_back(Attribute::InReg); 10854 10855 return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex, 10856 Attrs); 10857 } 10858 10859 /// TargetLowering::LowerCallTo - This is the default LowerCallTo 10860 /// implementation, which just calls LowerCall. 10861 /// FIXME: When all targets are 10862 /// migrated to using LowerCall, this hook should be integrated into SDISel. 10863 std::pair<SDValue, SDValue> 10864 TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const { 10865 // Handle the incoming return values from the call. 10866 CLI.Ins.clear(); 10867 Type *OrigRetTy = CLI.RetTy; 10868 SmallVector<EVT, 4> RetTys; 10869 SmallVector<TypeSize, 4> Offsets; 10870 auto &DL = CLI.DAG.getDataLayout(); 10871 ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets); 10872 10873 if (CLI.IsPostTypeLegalization) { 10874 // If we are lowering a libcall after legalization, split the return type. 10875 SmallVector<EVT, 4> OldRetTys; 10876 SmallVector<TypeSize, 4> OldOffsets; 10877 RetTys.swap(OldRetTys); 10878 Offsets.swap(OldOffsets); 10879 10880 for (size_t i = 0, e = OldRetTys.size(); i != e; ++i) { 10881 EVT RetVT = OldRetTys[i]; 10882 uint64_t Offset = OldOffsets[i]; 10883 MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), RetVT); 10884 unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), RetVT); 10885 unsigned RegisterVTByteSZ = RegisterVT.getSizeInBits() / 8; 10886 RetTys.append(NumRegs, RegisterVT); 10887 for (unsigned j = 0; j != NumRegs; ++j) 10888 Offsets.push_back(TypeSize::getFixed(Offset + j * RegisterVTByteSZ)); 10889 } 10890 } 10891 10892 SmallVector<ISD::OutputArg, 4> Outs; 10893 GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL); 10894 10895 bool CanLowerReturn = 10896 this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(), 10897 CLI.IsVarArg, Outs, CLI.RetTy->getContext()); 10898 10899 SDValue DemoteStackSlot; 10900 int DemoteStackIdx = -100; 10901 if (!CanLowerReturn) { 10902 // FIXME: equivalent assert? 
10903 // assert(!CS.hasInAllocaArgument() && 10904 // "sret demotion is incompatible with inalloca"); 10905 uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy); 10906 Align Alignment = DL.getPrefTypeAlign(CLI.RetTy); 10907 MachineFunction &MF = CLI.DAG.getMachineFunction(); 10908 DemoteStackIdx = 10909 MF.getFrameInfo().CreateStackObject(TySize, Alignment, false); 10910 Type *StackSlotPtrType = PointerType::get(CLI.RetTy, 10911 DL.getAllocaAddrSpace()); 10912 10913 DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL)); 10914 ArgListEntry Entry; 10915 Entry.Node = DemoteStackSlot; 10916 Entry.Ty = StackSlotPtrType; 10917 Entry.IsSExt = false; 10918 Entry.IsZExt = false; 10919 Entry.IsInReg = false; 10920 Entry.IsSRet = true; 10921 Entry.IsNest = false; 10922 Entry.IsByVal = false; 10923 Entry.IsByRef = false; 10924 Entry.IsReturned = false; 10925 Entry.IsSwiftSelf = false; 10926 Entry.IsSwiftAsync = false; 10927 Entry.IsSwiftError = false; 10928 Entry.IsCFGuardTarget = false; 10929 Entry.Alignment = Alignment; 10930 CLI.getArgs().insert(CLI.getArgs().begin(), Entry); 10931 CLI.NumFixedArgs += 1; 10932 CLI.getArgs()[0].IndirectType = CLI.RetTy; 10933 CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext()); 10934 10935 // sret demotion isn't compatible with tail-calls, since the sret argument 10936 // points into the caller's stack frame. 10937 CLI.IsTailCall = false; 10938 } else { 10939 bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters( 10940 CLI.RetTy, CLI.CallConv, CLI.IsVarArg, DL); 10941 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) { 10942 ISD::ArgFlagsTy Flags; 10943 if (NeedsRegBlock) { 10944 Flags.setInConsecutiveRegs(); 10945 if (I == RetTys.size() - 1) 10946 Flags.setInConsecutiveRegsLast(); 10947 } 10948 EVT VT = RetTys[I]; 10949 MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(), 10950 CLI.CallConv, VT); 10951 unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(), 10952 CLI.CallConv, VT); 10953 for (unsigned i = 0; i != NumRegs; ++i) { 10954 ISD::InputArg MyFlags; 10955 MyFlags.Flags = Flags; 10956 MyFlags.VT = RegisterVT; 10957 MyFlags.ArgVT = VT; 10958 MyFlags.Used = CLI.IsReturnValueUsed; 10959 if (CLI.RetTy->isPointerTy()) { 10960 MyFlags.Flags.setPointer(); 10961 MyFlags.Flags.setPointerAddrSpace( 10962 cast<PointerType>(CLI.RetTy)->getAddressSpace()); 10963 } 10964 if (CLI.RetSExt) 10965 MyFlags.Flags.setSExt(); 10966 if (CLI.RetZExt) 10967 MyFlags.Flags.setZExt(); 10968 if (CLI.IsInReg) 10969 MyFlags.Flags.setInReg(); 10970 CLI.Ins.push_back(MyFlags); 10971 } 10972 } 10973 } 10974 10975 // We push the swifterror return as the last element of CLI.Ins. 10976 ArgListTy &Args = CLI.getArgs(); 10977 if (supportSwiftError()) { 10978 for (const ArgListEntry &Arg : Args) { 10979 if (Arg.IsSwiftError) { 10980 ISD::InputArg MyFlags; 10981 MyFlags.VT = getPointerTy(DL); 10982 MyFlags.ArgVT = EVT(getPointerTy(DL)); 10983 MyFlags.Flags.setSwiftError(); 10984 CLI.Ins.push_back(MyFlags); 10985 } 10986 } 10987 } 10988 10989 // Handle all of the outgoing arguments.
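// Note on the loop below: each IR-level argument value is split into however many legal register-sized parts the calling convention requires, and one ISD::OutputArg is recorded per part, with the split/split-end flags marking the pieces of a multi-register value.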
10990 CLI.Outs.clear(); 10991 CLI.OutVals.clear(); 10992 for (unsigned i = 0, e = Args.size(); i != e; ++i) { 10993 SmallVector<EVT, 4> ValueVTs; 10994 ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs); 10995 // FIXME: Split arguments if CLI.IsPostTypeLegalization 10996 Type *FinalType = Args[i].Ty; 10997 if (Args[i].IsByVal) 10998 FinalType = Args[i].IndirectType; 10999 bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters( 11000 FinalType, CLI.CallConv, CLI.IsVarArg, DL); 11001 for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues; 11002 ++Value) { 11003 EVT VT = ValueVTs[Value]; 11004 Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext()); 11005 SDValue Op = SDValue(Args[i].Node.getNode(), 11006 Args[i].Node.getResNo() + Value); 11007 ISD::ArgFlagsTy Flags; 11008 11009 // Certain targets (such as MIPS) may have a different ABI alignment 11010 // for a type depending on the context. Give the target a chance to 11011 // specify the alignment it wants. 11012 const Align OriginalAlignment(getABIAlignmentForCallingConv(ArgTy, DL)); 11013 Flags.setOrigAlign(OriginalAlignment); 11014 11015 if (Args[i].Ty->isPointerTy()) { 11016 Flags.setPointer(); 11017 Flags.setPointerAddrSpace( 11018 cast<PointerType>(Args[i].Ty)->getAddressSpace()); 11019 } 11020 if (Args[i].IsZExt) 11021 Flags.setZExt(); 11022 if (Args[i].IsSExt) 11023 Flags.setSExt(); 11024 if (Args[i].IsNoExt) 11025 Flags.setNoExt(); 11026 if (Args[i].IsInReg) { 11027 // If we are using the vectorcall calling convention, a structure that is 11028 // passed InReg is surely an HVA. 11029 if (CLI.CallConv == CallingConv::X86_VectorCall && 11030 isa<StructType>(FinalType)) { 11031 // The first value of a structure is marked as HvaStart. 11032 if (0 == Value) 11033 Flags.setHvaStart(); 11034 Flags.setHva(); 11035 } 11036 // Set the InReg flag. 11037 Flags.setInReg(); 11038 } 11039 if (Args[i].IsSRet) 11040 Flags.setSRet(); 11041 if (Args[i].IsSwiftSelf) 11042 Flags.setSwiftSelf(); 11043 if (Args[i].IsSwiftAsync) 11044 Flags.setSwiftAsync(); 11045 if (Args[i].IsSwiftError) 11046 Flags.setSwiftError(); 11047 if (Args[i].IsCFGuardTarget) 11048 Flags.setCFGuardTarget(); 11049 if (Args[i].IsByVal) 11050 Flags.setByVal(); 11051 if (Args[i].IsByRef) 11052 Flags.setByRef(); 11053 if (Args[i].IsPreallocated) { 11054 Flags.setPreallocated(); 11055 // Set the byval flag for CCAssignFn callbacks that don't know about 11056 // preallocated. This way we can know how many bytes we should've 11057 // allocated and how many bytes a callee cleanup function will pop. If 11058 // we port preallocated to more targets, we'll have to add custom 11059 // preallocated handling in the various CC lowering callbacks. 11060 Flags.setByVal(); 11061 } 11062 if (Args[i].IsInAlloca) { 11063 Flags.setInAlloca(); 11064 // Set the byval flag for CCAssignFn callbacks that don't know about 11065 // inalloca. This way we can know how many bytes we should've allocated 11066 // and how many bytes a callee cleanup function will pop. If we port 11067 // inalloca to more targets, we'll have to add custom inalloca handling 11068 // in the various CC lowering callbacks. 11069 Flags.setByVal(); 11070 } 11071 Align MemAlign; 11072 if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) { 11073 unsigned FrameSize = DL.getTypeAllocSize(Args[i].IndirectType); 11074 Flags.setByValSize(FrameSize); 11075 11076 // For in-memory arguments, size and alignment should be passed from FE. BE will guess if this info is not there but there are cases it cannot get right.
11077 if (auto MA = Args[i].Alignment) 11078 MemAlign = *MA; 11079 else 11080 MemAlign = Align(getByValTypeAlignment(Args[i].IndirectType, DL)); 11081 } else if (auto MA = Args[i].Alignment) { 11082 MemAlign = *MA; 11083 } else { 11084 MemAlign = OriginalAlignment; 11085 } 11086 Flags.setMemAlign(MemAlign); 11087 if (Args[i].IsNest) 11088 Flags.setNest(); 11089 if (NeedsRegBlock) 11090 Flags.setInConsecutiveRegs(); 11091 11092 MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(), 11093 CLI.CallConv, VT); 11094 unsigned NumParts = getNumRegistersForCallingConv(CLI.RetTy->getContext(), 11095 CLI.CallConv, VT); 11096 SmallVector<SDValue, 4> Parts(NumParts); 11097 ISD::NodeType ExtendKind = ISD::ANY_EXTEND; 11098 11099 if (Args[i].IsSExt) 11100 ExtendKind = ISD::SIGN_EXTEND; 11101 else if (Args[i].IsZExt) 11102 ExtendKind = ISD::ZERO_EXTEND; 11103 11104 // Conservatively only handle 'returned' on non-vectors that can be lowered, 11105 // for now. 11106 if (Args[i].IsReturned && !Op.getValueType().isVector() && 11107 CanLowerReturn) { 11108 assert((CLI.RetTy == Args[i].Ty || 11109 (CLI.RetTy->isPointerTy() && Args[i].Ty->isPointerTy() && 11110 CLI.RetTy->getPointerAddressSpace() == 11111 Args[i].Ty->getPointerAddressSpace())) && 11112 RetTys.size() == NumValues && "unexpected use of 'returned'"); 11113 // Before passing 'returned' to the target lowering code, ensure that 11114 // either the register MVT and the actual EVT are the same size or that 11115 // the return value and argument are extended in the same way; in these 11116 // cases it's safe to pass the argument register value unchanged as the 11117 // return register value (although it's at the target's option whether 11118 // to do so) 11119 // TODO: allow code generation to take advantage of partially preserved 11120 // registers rather than clobbering the entire register when the 11121 // parameter extension method is not compatible with the return 11122 // extension method 11123 if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) || 11124 (ExtendKind != ISD::ANY_EXTEND && CLI.RetSExt == Args[i].IsSExt && 11125 CLI.RetZExt == Args[i].IsZExt)) 11126 Flags.setReturned(); 11127 } 11128 11129 getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT, CLI.CB, 11130 CLI.CallConv, ExtendKind); 11131 11132 for (unsigned j = 0; j != NumParts; ++j) { 11133 // if it isn't first piece, alignment must be 1 11134 // For scalable vectors the scalable part is currently handled 11135 // by individual targets, so we just use the known minimum size here. 11136 ISD::OutputArg MyFlags( 11137 Flags, Parts[j].getValueType().getSimpleVT(), VT, 11138 i < CLI.NumFixedArgs, i, 11139 j * Parts[j].getValueType().getStoreSize().getKnownMinValue()); 11140 if (NumParts > 1 && j == 0) 11141 MyFlags.Flags.setSplit(); 11142 else if (j != 0) { 11143 MyFlags.Flags.setOrigAlign(Align(1)); 11144 if (j == NumParts - 1) 11145 MyFlags.Flags.setSplitEnd(); 11146 } 11147 11148 CLI.Outs.push_back(MyFlags); 11149 CLI.OutVals.push_back(Parts[j]); 11150 } 11151 11152 if (NeedsRegBlock && Value == NumValues - 1) 11153 CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast(); 11154 } 11155 } 11156 11157 SmallVector<SDValue, 4> InVals; 11158 CLI.Chain = LowerCall(CLI, InVals); 11159 11160 // Update CLI.InVals to use outside of this function. 11161 CLI.InVals = InVals; 11162 11163 // Verify that the target's LowerCall behaved as expected. 
11164 assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other && 11165 "LowerCall didn't return a valid chain!"); 11166 assert((!CLI.IsTailCall || InVals.empty()) && 11167 "LowerCall emitted a return value for a tail call!"); 11168 assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) && 11169 "LowerCall didn't emit the correct number of values!"); 11170 11171 // For a tail call, the return value is merely live-out and there aren't 11172 // any nodes in the DAG representing it. Return a special value to 11173 // indicate that a tail call has been emitted and no more Instructions 11174 // should be processed in the current block. 11175 if (CLI.IsTailCall) { 11176 CLI.DAG.setRoot(CLI.Chain); 11177 return std::make_pair(SDValue(), SDValue()); 11178 } 11179 11180 #ifndef NDEBUG 11181 for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) { 11182 assert(InVals[i].getNode() && "LowerCall emitted a null value!"); 11183 assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() && 11184 "LowerCall emitted a value with the wrong type!"); 11185 } 11186 #endif 11187 11188 SmallVector<SDValue, 4> ReturnValues; 11189 if (!CanLowerReturn) { 11190 // The instruction result is the result of loading from the 11191 // hidden sret parameter. 11192 SmallVector<EVT, 1> PVTs; 11193 Type *PtrRetTy = 11194 PointerType::get(OrigRetTy->getContext(), DL.getAllocaAddrSpace()); 11195 11196 ComputeValueVTs(*this, DL, PtrRetTy, PVTs); 11197 assert(PVTs.size() == 1 && "Pointers should fit in one register"); 11198 EVT PtrVT = PVTs[0]; 11199 11200 unsigned NumValues = RetTys.size(); 11201 ReturnValues.resize(NumValues); 11202 SmallVector<SDValue, 4> Chains(NumValues); 11203 11204 // An aggregate return value cannot wrap around the address space, so 11205 // offsets to its parts don't wrap either. 11206 SDNodeFlags Flags; 11207 Flags.setNoUnsignedWrap(true); 11208 11209 MachineFunction &MF = CLI.DAG.getMachineFunction(); 11210 Align HiddenSRetAlign = MF.getFrameInfo().getObjectAlign(DemoteStackIdx); 11211 for (unsigned i = 0; i < NumValues; ++i) { 11212 SDValue Add = CLI.DAG.getNode(ISD::ADD, CLI.DL, PtrVT, DemoteStackSlot, 11213 CLI.DAG.getConstant(Offsets[i], CLI.DL, 11214 PtrVT), Flags); 11215 SDValue L = CLI.DAG.getLoad( 11216 RetTys[i], CLI.DL, CLI.Chain, Add, 11217 MachinePointerInfo::getFixedStack(CLI.DAG.getMachineFunction(), 11218 DemoteStackIdx, Offsets[i]), 11219 HiddenSRetAlign); 11220 ReturnValues[i] = L; 11221 Chains[i] = L.getValue(1); 11222 } 11223 11224 CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains); 11225 } else { 11226 // Collect the legal value parts into potentially illegal values 11227 // that correspond to the original function's return values. 11228 std::optional<ISD::NodeType> AssertOp; 11229 if (CLI.RetSExt) 11230 AssertOp = ISD::AssertSext; 11231 else if (CLI.RetZExt) 11232 AssertOp = ISD::AssertZext; 11233 unsigned CurReg = 0; 11234 for (EVT VT : RetTys) { 11235 MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(), 11236 CLI.CallConv, VT); 11237 unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(), 11238 CLI.CallConv, VT); 11239 11240 ReturnValues.push_back(getCopyFromParts( 11241 CLI.DAG, CLI.DL, &InVals[CurReg], NumRegs, RegisterVT, VT, nullptr, 11242 CLI.Chain, CLI.CallConv, AssertOp)); 11243 CurReg += NumRegs; 11244 } 11245 11246 // For a function returning void, there is no return value. We can't create 11247 // such a node, so we just return a null return value in that case. 
In 11248 // that case, nothing will actually look at the value. 11249 if (ReturnValues.empty()) 11250 return std::make_pair(SDValue(), CLI.Chain); 11251 } 11252 11253 SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL, 11254 CLI.DAG.getVTList(RetTys), ReturnValues); 11255 return std::make_pair(Res, CLI.Chain); 11256 } 11257 11258 /// Places new result values for the node in Results (their number 11259 /// and types must exactly match those of the original return values of 11260 /// the node), or leaves Results empty, which indicates that the node is not 11261 /// to be custom lowered after all. 11262 void TargetLowering::LowerOperationWrapper(SDNode *N, 11263 SmallVectorImpl<SDValue> &Results, 11264 SelectionDAG &DAG) const { 11265 SDValue Res = LowerOperation(SDValue(N, 0), DAG); 11266 11267 if (!Res.getNode()) 11268 return; 11269 11270 // If the original node has one result, take the return value from 11271 // LowerOperation as is. It might not be result number 0. 11272 if (N->getNumValues() == 1) { 11273 Results.push_back(Res); 11274 return; 11275 } 11276 11277 // If the original node has multiple results, then the return node should 11278 // have the same number of results. 11279 assert((N->getNumValues() == Res->getNumValues()) && 11280 "Lowering returned the wrong number of results!"); 11281 11282 // Place the new result values based on N's result numbers. 11283 for (unsigned I = 0, E = N->getNumValues(); I != E; ++I) 11284 Results.push_back(Res.getValue(I)); 11285 } 11286 11287 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 11288 llvm_unreachable("LowerOperation not implemented for this target!"); 11289 } 11290 11291 void SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, 11292 unsigned Reg, 11293 ISD::NodeType ExtendType) { 11294 SDValue Op = getNonRegisterValue(V); 11295 assert((Op.getOpcode() != ISD::CopyFromReg || 11296 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) && 11297 "Copy from a reg to the same reg!"); 11298 assert(!Register::isPhysicalRegister(Reg) && "Is a physreg"); 11299 11300 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 11301 // If this is an InlineAsm we have to match the registers required, not the 11302 // notional registers required by the type. 11303 11304 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, V->getType(), 11305 std::nullopt); // This is not an ABI copy. 11306 SDValue Chain = DAG.getEntryNode(); 11307 11308 if (ExtendType == ISD::ANY_EXTEND) { 11309 auto PreferredExtendIt = FuncInfo.PreferredExtendType.find(V); 11310 if (PreferredExtendIt != FuncInfo.PreferredExtendType.end()) 11311 ExtendType = PreferredExtendIt->second; 11312 } 11313 RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V, ExtendType); 11314 PendingExports.push_back(Chain); 11315 } 11316 11317 #include "llvm/CodeGen/SelectionDAGISel.h" 11318 11319 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the 11320 /// entry block, return true. Arguments used by a switch are treated as used 11321 /// outside the entry block, since the switch may expand into multiple basic 11322 /// blocks. 11322 static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) { 11323 // With FastISel active, we may be splitting blocks, so force creation 11324 // of virtual registers for all non-dead arguments.
11325 if (FastISel) 11326 return A->use_empty(); 11327 11328 const BasicBlock &Entry = A->getParent()->front(); 11329 for (const User *U : A->users()) 11330 if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U)) 11331 return false; // Use not in entry block. 11332 11333 return true; 11334 } 11335 11336 using ArgCopyElisionMapTy = 11337 DenseMap<const Argument *, 11338 std::pair<const AllocaInst *, const StoreInst *>>; 11339 11340 /// Scan the entry block of the function in FuncInfo for arguments that look 11341 /// like copies into a local alloca. Record any copied arguments in 11342 /// ArgCopyElisionCandidates. 11343 static void 11344 findArgumentCopyElisionCandidates(const DataLayout &DL, 11345 FunctionLoweringInfo *FuncInfo, 11346 ArgCopyElisionMapTy &ArgCopyElisionCandidates) { 11347 // Record the state of every static alloca used in the entry block. Argument 11348 // allocas are all used in the entry block, so we need approximately as many 11349 // entries as we have arguments. 11350 enum StaticAllocaInfo { Unknown, Clobbered, Elidable }; 11351 SmallDenseMap<const AllocaInst *, StaticAllocaInfo, 8> StaticAllocas; 11352 unsigned NumArgs = FuncInfo->Fn->arg_size(); 11353 StaticAllocas.reserve(NumArgs * 2); 11354 11355 auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * { 11356 if (!V) 11357 return nullptr; 11358 V = V->stripPointerCasts(); 11359 const auto *AI = dyn_cast<AllocaInst>(V); 11360 if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI)) 11361 return nullptr; 11362 auto Iter = StaticAllocas.insert({AI, Unknown}); 11363 return &Iter.first->second; 11364 }; 11365 11366 // Look for stores of arguments to static allocas. Look through bitcasts and 11367 // GEPs to handle type coercions, as long as the alloca is fully initialized 11368 // by the store. Any non-store use of an alloca escapes it and any subsequent 11369 // unanalyzed store might write it. 11370 // FIXME: Handle structs initialized with multiple stores. 11371 for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) { 11372 // Look for stores, and handle non-store uses conservatively. 11373 const auto *SI = dyn_cast<StoreInst>(&I); 11374 if (!SI) { 11375 // We will look through cast uses, so ignore them completely. 11376 if (I.isCast()) 11377 continue; 11378 // Ignore debug info and pseudo op intrinsics, they don't escape or store 11379 // to allocas. 11380 if (I.isDebugOrPseudoInst()) 11381 continue; 11382 // This is an unknown instruction. Assume it escapes or writes to all 11383 // static alloca operands. 11384 for (const Use &U : I.operands()) { 11385 if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U)) 11386 *Info = StaticAllocaInfo::Clobbered; 11387 } 11388 continue; 11389 } 11390 11391 // If the stored value is a static alloca, mark it as escaped. 11392 if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand())) 11393 *Info = StaticAllocaInfo::Clobbered; 11394 11395 // Check if the destination is a static alloca. 11396 const Value *Dst = SI->getPointerOperand()->stripPointerCasts(); 11397 StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst); 11398 if (!Info) 11399 continue; 11400 const AllocaInst *AI = cast<AllocaInst>(Dst); 11401 11402 // Skip allocas that have been initialized or clobbered. 11403 if (*Info != StaticAllocaInfo::Unknown) 11404 continue; 11405 11406 // Check if the stored value is an argument, and that this store fully 11407 // initializes the alloca. 
11408 // If the argument type has padding bits we can't directly forward a pointer 11409 // as the upper bits may contain garbage. 11410 // Don't elide copies from the same argument twice. 11411 const Value *Val = SI->getValueOperand()->stripPointerCasts(); 11412 const auto *Arg = dyn_cast<Argument>(Val); 11413 if (!Arg || Arg->hasPassPointeeByValueCopyAttr() || 11414 Arg->getType()->isEmptyTy() || 11415 DL.getTypeStoreSize(Arg->getType()) != 11416 DL.getTypeAllocSize(AI->getAllocatedType()) || 11417 !DL.typeSizeEqualsStoreSize(Arg->getType()) || 11418 ArgCopyElisionCandidates.count(Arg)) { 11419 *Info = StaticAllocaInfo::Clobbered; 11420 continue; 11421 } 11422 11423 LLVM_DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI 11424 << '\n'); 11425 11426 // Mark this alloca and store for argument copy elision. 11427 *Info = StaticAllocaInfo::Elidable; 11428 ArgCopyElisionCandidates.insert({Arg, {AI, SI}}); 11429 11430 // Stop scanning if we've seen all arguments. This will happen early in -O0 11431 // builds, which is useful, because -O0 builds have large entry blocks and 11432 // many allocas. 11433 if (ArgCopyElisionCandidates.size() == NumArgs) 11434 break; 11435 } 11436 } 11437 11438 /// Try to elide argument copies from memory into a local alloca. Succeeds if 11439 /// ArgVal is a load from a suitable fixed stack object. 11440 static void tryToElideArgumentCopy( 11441 FunctionLoweringInfo &FuncInfo, SmallVectorImpl<SDValue> &Chains, 11442 DenseMap<int, int> &ArgCopyElisionFrameIndexMap, 11443 SmallPtrSetImpl<const Instruction *> &ElidedArgCopyInstrs, 11444 ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg, 11445 ArrayRef<SDValue> ArgVals, bool &ArgHasUses) { 11446 // Check if this is a load from a fixed stack object. 11447 auto *LNode = dyn_cast<LoadSDNode>(ArgVals[0]); 11448 if (!LNode) 11449 return; 11450 auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()); 11451 if (!FINode) 11452 return; 11453 11454 // Check that the fixed stack object is the right size and alignment. 11455 // Look at the alignment that the user wrote on the alloca instead of looking 11456 // at the stack object. 11457 auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg); 11458 assert(ArgCopyIter != ArgCopyElisionCandidates.end()); 11459 const AllocaInst *AI = ArgCopyIter->second.first; 11460 int FixedIndex = FINode->getIndex(); 11461 int &AllocaIndex = FuncInfo.StaticAllocaMap[AI]; 11462 int OldIndex = AllocaIndex; 11463 MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo(); 11464 if (MFI.getObjectSize(FixedIndex) != MFI.getObjectSize(OldIndex)) { 11465 LLVM_DEBUG( 11466 dbgs() << " argument copy elision failed due to bad fixed stack " 11467 "object size\n"); 11468 return; 11469 } 11470 Align RequiredAlignment = AI->getAlign(); 11471 if (MFI.getObjectAlign(FixedIndex) < RequiredAlignment) { 11472 LLVM_DEBUG(dbgs() << " argument copy elision failed: alignment of alloca " 11473 "greater than stack argument alignment (" 11474 << DebugStr(RequiredAlignment) << " vs " 11475 << DebugStr(MFI.getObjectAlign(FixedIndex)) << ")\n"); 11476 return; 11477 } 11478 11479 // Perform the elision. Delete the old stack object and replace its only use 11480 // in the variable info map. Mark the stack object as mutable and aliased. 
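// After the elision below, references to the old alloca frame index resolve to the caller-provided fixed stack object instead: AllocaIndex is redirected to FixedIndex, and the remap is recorded so that stale frame indices in the variable debug info can be updated later.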
11481 LLVM_DEBUG({ 11482 dbgs() << "Eliding argument copy from " << Arg << " to " << *AI << '\n' 11483 << " Replacing frame index " << OldIndex << " with " << FixedIndex 11484 << '\n'; 11485 }); 11486 MFI.RemoveStackObject(OldIndex); 11487 MFI.setIsImmutableObjectIndex(FixedIndex, false); 11488 MFI.setIsAliasedObjectIndex(FixedIndex, true); 11489 AllocaIndex = FixedIndex; 11490 ArgCopyElisionFrameIndexMap.insert({OldIndex, FixedIndex}); 11491 for (SDValue ArgVal : ArgVals) 11492 Chains.push_back(ArgVal.getValue(1)); 11493 11494 // Avoid emitting code for the store implementing the copy. 11495 const StoreInst *SI = ArgCopyIter->second.second; 11496 ElidedArgCopyInstrs.insert(SI); 11497 11498 // Check for uses of the argument again so that we can avoid exporting ArgVal 11499 // if it isn't used by anything other than the store. 11500 for (const Value *U : Arg.users()) { 11501 if (U != SI) { 11502 ArgHasUses = true; 11503 break; 11504 } 11505 } 11506 } 11507 11508 void SelectionDAGISel::LowerArguments(const Function &F) { 11509 SelectionDAG &DAG = SDB->DAG; 11510 SDLoc dl = SDB->getCurSDLoc(); 11511 const DataLayout &DL = DAG.getDataLayout(); 11512 SmallVector<ISD::InputArg, 16> Ins; 11513 11514 // In Naked functions we aren't going to save any registers. 11515 if (F.hasFnAttribute(Attribute::Naked)) 11516 return; 11517 11518 if (!FuncInfo->CanLowerReturn) { 11519 // Put in an sret pointer parameter before all the other parameters. 11520 SmallVector<EVT, 1> ValueVTs; 11521 ComputeValueVTs(*TLI, DAG.getDataLayout(), 11522 PointerType::get(F.getContext(), 11523 DAG.getDataLayout().getAllocaAddrSpace()), 11524 ValueVTs); 11525 11526 // NOTE: Assuming that a pointer will never break down to more than one VT 11527 // or one register. 11528 ISD::ArgFlagsTy Flags; 11529 Flags.setSRet(); 11530 MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]); 11531 ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true, 11532 ISD::InputArg::NoArgIndex, 0); 11533 Ins.push_back(RetArg); 11534 } 11535 11536 // Look for stores of arguments to static allocas. Mark such arguments with a 11537 // flag to ask the target to give us the memory location of that argument if 11538 // available. 11539 ArgCopyElisionMapTy ArgCopyElisionCandidates; 11540 findArgumentCopyElisionCandidates(DL, FuncInfo.get(), 11541 ArgCopyElisionCandidates); 11542 11543 // Set up the incoming argument description vector.
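// One ISD::InputArg is pushed per legal register-sized part of each argument value. As a sketch: an i128 argument on a typical 64-bit target expands to two i64 parts, the first flagged as a split and the last as the split end.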
11544 for (const Argument &Arg : F.args()) { 11545 unsigned ArgNo = Arg.getArgNo(); 11546 SmallVector<EVT, 4> ValueVTs; 11547 ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs); 11548 bool isArgValueUsed = !Arg.use_empty(); 11549 unsigned PartBase = 0; 11550 Type *FinalType = Arg.getType(); 11551 if (Arg.hasAttribute(Attribute::ByVal)) 11552 FinalType = Arg.getParamByValType(); 11553 bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters( 11554 FinalType, F.getCallingConv(), F.isVarArg(), DL); 11555 for (unsigned Value = 0, NumValues = ValueVTs.size(); 11556 Value != NumValues; ++Value) { 11557 EVT VT = ValueVTs[Value]; 11558 Type *ArgTy = VT.getTypeForEVT(*DAG.getContext()); 11559 ISD::ArgFlagsTy Flags; 11560 11561 11562 if (Arg.getType()->isPointerTy()) { 11563 Flags.setPointer(); 11564 Flags.setPointerAddrSpace( 11565 cast<PointerType>(Arg.getType())->getAddressSpace()); 11566 } 11567 if (Arg.hasAttribute(Attribute::ZExt)) 11568 Flags.setZExt(); 11569 if (Arg.hasAttribute(Attribute::SExt)) 11570 Flags.setSExt(); 11571 if (Arg.hasAttribute(Attribute::InReg)) { 11572 // If we are using the vectorcall calling convention, a structure that is 11573 // passed InReg is surely an HVA. 11574 if (F.getCallingConv() == CallingConv::X86_VectorCall && 11575 isa<StructType>(Arg.getType())) { 11576 // The first value of a structure is marked as HvaStart. 11577 if (0 == Value) 11578 Flags.setHvaStart(); 11579 Flags.setHva(); 11580 } 11581 // Set the InReg flag. 11582 Flags.setInReg(); 11583 } 11584 if (Arg.hasAttribute(Attribute::StructRet)) 11585 Flags.setSRet(); 11586 if (Arg.hasAttribute(Attribute::SwiftSelf)) 11587 Flags.setSwiftSelf(); 11588 if (Arg.hasAttribute(Attribute::SwiftAsync)) 11589 Flags.setSwiftAsync(); 11590 if (Arg.hasAttribute(Attribute::SwiftError)) 11591 Flags.setSwiftError(); 11592 if (Arg.hasAttribute(Attribute::ByVal)) 11593 Flags.setByVal(); 11594 if (Arg.hasAttribute(Attribute::ByRef)) 11595 Flags.setByRef(); 11596 if (Arg.hasAttribute(Attribute::InAlloca)) { 11597 Flags.setInAlloca(); 11598 // Set the byval flag for CCAssignFn callbacks that don't know about 11599 // inalloca. This way we can know how many bytes we should've allocated 11600 // and how many bytes a callee cleanup function will pop. If we port 11601 // inalloca to more targets, we'll have to add custom inalloca handling 11602 // in the various CC lowering callbacks. 11603 Flags.setByVal(); 11604 } 11605 if (Arg.hasAttribute(Attribute::Preallocated)) { 11606 Flags.setPreallocated(); 11607 // Set the byval flag for CCAssignFn callbacks that don't know about 11608 // preallocated. This way we can know how many bytes we should've 11609 // allocated and how many bytes a callee cleanup function will pop. If 11610 // we port preallocated to more targets, we'll have to add custom 11611 // preallocated handling in the various CC lowering callbacks. 11612 Flags.setByVal(); 11613 } 11614 11615 // Certain targets (such as MIPS) may have a different ABI alignment 11616 // for a type depending on the context. Give the target a chance to 11617 // specify the alignment it wants.
11618 const Align OriginalAlignment( 11619 TLI->getABIAlignmentForCallingConv(ArgTy, DL)); 11620 Flags.setOrigAlign(OriginalAlignment); 11621 11622 Align MemAlign; 11623 Type *ArgMemTy = nullptr; 11624 if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated() || 11625 Flags.isByRef()) { 11626 if (!ArgMemTy) 11627 ArgMemTy = Arg.getPointeeInMemoryValueType(); 11628 11629 uint64_t MemSize = DL.getTypeAllocSize(ArgMemTy); 11630 11631 // For in-memory arguments, size and alignment should be passed from FE. 11632 // BE will guess if this info is not there but there are cases it cannot 11633 // get right. 11634 if (auto ParamAlign = Arg.getParamStackAlign()) 11635 MemAlign = *ParamAlign; 11636 else if ((ParamAlign = Arg.getParamAlign())) 11637 MemAlign = *ParamAlign; 11638 else 11639 MemAlign = Align(TLI->getByValTypeAlignment(ArgMemTy, DL)); 11640 if (Flags.isByRef()) 11641 Flags.setByRefSize(MemSize); 11642 else 11643 Flags.setByValSize(MemSize); 11644 } else if (auto ParamAlign = Arg.getParamStackAlign()) { 11645 MemAlign = *ParamAlign; 11646 } else { 11647 MemAlign = OriginalAlignment; 11648 } 11649 Flags.setMemAlign(MemAlign); 11650 11651 if (Arg.hasAttribute(Attribute::Nest)) 11652 Flags.setNest(); 11653 if (NeedsRegBlock) 11654 Flags.setInConsecutiveRegs(); 11655 if (ArgCopyElisionCandidates.count(&Arg)) 11656 Flags.setCopyElisionCandidate(); 11657 if (Arg.hasAttribute(Attribute::Returned)) 11658 Flags.setReturned(); 11659 11660 MVT RegisterVT = TLI->getRegisterTypeForCallingConv( 11661 *CurDAG->getContext(), F.getCallingConv(), VT); 11662 unsigned NumRegs = TLI->getNumRegistersForCallingConv( 11663 *CurDAG->getContext(), F.getCallingConv(), VT); 11664 for (unsigned i = 0; i != NumRegs; ++i) { 11665 // For scalable vectors, use the minimum size; individual targets 11666 // are responsible for handling scalable vector arguments and 11667 // return values. 11668 ISD::InputArg MyFlags( 11669 Flags, RegisterVT, VT, isArgValueUsed, ArgNo, 11670 PartBase + i * RegisterVT.getStoreSize().getKnownMinValue()); 11671 if (NumRegs > 1 && i == 0) 11672 MyFlags.Flags.setSplit(); 11673 // if it isn't first piece, alignment must be 1 11674 else if (i > 0) { 11675 MyFlags.Flags.setOrigAlign(Align(1)); 11676 if (i == NumRegs - 1) 11677 MyFlags.Flags.setSplitEnd(); 11678 } 11679 Ins.push_back(MyFlags); 11680 } 11681 if (NeedsRegBlock && Value == NumValues - 1) 11682 Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast(); 11683 PartBase += VT.getStoreSize().getKnownMinValue(); 11684 } 11685 } 11686 11687 // Call the target to set up the argument values. 11688 SmallVector<SDValue, 8> InVals; 11689 SDValue NewRoot = TLI->LowerFormalArguments( 11690 DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals); 11691 11692 // Verify that the target's LowerFormalArguments behaved as expected. 11693 assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other && 11694 "LowerFormalArguments didn't return a valid chain!"); 11695 assert(InVals.size() == Ins.size() && 11696 "LowerFormalArguments didn't emit the correct number of values!"); 11697 LLVM_DEBUG({ 11698 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 11699 assert(InVals[i].getNode() && 11700 "LowerFormalArguments emitted a null value!"); 11701 assert(EVT(Ins[i].VT) == InVals[i].getValueType() && 11702 "LowerFormalArguments emitted a value with the wrong type!"); 11703 } 11704 }); 11705 11706 // Update the DAG with the new chain value resulting from argument lowering. 
11707 DAG.setRoot(NewRoot); 11708 11709 // Set up the argument values. 11710 unsigned i = 0; 11711 if (!FuncInfo->CanLowerReturn) { 11712 // Create a virtual register for the sret pointer, and put in a copy 11713 // from the sret argument into it. 11714 SmallVector<EVT, 1> ValueVTs; 11715 ComputeValueVTs(*TLI, DAG.getDataLayout(), 11716 PointerType::get(F.getContext(), 11717 DAG.getDataLayout().getAllocaAddrSpace()), 11718 ValueVTs); 11719 MVT VT = ValueVTs[0].getSimpleVT(); 11720 MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT); 11721 std::optional<ISD::NodeType> AssertOp; 11722 SDValue ArgValue = 11723 getCopyFromParts(DAG, dl, &InVals[0], 1, RegVT, VT, nullptr, NewRoot, 11724 F.getCallingConv(), AssertOp); 11725 11726 MachineFunction& MF = SDB->DAG.getMachineFunction(); 11727 MachineRegisterInfo& RegInfo = MF.getRegInfo(); 11728 Register SRetReg = 11729 RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT)); 11730 FuncInfo->DemoteRegister = SRetReg; 11731 NewRoot = 11732 SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue); 11733 DAG.setRoot(NewRoot); 11734 11735 // i indexes lowered arguments. Bump it past the hidden sret argument. 11736 ++i; 11737 } 11738 11739 SmallVector<SDValue, 4> Chains; 11740 DenseMap<int, int> ArgCopyElisionFrameIndexMap; 11741 for (const Argument &Arg : F.args()) { 11742 SmallVector<SDValue, 4> ArgValues; 11743 SmallVector<EVT, 4> ValueVTs; 11744 ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs); 11745 unsigned NumValues = ValueVTs.size(); 11746 if (NumValues == 0) 11747 continue; 11748 11749 bool ArgHasUses = !Arg.use_empty(); 11750 11751 // Elide the copying store if the target loaded this argument from a 11752 // suitable fixed stack object. 11753 if (Ins[i].Flags.isCopyElisionCandidate()) { 11754 unsigned NumParts = 0; 11755 for (EVT VT : ValueVTs) 11756 NumParts += TLI->getNumRegistersForCallingConv(*CurDAG->getContext(), 11757 F.getCallingConv(), VT); 11758 11759 tryToElideArgumentCopy(*FuncInfo, Chains, ArgCopyElisionFrameIndexMap, 11760 ElidedArgCopyInstrs, ArgCopyElisionCandidates, Arg, 11761 ArrayRef(&InVals[i], NumParts), ArgHasUses); 11762 } 11763 11764 // If this argument is unused then remember its value. It is used to generate 11765 // debugging information. 11766 bool isSwiftErrorArg = 11767 TLI->supportSwiftError() && 11768 Arg.hasAttribute(Attribute::SwiftError); 11769 if (!ArgHasUses && !isSwiftErrorArg) { 11770 SDB->setUnusedArgValue(&Arg, InVals[i]); 11771 11772 // Also remember any frame index for use in FastISel. 11773 if (FrameIndexSDNode *FI = 11774 dyn_cast<FrameIndexSDNode>(InVals[i].getNode())) 11775 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex()); 11776 } 11777 11778 for (unsigned Val = 0; Val != NumValues; ++Val) { 11779 EVT VT = ValueVTs[Val]; 11780 MVT PartVT = TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(), 11781 F.getCallingConv(), VT); 11782 unsigned NumParts = TLI->getNumRegistersForCallingConv( 11783 *CurDAG->getContext(), F.getCallingConv(), VT); 11784 11785 // Even an apparent 'unused' swifterror argument needs to be returned. So 11786 // we do generate a copy for it that can be used on return from the 11787 // function. 
11788 if (ArgHasUses || isSwiftErrorArg) { 11789 std::optional<ISD::NodeType> AssertOp; 11790 if (Arg.hasAttribute(Attribute::SExt)) 11791 AssertOp = ISD::AssertSext; 11792 else if (Arg.hasAttribute(Attribute::ZExt)) 11793 AssertOp = ISD::AssertZext; 11794 11795 ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts, 11796 PartVT, VT, nullptr, NewRoot, 11797 F.getCallingConv(), AssertOp)); 11798 } 11799 11800 i += NumParts; 11801 } 11802 11803 // We don't need to do anything else for unused arguments. 11804 if (ArgValues.empty()) 11805 continue; 11806 11807 // Note down frame index. 11808 if (FrameIndexSDNode *FI = 11809 dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode())) 11810 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex()); 11811 11812 SDValue Res = DAG.getMergeValues(ArrayRef(ArgValues.data(), NumValues), 11813 SDB->getCurSDLoc()); 11814 11815 SDB->setValue(&Arg, Res); 11816 if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) { 11817 // We want to associate the argument with the frame index, among 11818 // involved operands, that correspond to the lowest address. The 11819 // getCopyFromParts function, called earlier, is swapping the order of 11820 // the operands to BUILD_PAIR depending on endianness. The result of 11821 // that swapping is that the least significant bits of the argument will 11822 // be in the first operand of the BUILD_PAIR node, and the most 11823 // significant bits will be in the second operand. 11824 unsigned LowAddressOp = DAG.getDataLayout().isBigEndian() ? 1 : 0; 11825 if (LoadSDNode *LNode = 11826 dyn_cast<LoadSDNode>(Res.getOperand(LowAddressOp).getNode())) 11827 if (FrameIndexSDNode *FI = 11828 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode())) 11829 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex()); 11830 } 11831 11832 // Analyses past this point are naive and don't expect an assertion. 11833 if (Res.getOpcode() == ISD::AssertZext) 11834 Res = Res.getOperand(0); 11835 11836 // Update the SwiftErrorVRegDefMap. 11837 if (Res.getOpcode() == ISD::CopyFromReg && isSwiftErrorArg) { 11838 Register Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg(); 11839 if (Reg.isVirtual()) 11840 SwiftError->setCurrentVReg(FuncInfo->MBB, SwiftError->getFunctionArg(), 11841 Reg); 11842 } 11843 11844 // If this argument is live outside of the entry block, insert a copy from 11845 // wherever we got it to the vreg that other BB's will reference it as. 11846 if (Res.getOpcode() == ISD::CopyFromReg) { 11847 // If we can, though, try to skip creating an unnecessary vreg. 11848 // FIXME: This isn't very clean... it would be nice to make this more 11849 // general. 11850 Register Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg(); 11851 if (Reg.isVirtual()) { 11852 FuncInfo->ValueMap[&Arg] = Reg; 11853 continue; 11854 } 11855 } 11856 if (!isOnlyUsedInEntryBlock(&Arg, TM.Options.EnableFastISel)) { 11857 FuncInfo->InitializeRegForValue(&Arg); 11858 SDB->CopyToExportRegsIfNeeded(&Arg); 11859 } 11860 } 11861 11862 if (!Chains.empty()) { 11863 Chains.push_back(NewRoot); 11864 NewRoot = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains); 11865 } 11866 11867 DAG.setRoot(NewRoot); 11868 11869 assert(i == InVals.size() && "Argument register count mismatch!"); 11870 11871 // If any argument copy elisions occurred and we have debug info, update the 11872 // stale frame indices used in the dbg.declare variable info table. 
11873   if (!ArgCopyElisionFrameIndexMap.empty()) {
11874     for (MachineFunction::VariableDbgInfo &VI :
11875          MF->getInStackSlotVariableDbgInfo()) {
11876       auto I = ArgCopyElisionFrameIndexMap.find(VI.getStackSlot());
11877       if (I != ArgCopyElisionFrameIndexMap.end())
11878         VI.updateStackSlot(I->second);
11879     }
11880   }
11881
11882   // Finally, if the target has anything special to do, allow it to do so.
11883   emitFunctionEntryCode();
11884 }
11885
11886 /// Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
11887 /// ensure constants are generated when needed. Remember the virtual registers
11888 /// that need to be added to the Machine PHI nodes as input. We cannot just
11889 /// directly add them, because expansion might result in multiple MBBs for one
11890 /// BB. As such, the start of the BB might correspond to a different MBB than
11891 /// the end.
11892 void
11893 SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
11894   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11895
11896   SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
11897
11898   // Check PHI nodes in successors that expect a value to be available from this
11899   // block.
11900   for (const BasicBlock *SuccBB : successors(LLVMBB->getTerminator())) {
11901     if (!isa<PHINode>(SuccBB->begin())) continue;
11902     MachineBasicBlock *SuccMBB = FuncInfo.getMBB(SuccBB);
11903
11904     // If this terminator has multiple identical successors (common for
11905     // switches), only handle each succ once.
11906     if (!SuccsHandled.insert(SuccMBB).second)
11907       continue;
11908
11909     MachineBasicBlock::iterator MBBI = SuccMBB->begin();
11910
11911     // At this point we know that there is a 1-1 correspondence between LLVM PHI
11912     // nodes and Machine PHI nodes, but the incoming operands have not been
11913     // emitted yet.
11914     for (const PHINode &PN : SuccBB->phis()) {
11915       // Ignore dead PHIs.
11916       if (PN.use_empty())
11917         continue;
11918
11919       // Skip empty types.
11920       if (PN.getType()->isEmptyTy())
11921         continue;
11922
11923       unsigned Reg;
11924       const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
11925
11926       if (const auto *C = dyn_cast<Constant>(PHIOp)) {
11927         unsigned &RegOut = ConstantsOut[C];
11928         if (RegOut == 0) {
11929           RegOut = FuncInfo.CreateRegs(C);
11930           // We need to zero/sign extend ConstantInt phi operands to match
11931           // assumptions in FunctionLoweringInfo::ComputePHILiveOutRegInfo.
11932           ISD::NodeType ExtendType = ISD::ANY_EXTEND;
11933           if (auto *CI = dyn_cast<ConstantInt>(C))
11934             ExtendType = TLI.signExtendConstant(CI) ? ISD::SIGN_EXTEND
11935                                                     : ISD::ZERO_EXTEND;
11936           CopyValueToVirtualRegister(C, RegOut, ExtendType);
11937         }
11938         Reg = RegOut;
11939       } else {
11940         DenseMap<const Value *, Register>::iterator I =
11941             FuncInfo.ValueMap.find(PHIOp);
11942         if (I != FuncInfo.ValueMap.end())
11943           Reg = I->second;
11944         else {
11945           assert(isa<AllocaInst>(PHIOp) &&
11946                  FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
11947                  "Didn't codegen value into a register!??");
11948           Reg = FuncInfo.CreateRegs(PHIOp);
11949           CopyValueToVirtualRegister(PHIOp, Reg);
11950         }
11951       }
11952
11953       // Remember that this register needs to be added to the machine PHI node
11954       // as the input for this MBB.
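      // A single IR value can require several machine registers: e.g. an i64
      // PHI operand on a 32-bit target legalizes to two i32 registers, so the
      // loop below records one machine PHI operand per register.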
11955       SmallVector<EVT, 4> ValueVTs;
11956       ComputeValueVTs(TLI, DAG.getDataLayout(), PN.getType(), ValueVTs);
11957       for (EVT VT : ValueVTs) {
11958         const unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
11959         for (unsigned i = 0; i != NumRegisters; ++i)
11960           FuncInfo.PHINodesToUpdate.push_back(
11961               std::make_pair(&*MBBI++, Reg + i));
11962         Reg += NumRegisters;
11963       }
11964     }
11965   }
11966
11967   ConstantsOut.clear();
11968 }
11969
11970 MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) {
11971   MachineFunction::iterator I(MBB);
11972   if (++I == FuncInfo.MF->end())
11973     return nullptr;
11974   return &*I;
11975 }
11976
11977 /// During lowering new call nodes can be created (such as memset, etc.).
11978 /// Those will become new roots of the current DAG, but complications arise
11979 /// when they are tail calls. In such cases, the call lowering will update
11980 /// the root, but the builder still needs to know that a tail call has been
11981 /// lowered in order to avoid generating an additional return.
11982 void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) {
11983   // If the node is null, a tail call has been lowered and there is no new root.
11984   if (MaybeTC.getNode() != nullptr)
11985     DAG.setRoot(MaybeTC);
11986   else
11987     HasTailCall = true;
11988 }
11989
11990 void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
11991                                         MachineBasicBlock *SwitchMBB,
11992                                         MachineBasicBlock *DefaultMBB) {
11993   MachineFunction *CurMF = FuncInfo.MF;
11994   MachineBasicBlock *NextMBB = nullptr;
11995   MachineFunction::iterator BBI(W.MBB);
11996   if (++BBI != FuncInfo.MF->end())
11997     NextMBB = &*BBI;
11998
11999   unsigned Size = W.LastCluster - W.FirstCluster + 1;
12000
12001   BranchProbabilityInfo *BPI = FuncInfo.BPI;
12002
12003   if (Size == 2 && W.MBB == SwitchMBB) {
12004     // If any two of the cases have the same destination, and if one value
12005     // is the same as the other, but has one bit unset that the other has set,
12006     // use bit manipulation to do two compares at once. For example:
12007     // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
12008     // TODO: This could be extended to merge any 2 cases in switches with 3
12009     // cases.
12010     // TODO: Handle cases where W.CaseBB != SwitchBB.
12011     CaseCluster &Small = *W.FirstCluster;
12012     CaseCluster &Big = *W.LastCluster;
12013
12014     if (Small.Low == Small.High && Big.Low == Big.High &&
12015         Small.MBB == Big.MBB) {
12016       const APInt &SmallValue = Small.Low->getValue();
12017       const APInt &BigValue = Big.Low->getValue();
12018
12019       // Check that there is only one bit different.
12020       APInt CommonBit = BigValue ^ SmallValue;
12021       if (CommonBit.isPowerOf2()) {
12022         SDValue CondLHS = getValue(Cond);
12023         EVT VT = CondLHS.getValueType();
12024         SDLoc DL = getCurSDLoc();
12025
12026         SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
12027                                  DAG.getConstant(CommonBit, DL, VT));
12028         SDValue Cond = DAG.getSetCC(
12029             DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
12030             ISD::SETEQ);
12031
12032         // Update successor info.
12033         // Both Small and Big will jump to Small.BB, so we sum up the
12034         // probabilities.
12035         addSuccessorWithProb(SwitchMBB, Small.MBB, Small.Prob + Big.Prob);
12036         if (BPI)
12037           addSuccessorWithProb(
12038               SwitchMBB, DefaultMBB,
12039               // The default destination is the first successor in IR.
12040               BPI->getEdgeProbability(SwitchMBB->getBasicBlock(), (unsigned)0));
12041         else
12042           addSuccessorWithProb(SwitchMBB, DefaultMBB);
12043
12044         // Insert the true branch.
12045 SDValue BrCond = 12046 DAG.getNode(ISD::BRCOND, DL, MVT::Other, getControlRoot(), Cond, 12047 DAG.getBasicBlock(Small.MBB)); 12048 // Insert the false branch. 12049 BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond, 12050 DAG.getBasicBlock(DefaultMBB)); 12051 12052 DAG.setRoot(BrCond); 12053 return; 12054 } 12055 } 12056 } 12057 12058 if (TM.getOptLevel() != CodeGenOptLevel::None) { 12059 // Here, we order cases by probability so the most likely case will be 12060 // checked first. However, two clusters can have the same probability in 12061 // which case their relative ordering is non-deterministic. So we use Low 12062 // as a tie-breaker as clusters are guaranteed to never overlap. 12063 llvm::sort(W.FirstCluster, W.LastCluster + 1, 12064 [](const CaseCluster &a, const CaseCluster &b) { 12065 return a.Prob != b.Prob ? 12066 a.Prob > b.Prob : 12067 a.Low->getValue().slt(b.Low->getValue()); 12068 }); 12069 12070 // Rearrange the case blocks so that the last one falls through if possible 12071 // without changing the order of probabilities. 12072 for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster; ) { 12073 --I; 12074 if (I->Prob > W.LastCluster->Prob) 12075 break; 12076 if (I->Kind == CC_Range && I->MBB == NextMBB) { 12077 std::swap(*I, *W.LastCluster); 12078 break; 12079 } 12080 } 12081 } 12082 12083 // Compute total probability. 12084 BranchProbability DefaultProb = W.DefaultProb; 12085 BranchProbability UnhandledProbs = DefaultProb; 12086 for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I) 12087 UnhandledProbs += I->Prob; 12088 12089 MachineBasicBlock *CurMBB = W.MBB; 12090 for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) { 12091 bool FallthroughUnreachable = false; 12092 MachineBasicBlock *Fallthrough; 12093 if (I == W.LastCluster) { 12094 // For the last cluster, fall through to the default destination. 12095 Fallthrough = DefaultMBB; 12096 FallthroughUnreachable = isa<UnreachableInst>( 12097 DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg()); 12098 } else { 12099 Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock()); 12100 CurMF->insert(BBI, Fallthrough); 12101 // Put Cond in a virtual register to make it available from the new blocks. 12102 ExportFromCurrentBlock(Cond); 12103 } 12104 UnhandledProbs -= I->Prob; 12105 12106 switch (I->Kind) { 12107 case CC_JumpTable: { 12108 // FIXME: Optimize away range check based on pivot comparisons. 12109 JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first; 12110 SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second; 12111 12112 // The jump block hasn't been inserted yet; insert it here. 12113 MachineBasicBlock *JumpMBB = JT->MBB; 12114 CurMF->insert(BBI, JumpMBB); 12115 12116 auto JumpProb = I->Prob; 12117 auto FallthroughProb = UnhandledProbs; 12118 12119 // If the default statement is a target of the jump table, we evenly 12120 // distribute the default probability to successors of CurMBB. Also 12121 // update the probability on the edge from JumpMBB to Fallthrough. 
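      // Worked example: if DefaultProb is 1/4, half of it (1/8) is added to
      // the CurMBB->JumpMBB edge, the JumpMBB->DefaultMBB edge is set to that
      // same 1/8, and the probability left for the fallthrough edge drops by
      // 1/8.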
12122       for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
12123                                              SE = JumpMBB->succ_end();
12124            SI != SE; ++SI) {
12125         if (*SI == DefaultMBB) {
12126           JumpProb += DefaultProb / 2;
12127           FallthroughProb -= DefaultProb / 2;
12128           JumpMBB->setSuccProbability(SI, DefaultProb / 2);
12129           JumpMBB->normalizeSuccProbs();
12130           break;
12131         }
12132       }
12133
12134       // If the default clause is unreachable, propagate that knowledge into
12135       // JTH->FallthroughUnreachable which will use it to suppress the range
12136       // check.
12137       //
12138       // However, don't do this if we're doing branch target enforcement,
12139       // because a table branch _without_ a range check can be a tempting JOP
12140       // gadget - out-of-bounds inputs that are impossible in correct
12141       // execution become possible again if an attacker can influence the
12142       // control flow. So if an attacker doesn't already have a BTI bypass
12143       // available, we don't want them to be able to get one out of this
12144       // table branch.
12145       if (FallthroughUnreachable) {
12146         Function &CurFunc = CurMF->getFunction();
12147         if (!CurFunc.hasFnAttribute("branch-target-enforcement"))
12148           JTH->FallthroughUnreachable = true;
12149       }
12150
12151       if (!JTH->FallthroughUnreachable)
12152         addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
12153       addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
12154       CurMBB->normalizeSuccProbs();
12155
12156       // The jump table header will be inserted in our current block, do the
12157       // range check, and fall through to our fallthrough block.
12158       JTH->HeaderBB = CurMBB;
12159       JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
12160
12161       // If we're in the right place, emit the jump table header right now.
12162       if (CurMBB == SwitchMBB) {
12163         visitJumpTableHeader(*JT, *JTH, SwitchMBB);
12164         JTH->Emitted = true;
12165       }
12166       break;
12167     }
12168     case CC_BitTests: {
12169       // FIXME: Optimize away range check based on pivot comparisons.
12170       BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
12171
12172       // The bit test blocks haven't been inserted yet; insert them here.
12173       for (BitTestCase &BTC : BTB->Cases)
12174         CurMF->insert(BBI, BTC.ThisBB);
12175
12176       // Fill in fields of the BitTestBlock.
12177       BTB->Parent = CurMBB;
12178       BTB->Default = Fallthrough;
12179
12180       BTB->DefaultProb = UnhandledProbs;
12181       // If the cases in bit test don't form a contiguous range, we evenly
12182       // distribute the probability on the edge to Fallthrough to two
12183       // successors of CurMBB.
12184       if (!BTB->ContiguousRange) {
12185         BTB->Prob += DefaultProb / 2;
12186         BTB->DefaultProb -= DefaultProb / 2;
12187       }
12188
12189       if (FallthroughUnreachable)
12190         BTB->FallthroughUnreachable = true;
12191
12192       // If we're in the right place, emit the bit test header right now.
12193       if (CurMBB == SwitchMBB) {
12194         visitBitTestHeader(*BTB, SwitchMBB);
12195         BTB->Emitted = true;
12196       }
12197       break;
12198     }
12199     case CC_Range: {
12200       const Value *RHS, *LHS, *MHS;
12201       ISD::CondCode CC;
12202       if (I->Low == I->High) {
12203         // Check Cond == I->Low.
12204         CC = ISD::SETEQ;
12205         LHS = Cond;
12206         RHS = I->Low;
12207         MHS = nullptr;
12208       } else {
12209         // Check I->Low <= Cond <= I->High.
12210         CC = ISD::SETLE;
12211         LHS = I->Low;
12212         MHS = Cond;
12213         RHS = I->High;
12214       }
12215
12216       // If Fallthrough is unreachable, fold away the comparison.
12217       if (FallthroughUnreachable)
12218         CC = ISD::SETTRUE;
12219
12220       // The false probability is the sum of all unhandled cases.
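      // For example, a cluster covering [3, 7] produces the range check
      // 3 <= Cond && Cond <= 7, which visitSwitchCase can lower to a single
      // unsigned comparison (Cond - 3) <= 4.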
12221       CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB,
12222                    getCurSDLoc(), I->Prob, UnhandledProbs);
12223
12224       if (CurMBB == SwitchMBB)
12225         visitSwitchCase(CB, SwitchMBB);
12226       else
12227         SL->SwitchCases.push_back(CB);
12228
12229       break;
12230     }
12231     }
12232     CurMBB = Fallthrough;
12233   }
12234 }
12235
12236 void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
12237                                         const SwitchWorkListItem &W,
12238                                         Value *Cond,
12239                                         MachineBasicBlock *SwitchMBB) {
12240   assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
12241          "Clusters not sorted?");
12242   assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
12243
12244   auto [LastLeft, FirstRight, LeftProb, RightProb] =
12245       SL->computeSplitWorkItemInfo(W);
12246
12247   // Use the first element on the right as pivot since we will make less-than
12248   // comparisons against it.
12249   CaseClusterIt PivotCluster = FirstRight;
12250   assert(PivotCluster > W.FirstCluster);
12251   assert(PivotCluster <= W.LastCluster);
12252
12253   CaseClusterIt FirstLeft = W.FirstCluster;
12254   CaseClusterIt LastRight = W.LastCluster;
12255
12256   const ConstantInt *Pivot = PivotCluster->Low;
12257
12258   // New blocks will be inserted immediately after the current one.
12259   MachineFunction::iterator BBI(W.MBB);
12260   ++BBI;
12261
12262   // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
12263   // we can branch to its destination directly if it's squeezed exactly in
12264   // between the known lower bound and Pivot - 1.
12265   MachineBasicBlock *LeftMBB;
12266   if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
12267       FirstLeft->Low == W.GE &&
12268       (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
12269     LeftMBB = FirstLeft->MBB;
12270   } else {
12271     LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
12272     FuncInfo.MF->insert(BBI, LeftMBB);
12273     WorkList.push_back(
12274         {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
12275     // Put Cond in a virtual register to make it available from the new blocks.
12276     ExportFromCurrentBlock(Cond);
12277   }
12278
12279   // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
12280   // single cluster, RHS.Low == Pivot, and we can branch to its destination
12281   // directly if RHS.High equals the current upper bound.
12282   MachineBasicBlock *RightMBB;
12283   if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
12284       W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
12285     RightMBB = FirstRight->MBB;
12286   } else {
12287     RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
12288     FuncInfo.MF->insert(BBI, RightMBB);
12289     WorkList.push_back(
12290         {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
12291     // Put Cond in a virtual register to make it available from the new blocks.
12292     ExportFromCurrentBlock(Cond);
12293   }
12294
12295   // Create the CaseBlock record that will be used to lower the branch.
12296   CaseBlock CB(ISD::SETLT, Cond, Pivot, nullptr, LeftMBB, RightMBB, W.MBB,
12297                getCurSDLoc(), LeftProb, RightProb);
12298
12299   if (W.MBB == SwitchMBB)
12300     visitSwitchCase(CB, SwitchMBB);
12301   else
12302     SL->SwitchCases.push_back(CB);
12303 }
12304
12305 // Scale CaseProb after peeling a case with the probability of PeeledCaseProb
12306 // from the switch statement.
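// In effect this computes CaseProb / (1 - PeeledCaseProb), clamped to one:
// e.g. after peeling a case with probability 2/3, a remaining case with
// probability 1/6 is rescaled to (1/6) / (1/3) == 1/2.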
12307 static BranchProbability scaleCaseProbality(BranchProbability CaseProb,
12308                                             BranchProbability PeeledCaseProb) {
12309   if (PeeledCaseProb == BranchProbability::getOne())
12310     return BranchProbability::getZero();
12311   BranchProbability SwitchProb = PeeledCaseProb.getCompl();
12312
12313   uint32_t Numerator = CaseProb.getNumerator();
12314   uint32_t Denominator = SwitchProb.scale(CaseProb.getDenominator());
12315   return BranchProbability(Numerator, std::max(Numerator, Denominator));
12316 }
12317
12318 // Try to peel the top probability case if it exceeds the threshold.
12319 // Return the current MachineBasicBlock for the switch statement if the
12320 // peeling does not occur.
12321 // If the peeling is performed, return the newly created MachineBasicBlock
12322 // for the peeled switch statement. Also update Clusters to remove the peeled
12323 // case. PeeledCaseProb is the BranchProbability for the peeled case.
12324 MachineBasicBlock *SelectionDAGBuilder::peelDominantCaseCluster(
12325     const SwitchInst &SI, CaseClusterVector &Clusters,
12326     BranchProbability &PeeledCaseProb) {
12327   MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
12328   // Don't perform if there is only one cluster or optimizing for size.
12329   if (SwitchPeelThreshold > 100 || !FuncInfo.BPI || Clusters.size() < 2 ||
12330       TM.getOptLevel() == CodeGenOptLevel::None ||
12331       SwitchMBB->getParent()->getFunction().hasMinSize())
12332     return SwitchMBB;
12333
12334   BranchProbability TopCaseProb = BranchProbability(SwitchPeelThreshold, 100);
12335   unsigned PeeledCaseIndex = 0;
12336   bool SwitchPeeled = false;
12337   for (unsigned Index = 0; Index < Clusters.size(); ++Index) {
12338     CaseCluster &CC = Clusters[Index];
12339     if (CC.Prob < TopCaseProb)
12340       continue;
12341     TopCaseProb = CC.Prob;
12342     PeeledCaseIndex = Index;
12343     SwitchPeeled = true;
12344   }
12345   if (!SwitchPeeled)
12346     return SwitchMBB;
12347
12348   LLVM_DEBUG(dbgs() << "Peeled one top case in switch stmt, prob: "
12349                     << TopCaseProb << "\n");
12350
12351   // Record the MBB for the peeled switch statement.
12352   MachineFunction::iterator BBI(SwitchMBB);
12353   ++BBI;
12354   MachineBasicBlock *PeeledSwitchMBB =
12355       FuncInfo.MF->CreateMachineBasicBlock(SwitchMBB->getBasicBlock());
12356   FuncInfo.MF->insert(BBI, PeeledSwitchMBB);
12357
12358   ExportFromCurrentBlock(SI.getCondition());
12359   auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
12360   SwitchWorkListItem W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
12361                           nullptr, nullptr, TopCaseProb.getCompl()};
12362   lowerWorkItem(W, SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
12363
12364   Clusters.erase(PeeledCaseIt);
12365   for (CaseCluster &CC : Clusters) {
12366     LLVM_DEBUG(
12367         dbgs() << "Scale the probability for one cluster, before scaling: "
12368                << CC.Prob << "\n");
12369     CC.Prob = scaleCaseProbality(CC.Prob, TopCaseProb);
12370     LLVM_DEBUG(dbgs() << "After scaling: " << CC.Prob << "\n");
12371   }
12372   PeeledCaseProb = TopCaseProb;
12373   return PeeledSwitchMBB;
12374 }
12375
12376 void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
12377   // Extract cases from the switch.
12378   BranchProbabilityInfo *BPI = FuncInfo.BPI;
12379   CaseClusterVector Clusters;
12380   Clusters.reserve(SI.getNumCases());
12381   for (auto I : SI.cases()) {
12382     MachineBasicBlock *Succ = FuncInfo.getMBB(I.getCaseSuccessor());
12383     const ConstantInt *CaseVal = I.getCaseValue();
12384     BranchProbability Prob =
12385         BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
12386             : BranchProbability(1, SI.getNumCases() + 1);
12387     Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
12388   }
12389
12390   MachineBasicBlock *DefaultMBB = FuncInfo.getMBB(SI.getDefaultDest());
12391
12392   // Cluster adjacent cases with the same destination. We do this at all
12393   // optimization levels because it's cheap to do and will make codegen faster
12394   // if there are many clusters.
12395   sortAndRangeify(Clusters);
12396
12397   // The branch probability of the peeled case.
12398   BranchProbability PeeledCaseProb = BranchProbability::getZero();
12399   MachineBasicBlock *PeeledSwitchMBB =
12400       peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
12401
12402   // If there is only the default destination, jump there directly.
12403   MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
12404   if (Clusters.empty()) {
12405     assert(PeeledSwitchMBB == SwitchMBB);
12406     SwitchMBB->addSuccessor(DefaultMBB);
12407     if (DefaultMBB != NextBlock(SwitchMBB)) {
12408       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
12409                               getControlRoot(), DAG.getBasicBlock(DefaultMBB)));
12410     }
12411     return;
12412   }
12413
12414   SL->findJumpTables(Clusters, &SI, getCurSDLoc(), DefaultMBB, DAG.getPSI(),
12415                      DAG.getBFI());
12416   SL->findBitTestClusters(Clusters, &SI);
12417
12418   LLVM_DEBUG({
12419     dbgs() << "Case clusters: ";
12420     for (const CaseCluster &C : Clusters) {
12421       if (C.Kind == CC_JumpTable)
12422         dbgs() << "JT:";
12423       if (C.Kind == CC_BitTests)
12424         dbgs() << "BT:";
12425
12426       C.Low->getValue().print(dbgs(), true);
12427       if (C.Low != C.High) {
12428         dbgs() << '-';
12429         C.High->getValue().print(dbgs(), true);
12430       }
12431       dbgs() << ' ';
12432     }
12433     dbgs() << '\n';
12434   });
12435
12436   assert(!Clusters.empty());
12437   SwitchWorkList WorkList;
12438   CaseClusterIt First = Clusters.begin();
12439   CaseClusterIt Last = Clusters.end() - 1;
12440   auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
12441   // Scale the branch probability for DefaultMBB if the peeling occurs and
12442   // DefaultMBB is not replaced.
12443   if (PeeledCaseProb != BranchProbability::getZero() &&
12444       DefaultMBB == FuncInfo.getMBB(SI.getDefaultDest()))
12445     DefaultProb = scaleCaseProbality(DefaultProb, PeeledCaseProb);
12446   WorkList.push_back(
12447       {PeeledSwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
12448
12449   while (!WorkList.empty()) {
12450     SwitchWorkListItem W = WorkList.pop_back_val();
12451     unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
12452
12453     if (NumClusters > 3 && TM.getOptLevel() != CodeGenOptLevel::None &&
12454         !DefaultMBB->getParent()->getFunction().hasMinSize()) {
12455       // For optimized builds, lower large range as a balanced binary tree.
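      // For example, 8 remaining clusters are split around a pivot into two
      // work items of 4 clusters each, so a value reaches its cluster after
      // O(log N) comparisons rather than a linear scan of all cases.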
12456 splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB); 12457 continue; 12458 } 12459 12460 lowerWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB); 12461 } 12462 } 12463 12464 void SelectionDAGBuilder::visitStepVector(const CallInst &I) { 12465 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 12466 auto DL = getCurSDLoc(); 12467 EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 12468 setValue(&I, DAG.getStepVector(DL, ResultVT)); 12469 } 12470 12471 void SelectionDAGBuilder::visitVectorReverse(const CallInst &I) { 12472 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 12473 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 12474 12475 SDLoc DL = getCurSDLoc(); 12476 SDValue V = getValue(I.getOperand(0)); 12477 assert(VT == V.getValueType() && "Malformed vector.reverse!"); 12478 12479 if (VT.isScalableVector()) { 12480 setValue(&I, DAG.getNode(ISD::VECTOR_REVERSE, DL, VT, V)); 12481 return; 12482 } 12483 12484 // Use VECTOR_SHUFFLE for the fixed-length vector 12485 // to maintain existing behavior. 12486 SmallVector<int, 8> Mask; 12487 unsigned NumElts = VT.getVectorMinNumElements(); 12488 for (unsigned i = 0; i != NumElts; ++i) 12489 Mask.push_back(NumElts - 1 - i); 12490 12491 setValue(&I, DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), Mask)); 12492 } 12493 12494 void SelectionDAGBuilder::visitVectorDeinterleave(const CallInst &I) { 12495 auto DL = getCurSDLoc(); 12496 SDValue InVec = getValue(I.getOperand(0)); 12497 EVT OutVT = 12498 InVec.getValueType().getHalfNumVectorElementsVT(*DAG.getContext()); 12499 12500 unsigned OutNumElts = OutVT.getVectorMinNumElements(); 12501 12502 // ISD Node needs the input vectors split into two equal parts 12503 SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT, InVec, 12504 DAG.getVectorIdxConstant(0, DL)); 12505 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT, InVec, 12506 DAG.getVectorIdxConstant(OutNumElts, DL)); 12507 12508 // Use VECTOR_SHUFFLE for fixed-length vectors to benefit from existing 12509 // legalisation and combines. 12510 if (OutVT.isFixedLengthVector()) { 12511 SDValue Even = DAG.getVectorShuffle(OutVT, DL, Lo, Hi, 12512 createStrideMask(0, 2, OutNumElts)); 12513 SDValue Odd = DAG.getVectorShuffle(OutVT, DL, Lo, Hi, 12514 createStrideMask(1, 2, OutNumElts)); 12515 SDValue Res = DAG.getMergeValues({Even, Odd}, getCurSDLoc()); 12516 setValue(&I, Res); 12517 return; 12518 } 12519 12520 SDValue Res = DAG.getNode(ISD::VECTOR_DEINTERLEAVE, DL, 12521 DAG.getVTList(OutVT, OutVT), Lo, Hi); 12522 setValue(&I, Res); 12523 } 12524 12525 void SelectionDAGBuilder::visitVectorInterleave(const CallInst &I) { 12526 auto DL = getCurSDLoc(); 12527 EVT InVT = getValue(I.getOperand(0)).getValueType(); 12528 SDValue InVec0 = getValue(I.getOperand(0)); 12529 SDValue InVec1 = getValue(I.getOperand(1)); 12530 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 12531 EVT OutVT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 12532 12533 // Use VECTOR_SHUFFLE for fixed-length vectors to benefit from existing 12534 // legalisation and combines. 
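  // For example, interleaving two v4i32 inputs concatenates them into a
  // v8i32 and applies the shuffle mask <0, 4, 1, 5, 2, 6, 3, 7>, so the
  // result alternates lanes from the two sources.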
12535   if (OutVT.isFixedLengthVector()) {
12536     unsigned NumElts = InVT.getVectorMinNumElements();
12537     SDValue V = DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, InVec0, InVec1);
12538     setValue(&I, DAG.getVectorShuffle(OutVT, DL, V, DAG.getUNDEF(OutVT),
12539                                       createInterleaveMask(NumElts, 2)));
12540     return;
12541   }
12542
12543   SDValue Res = DAG.getNode(ISD::VECTOR_INTERLEAVE, DL,
12544                             DAG.getVTList(InVT, InVT), InVec0, InVec1);
12545   Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, Res.getValue(0),
12546                     Res.getValue(1));
12547   setValue(&I, Res);
12548 }
12549
12550 void SelectionDAGBuilder::visitFreeze(const FreezeInst &I) {
12551   SmallVector<EVT, 4> ValueVTs;
12552   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
12553                   ValueVTs);
12554   unsigned NumValues = ValueVTs.size();
12555   if (NumValues == 0) return;
12556
12557   SmallVector<SDValue, 4> Values(NumValues);
12558   SDValue Op = getValue(I.getOperand(0));
12559
12560   for (unsigned i = 0; i != NumValues; ++i)
12561     Values[i] = DAG.getNode(ISD::FREEZE, getCurSDLoc(), ValueVTs[i],
12562                             SDValue(Op.getNode(), Op.getResNo() + i));
12563
12564   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
12565                            DAG.getVTList(ValueVTs), Values));
12566 }
12567
12568 void SelectionDAGBuilder::visitVectorSplice(const CallInst &I) {
12569   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12570   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
12571
12572   SDLoc DL = getCurSDLoc();
12573   SDValue V1 = getValue(I.getOperand(0));
12574   SDValue V2 = getValue(I.getOperand(1));
12575   int64_t Imm = cast<ConstantInt>(I.getOperand(2))->getSExtValue();
12576
12577   // VECTOR_SHUFFLE doesn't support a scalable mask so use a dedicated node.
12578   if (VT.isScalableVector()) {
12579     setValue(
12580         &I, DAG.getNode(ISD::VECTOR_SPLICE, DL, VT, V1, V2,
12581                         DAG.getSignedConstant(
12582                             Imm, DL, TLI.getVectorIdxTy(DAG.getDataLayout()))));
12583     return;
12584   }
12585
12586   unsigned NumElts = VT.getVectorNumElements();
12587
12588   uint64_t Idx = (NumElts + Imm) % NumElts;
12589
12590   // Use VECTOR_SHUFFLE to maintain original behaviour for fixed-length vectors.
12591   SmallVector<int, 8> Mask;
12592   for (unsigned i = 0; i < NumElts; ++i)
12593     Mask.push_back(Idx + i);
12594   setValue(&I, DAG.getVectorShuffle(VT, DL, V1, V2, Mask));
12595 }
12596
12597 // Consider the following MIR after SelectionDAG, which produces output in
12598 // physregs in the first case or virtregs in the second case.
12599 //
12600 // INLINEASM_BR ..., implicit-def $ebx, ..., implicit-def $edx
12601 // %5:gr32 = COPY $ebx
12602 // %6:gr32 = COPY $edx
12603 // %1:gr32 = COPY %6:gr32
12604 // %0:gr32 = COPY %5:gr32
12605 //
12606 // INLINEASM_BR ..., def %5:gr32, ..., def %6:gr32
12607 // %1:gr32 = COPY %6:gr32
12608 // %0:gr32 = COPY %5:gr32
12609 //
12610 // Given %0, we'd like to return $ebx in the first case and %5 in the second.
12611 // Given %1, we'd like to return $edx in the first case and %6 in the second.
12612 //
12613 // If a callbr has outputs, it will have a single mapping in FuncInfo.ValueMap
12614 // to a single virtreg (such as %0). The remaining outputs monotonically
12615 // increase in virtreg number from there. If a callbr has no outputs, then it
12616 // should not have a corresponding callbr landingpad; in fact, the callbr
12617 // landingpad would not even be able to refer to such a callbr.
12618 static Register FollowCopyChain(MachineRegisterInfo &MRI, Register Reg) { 12619 MachineInstr *MI = MRI.def_begin(Reg)->getParent(); 12620 // There is definitely at least one copy. 12621 assert(MI->getOpcode() == TargetOpcode::COPY && 12622 "start of copy chain MUST be COPY"); 12623 Reg = MI->getOperand(1).getReg(); 12624 MI = MRI.def_begin(Reg)->getParent(); 12625 // There may be an optional second copy. 12626 if (MI->getOpcode() == TargetOpcode::COPY) { 12627 assert(Reg.isVirtual() && "expected COPY of virtual register"); 12628 Reg = MI->getOperand(1).getReg(); 12629 assert(Reg.isPhysical() && "expected COPY of physical register"); 12630 MI = MRI.def_begin(Reg)->getParent(); 12631 } 12632 // The start of the chain must be an INLINEASM_BR. 12633 assert(MI->getOpcode() == TargetOpcode::INLINEASM_BR && 12634 "end of copy chain MUST be INLINEASM_BR"); 12635 return Reg; 12636 } 12637 12638 // We must do this walk rather than the simpler 12639 // setValue(&I, getCopyFromRegs(CBR, CBR->getType())); 12640 // otherwise we will end up with copies of virtregs only valid along direct 12641 // edges. 12642 void SelectionDAGBuilder::visitCallBrLandingPad(const CallInst &I) { 12643 SmallVector<EVT, 8> ResultVTs; 12644 SmallVector<SDValue, 8> ResultValues; 12645 const auto *CBR = 12646 cast<CallBrInst>(I.getParent()->getUniquePredecessor()->getTerminator()); 12647 12648 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 12649 const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo(); 12650 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); 12651 12652 unsigned InitialDef = FuncInfo.ValueMap[CBR]; 12653 SDValue Chain = DAG.getRoot(); 12654 12655 // Re-parse the asm constraints string. 12656 TargetLowering::AsmOperandInfoVector TargetConstraints = 12657 TLI.ParseConstraints(DAG.getDataLayout(), TRI, *CBR); 12658 for (auto &T : TargetConstraints) { 12659 SDISelAsmOperandInfo OpInfo(T); 12660 if (OpInfo.Type != InlineAsm::isOutput) 12661 continue; 12662 12663 // Pencil in OpInfo.ConstraintType and OpInfo.ConstraintVT based on the 12664 // individual constraint. 12665 TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG); 12666 12667 switch (OpInfo.ConstraintType) { 12668 case TargetLowering::C_Register: 12669 case TargetLowering::C_RegisterClass: { 12670 // Fill in OpInfo.AssignedRegs.Regs. 12671 getRegistersForValue(DAG, getCurSDLoc(), OpInfo, OpInfo); 12672 12673 // getRegistersForValue may produce 1 to many registers based on whether 12674 // the OpInfo.ConstraintVT is legal on the target or not. 12675 for (Register &Reg : OpInfo.AssignedRegs.Regs) { 12676 Register OriginalDef = FollowCopyChain(MRI, InitialDef++); 12677 if (Register::isPhysicalRegister(OriginalDef)) 12678 FuncInfo.MBB->addLiveIn(OriginalDef); 12679 // Update the assigned registers to use the original defs. 
12680 Reg = OriginalDef; 12681 } 12682 12683 SDValue V = OpInfo.AssignedRegs.getCopyFromRegs( 12684 DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, CBR); 12685 ResultValues.push_back(V); 12686 ResultVTs.push_back(OpInfo.ConstraintVT); 12687 break; 12688 } 12689 case TargetLowering::C_Other: { 12690 SDValue Flag; 12691 SDValue V = TLI.LowerAsmOutputForConstraint(Chain, Flag, getCurSDLoc(), 12692 OpInfo, DAG); 12693 ++InitialDef; 12694 ResultValues.push_back(V); 12695 ResultVTs.push_back(OpInfo.ConstraintVT); 12696 break; 12697 } 12698 default: 12699 break; 12700 } 12701 } 12702 SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(), 12703 DAG.getVTList(ResultVTs), ResultValues); 12704 setValue(&I, V); 12705 } 12706