//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGBlocks.h"
#include "CGCleanup.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
using namespace clang;
using namespace CodeGen;

/// shouldEmitLifetimeMarkers - Decide whether we need to emit the lifetime
/// markers.
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
                                      const LangOptions &LangOpts) {
  if (CGOpts.DisableLifetimeMarkers)
    return false;

  // Disable lifetime markers in msan builds.
  // FIXME: Remove this when msan works with lifetime markers.
  if (LangOpts.Sanitize.has(SanitizerKind::Memory))
    return false;

  // Asan uses markers for use-after-scope checks.
  if (CGOpts.SanitizeAddressUseAfterScope)
    return true;

  // For now, only in optimized builds.
  return CGOpts.OptimizationLevel != 0;
}

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
    : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
      Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
              CGBuilderInserterTy(this)),
      SanOpts(CGM.getLangOpts().Sanitize), DebugInfo(CGM.getModuleDebugInfo()),
      PGO(cgm), ShouldEmitLifetimeMarkers(shouldEmitLifetimeMarkers(
                    CGM.getCodeGenOpts(), CGM.getLangOpts())) {
  if (!suppressNewContext)
    CGM.getCXXABI().getMangleContext().startNewFunction();

  llvm::FastMathFlags FMF;
  if (CGM.getLangOpts().FastMath)
    FMF.setFast();
  if (CGM.getLangOpts().FiniteMathOnly) {
    FMF.setNoNaNs();
    FMF.setNoInfs();
  }
  if (CGM.getCodeGenOpts().NoNaNsFPMath) {
    FMF.setNoNaNs();
  }
  if (CGM.getCodeGenOpts().NoSignedZeros) {
    FMF.setNoSignedZeros();
  }
  if (CGM.getCodeGenOpts().ReciprocalMath) {
    FMF.setAllowReciprocal();
  }
  if (CGM.getCodeGenOpts().Reassociate) {
    FMF.setAllowReassoc();
  }
  Builder.setFastMathFlags(FMF);
}
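
// A sketch of how the flag mapping above surfaces in IR (illustrative; the
// exact driver spelling of these options varies by Clang version): compiling
// with -ffast-math sets LangOpts.FastMath, so FP instructions created through
// this Builder carry the 'fast' flag, e.g.
//   %mul = fmul fast float %a, %b
// while -ffinite-math-only alone would yield only 'nnan ninf'.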

CodeGenFunction::~CodeGenFunction() {
  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");

  // If there are any unclaimed block infos, go ahead and destroy them
  // now. This can happen if IR-gen gets clever and skips evaluating
  // something.
  if (FirstBlockInfo)
    destroyBlockInfos(FirstBlockInfo);

  if (getLangOpts().OpenMP && CurFn)
    CGM.getOpenMPRuntime().functionFinished(*this);
}

CharUnits CodeGenFunction::getNaturalPointeeTypeAlignment(QualType T,
                                                          LValueBaseInfo *BaseInfo,
                                                          TBAAAccessInfo *TBAAInfo) {
  return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo, TBAAInfo,
                                 /* forPointeeType= */ true);
}

CharUnits CodeGenFunction::getNaturalTypeAlignment(QualType T,
                                                   LValueBaseInfo *BaseInfo,
                                                   TBAAAccessInfo *TBAAInfo,
                                                   bool forPointeeType) {
  if (TBAAInfo)
    *TBAAInfo = CGM.getTBAAAccessInfo(T);

  // Honor alignment typedef attributes even on incomplete types.
  // We also honor them straight for C++ class types, even as pointees;
  // there's an expressivity gap here.
  if (auto TT = T->getAs<TypedefType>()) {
    if (auto Align = TT->getDecl()->getMaxAlignment()) {
      if (BaseInfo)
        *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType);
      return getContext().toCharUnitsFromBits(Align);
    }
  }

  if (BaseInfo)
    *BaseInfo = LValueBaseInfo(AlignmentSource::Type);

  CharUnits Alignment;
  if (T->isIncompleteType()) {
    Alignment = CharUnits::One(); // Shouldn't be used, but pessimistic is best.
  } else {
    // For C++ class pointees, we don't know whether we're pointing at a
    // base or a complete object, so we generally need to use the
    // non-virtual alignment.
    const CXXRecordDecl *RD;
    if (forPointeeType && (RD = T->getAsCXXRecordDecl())) {
      Alignment = CGM.getClassPointerAlignment(RD);
    } else {
      Alignment = getContext().getTypeAlignInChars(T);
      if (T.getQualifiers().hasUnaligned())
        Alignment = CharUnits::One();
    }

    // Cap to the global maximum type alignment unless the alignment
    // was somehow explicit on the type.
    if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
      if (Alignment.getQuantity() > MaxAlign &&
          !getContext().isAlignmentRequired(T))
        Alignment = CharUnits::fromQuantity(MaxAlign);
    }
  }
  return Alignment;
}

LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Alignment = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
  return LValue::MakeAddr(Address(V, Alignment), T, getContext(), BaseInfo,
                          TBAAInfo);
}
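
// Example of the attributed-typedef path above (illustrative):
//   typedef float AlignedFloat __attribute__((aligned(16)));
// getNaturalTypeAlignment on AlignedFloat returns 16 bytes with
// AlignmentSource::AttributedType, where plain 'float' would report its
// natural 4-byte alignment (on typical targets) with AlignmentSource::Type.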

/// Given a value of type T* that may not point to a complete object, construct
/// an l-value with the natural pointee alignment of T.
LValue
CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Align = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
                                            /* forPointeeType= */ true);
  return MakeAddrLValue(Address(V, Align), T, BaseInfo, TBAAInfo);
}


llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      llvm_unreachable("undeduced type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
    case Type::Pipe:
      return TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
      return TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}
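
// Quick reference for the classification above (illustrative):
//   int, float, T*, T&  -> TEK_Scalar
//   _Complex double     -> TEK_Complex
//   struct S, int[4]    -> TEK_Aggregate
//   _Atomic(int)        -> looks through to 'int', hence TEK_Scalar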

llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return llvm::DebugLoc();
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
        dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Record/return the DebugLoc of the simple 'return' expression to be used
      // later by the actual 'ret' instruction.
      llvm::DebugLoc Loc = BI->getDebugLoc();
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      return Loc;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit the block
  // unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.getBlock());
  return llvm::DebugLoc();
}
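
// The single-branch case above folds IR of this shape (a sketch):
//   entry:
//     br label %return      ; sole, unconditional predecessor
//   return:
//     ret i32 %x
// into one block, salvaging the branch's debug location for the 'ret'.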

static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty())
    return CGF.CurFn->getBasicBlockList().push_back(BB);
  delete BB;
}

void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
    && NumSimpleReturnExprs == NumReturnExprs
    && ReturnBlock.getBlock()->use_empty();
  // Usually the return expression is evaluated before the cleanup
  // code. If the function contains only a simple return statement,
  // such as a constant, the location before the cleanup code becomes
  // the last useful breakpoint in the function, because the simple
  // return expression will be evaluated after the cleanup code. To be
  // safe, set the debug location for cleanup code to the location of
  // the return statement. Otherwise the cleanup code should be at the
  // end of the function's lexical scope.
  //
  // If there are multiple branches to the return block, the branch
  // instructions will get the location of the return statements and
  // all will be fine.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (OnlySimpleReturnStmts)
      DI->EmitLocation(Builder, LastStopPoint);
    else
      DI->EmitLocation(Builder, EndLoc);
  }

  // Pop any cleanups that might have been associated with the
  // parameters. Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
  bool HasOnlyLifetimeMarkers =
      HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
  bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;
  if (HasCleanups) {
    // Make sure the line table doesn't jump back into the body for
    // the ret after it's been at EndLoc.
    if (CGDebugInfo *DI = getDebugInfo())
      if (OnlySimpleReturnStmts)
        DI->EmitLocation(Builder, EndLoc);

    PopCleanupBlocks(PrologueCleanupDepth);
  }

  // Emit function epilog (to return).
  llvm::DebugLoc Loc = EmitReturnBlock();

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-exit-inlined",
                       "__cyg_profile_func_exit");
  }

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitFunctionEnd(Builder, CurFn);

  // Reset the debug location to that of the simple 'return' expression, if
  // any, rather than that of the end of the function's scope '}'.
  ApplyDebugLocation AL(*this, Loc);
  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end of
  // the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // If some of our locals escaped, insert a call to llvm.localescape in the
  // entry block.
  if (!EscapedLocals.empty()) {
    // Invert the map from local to index into a simple vector. There should be
    // no holes.
    SmallVector<llvm::Value *, 4> EscapeArgs;
    EscapeArgs.resize(EscapedLocals.size());
    for (auto &Pair : EscapedLocals)
      EscapeArgs[Pair.second] = Pair.first;
    llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
        &CGM.getModule(), llvm::Intrinsic::localescape);
    CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = nullptr;
  Ptr->eraseFromParent();

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero entry PHI node, which is illegal, zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, EHResumeBlock);
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  for (const auto &FuncletAndParent : TerminateFunclets)
    EmitIfUsed(*this, FuncletAndParent.second);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();

  for (SmallVectorImpl<std::pair<llvm::Instruction *, llvm::Value *> >::iterator
           I = DeferredReplacements.begin(),
           E = DeferredReplacements.end();
       I != E; ++I) {
    I->first->replaceAllUsesWith(I->second);
    I->first->eraseFromParent();
  }

  // Eliminate the CleanupDestSlot alloca by replacing it with SSA values and
  // PHIs if the current function is a coroutine. We don't do it for all
  // functions as it may result in a slight increase in the number of
  // instructions if compiled with no optimizations. We do it for coroutines
  // as the lifetime of the CleanupDestSlot alloca makes correct coroutine
  // frame building very difficult.
  if (NormalCleanupDest.isValid() && isCoroutine()) {
    llvm::DominatorTree DT(*CurFn);
    llvm::PromoteMemToReg(
        cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
    NormalCleanupDest = Address::invalid();
  }

  // Add the required-vector-width attribute.
  if (LargestVectorWidth != 0)
    CurFn->addFnAttr("min-legal-vector-width",
                     llvm::utostr(LargestVectorWidth));
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls.
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions &&
      !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
      !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

/// ShouldXRayInstrument - Return true if the current function should be
/// instrumented with XRay nop sleds.
bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions;
}

/// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
/// the __xray_customevent(...) builtin calls, when doing XRay instrumentation.
bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Custom);
}

bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Typed);
}

llvm::Constant *
CodeGenFunction::EncodeAddrForUseInPrologue(llvm::Function *F,
                                            llvm::Constant *Addr) {
  // Addresses stored in prologue data can't require run-time fixups and must
  // be PC-relative. Run-time fixups are undesirable because they necessitate
  // writable text segments, which are unsafe. And absolute addresses are
  // undesirable because they break PIE mode.

  // Add a layer of indirection through a private global. Taking its address
  // won't result in a run-time fixup, even if Addr has linkonce_odr linkage.
  auto *GV = new llvm::GlobalVariable(CGM.getModule(), Addr->getType(),
                                      /*isConstant=*/true,
                                      llvm::GlobalValue::PrivateLinkage, Addr);

  // Create a PC-relative address.
  auto *GOTAsInt = llvm::ConstantExpr::getPtrToInt(GV, IntPtrTy);
  auto *FuncAsInt = llvm::ConstantExpr::getPtrToInt(F, IntPtrTy);
  auto *PCRelAsInt = llvm::ConstantExpr::getSub(GOTAsInt, FuncAsInt);
  return (IntPtrTy == Int32Ty)
             ? PCRelAsInt
             : llvm::ConstantExpr::getTrunc(PCRelAsInt, Int32Ty);
}
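
// Round-trip sketch for the encode/decode pair: with G the private global
// holding Addr and F the function, the prologue stores the i32 value
// trunc(ptrtoint(G) - ptrtoint(F)); DecodeAddrUsedInPrologue below rebuilds
// ptrtoint(G) as sext(encoded) + ptrtoint(F) and loads Addr back through G.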

llvm::Value *
CodeGenFunction::DecodeAddrUsedInPrologue(llvm::Value *F,
                                          llvm::Value *EncodedAddr) {
  // Reconstruct the address of the global.
  auto *PCRelAsInt = Builder.CreateSExt(EncodedAddr, IntPtrTy);
  auto *FuncAsInt = Builder.CreatePtrToInt(F, IntPtrTy, "func_addr.int");
  auto *GOTAsInt = Builder.CreateAdd(PCRelAsInt, FuncAsInt, "global_addr.int");
  auto *GOTAddr = Builder.CreateIntToPtr(GOTAsInt, Int8PtrPtrTy, "global_addr");

  // Load the original pointer through the global.
  return Builder.CreateLoad(Address(GOTAddr, getPointerAlign()),
                            "decoded_addr");
}

static void removeImageAccessQualifier(std::string& TyName) {
  std::string ReadOnlyQual("__read_only");
  std::string::size_type ReadOnlyPos = TyName.find(ReadOnlyQual);
  if (ReadOnlyPos != std::string::npos)
    // "+ 1" for the space after access qualifier.
    TyName.erase(ReadOnlyPos, ReadOnlyQual.size() + 1);
  else {
    std::string WriteOnlyQual("__write_only");
    std::string::size_type WriteOnlyPos = TyName.find(WriteOnlyQual);
    if (WriteOnlyPos != std::string::npos)
      TyName.erase(WriteOnlyPos, WriteOnlyQual.size() + 1);
    else {
      std::string ReadWriteQual("__read_write");
      std::string::size_type ReadWritePos = TyName.find(ReadWriteQual);
      if (ReadWritePos != std::string::npos)
        TyName.erase(ReadWritePos, ReadWriteQual.size() + 1);
    }
  }
}

// Returns the address space id that should be emitted in the
// kernel_arg_addr_space metadata. This is always fixed to the ids specified
// in the SPIR 2.0 specification, so that, for example, a clGetKernelArgInfo()
// implementation can distinguish the address spaces even on targets without
// a unique mapping to the OpenCL address spaces (basically all single-AS
// CPUs).
static unsigned ArgInfoAddressSpace(LangAS AS) {
  switch (AS) {
  case LangAS::opencl_global:   return 1;
  case LangAS::opencl_constant: return 2;
  case LangAS::opencl_local:    return 3;
  case LangAS::opencl_generic:  return 4; // Not in SPIR 2.0 specs.
  default:
    return 0; // Assume private.
  }
}
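
// For example (per the SPIR 2.0 mapping above), a '__global int *' argument
// is reported as address space 1 and '__constant int *' as 2, while
// removeImageAccessQualifier turns "__read_only image2d_t" into "image2d_t".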

// OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument
// information in the program executable. The argument information stored
// includes the argument name, its type, the address and access qualifiers used.
static void GenOpenCLArgMetadata(const FunctionDecl *FD, llvm::Function *Fn,
                                 CodeGenModule &CGM, llvm::LLVMContext &Context,
                                 CGBuilderTy &Builder, ASTContext &ASTCtx) {
  // Create MDNodes that represent the kernel arg metadata.
  // Each MDNode is a list of the form "key", followed by one value per
  // kernel argument.

  const PrintingPolicy &Policy = ASTCtx.getPrintingPolicy();

  // MDNode for the kernel argument address space qualifiers.
  SmallVector<llvm::Metadata *, 8> addressQuals;

  // MDNode for the kernel argument access qualifiers (images only).
  SmallVector<llvm::Metadata *, 8> accessQuals;

  // MDNode for the kernel argument type names.
  SmallVector<llvm::Metadata *, 8> argTypeNames;

  // MDNode for the kernel argument base type names.
  SmallVector<llvm::Metadata *, 8> argBaseTypeNames;

  // MDNode for the kernel argument type qualifiers.
  SmallVector<llvm::Metadata *, 8> argTypeQuals;

  // MDNode for the kernel argument names.
  SmallVector<llvm::Metadata *, 8> argNames;

  for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
    const ParmVarDecl *parm = FD->getParamDecl(i);
    QualType ty = parm->getType();
    std::string typeQuals;

    if (ty->isPointerType()) {
      QualType pointeeTy = ty->getPointeeType();

      // Get address qualifier.
      addressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(
          ArgInfoAddressSpace(pointeeTy.getAddressSpace()))));

      // Get argument type name.
      std::string typeName =
          pointeeTy.getUnqualifiedType().getAsString(Policy) + "*";

      // Turn "unsigned type" into "utype".
      std::string::size_type pos = typeName.find("unsigned");
      if (pointeeTy.isCanonical() && pos != std::string::npos)
        typeName.erase(pos + 1, 8);

      argTypeNames.push_back(llvm::MDString::get(Context, typeName));

      std::string baseTypeName =
          pointeeTy.getUnqualifiedType().getCanonicalType().getAsString(
              Policy) +
          "*";

      // Turn "unsigned type" into "utype".
      pos = baseTypeName.find("unsigned");
      if (pos != std::string::npos)
        baseTypeName.erase(pos + 1, 8);

      argBaseTypeNames.push_back(llvm::MDString::get(Context, baseTypeName));

      // Get argument type qualifiers:
      if (ty.isRestrictQualified())
        typeQuals = "restrict";
      if (pointeeTy.isConstQualified() ||
          (pointeeTy.getAddressSpace() == LangAS::opencl_constant))
        typeQuals += typeQuals.empty() ? "const" : " const";
      if (pointeeTy.isVolatileQualified())
        typeQuals += typeQuals.empty() ? "volatile" : " volatile";
    } else {
      uint32_t AddrSpc = 0;
      bool isPipe = ty->isPipeType();
      if (ty->isImageType() || isPipe)
        AddrSpc = ArgInfoAddressSpace(LangAS::opencl_global);

      addressQuals.push_back(
          llvm::ConstantAsMetadata::get(Builder.getInt32(AddrSpc)));

      // Get argument type name.
      std::string typeName;
      if (isPipe)
        typeName = ty.getCanonicalType()->getAs<PipeType>()->getElementType()
                     .getAsString(Policy);
      else
        typeName = ty.getUnqualifiedType().getAsString(Policy);

      // Turn "unsigned type" into "utype".
      std::string::size_type pos = typeName.find("unsigned");
      if (ty.isCanonical() && pos != std::string::npos)
        typeName.erase(pos + 1, 8);

      std::string baseTypeName;
      if (isPipe)
        baseTypeName = ty.getCanonicalType()->getAs<PipeType>()
                         ->getElementType().getCanonicalType()
                         .getAsString(Policy);
      else
        baseTypeName =
            ty.getUnqualifiedType().getCanonicalType().getAsString(Policy);

      // Remove access qualifiers on images
      // (as they are inseparable from type in clang implementation,
      // but OpenCL spec provides a special query to get access qualifier
      // via clGetKernelArgInfo with CL_KERNEL_ARG_ACCESS_QUALIFIER):
      if (ty->isImageType()) {
        removeImageAccessQualifier(typeName);
        removeImageAccessQualifier(baseTypeName);
      }

      argTypeNames.push_back(llvm::MDString::get(Context, typeName));

      // Turn "unsigned type" into "utype".
      pos = baseTypeName.find("unsigned");
      if (pos != std::string::npos)
        baseTypeName.erase(pos + 1, 8);

      argBaseTypeNames.push_back(llvm::MDString::get(Context, baseTypeName));

      if (isPipe)
        typeQuals = "pipe";
    }

    argTypeQuals.push_back(llvm::MDString::get(Context, typeQuals));

    // Get image and pipe access qualifier:
    if (ty->isImageType() || ty->isPipeType()) {
      const Decl *PDecl = parm;
      if (auto *TD = dyn_cast<TypedefType>(ty))
        PDecl = TD->getDecl();
      const OpenCLAccessAttr *A = PDecl->getAttr<OpenCLAccessAttr>();
      if (A && A->isWriteOnly())
        accessQuals.push_back(llvm::MDString::get(Context, "write_only"));
      else if (A && A->isReadWrite())
        accessQuals.push_back(llvm::MDString::get(Context, "read_write"));
      else
        accessQuals.push_back(llvm::MDString::get(Context, "read_only"));
    } else
      accessQuals.push_back(llvm::MDString::get(Context, "none"));

    // Get argument name.
    argNames.push_back(llvm::MDString::get(Context, parm->getName()));
  }

  Fn->setMetadata("kernel_arg_addr_space",
                  llvm::MDNode::get(Context, addressQuals));
  Fn->setMetadata("kernel_arg_access_qual",
                  llvm::MDNode::get(Context, accessQuals));
  Fn->setMetadata("kernel_arg_type",
                  llvm::MDNode::get(Context, argTypeNames));
  Fn->setMetadata("kernel_arg_base_type",
                  llvm::MDNode::get(Context, argBaseTypeNames));
  Fn->setMetadata("kernel_arg_type_qual",
                  llvm::MDNode::get(Context, argTypeQuals));
  if (CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
    Fn->setMetadata("kernel_arg_name",
                    llvm::MDNode::get(Context, argNames));
}

void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
                                               llvm::Function *Fn)
{
  if (!FD->hasAttr<OpenCLKernelAttr>())
    return;

  llvm::LLVMContext &Context = getLLVMContext();

  GenOpenCLArgMetadata(FD, Fn, CGM, Context, Builder, getContext());

  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
    QualType HintQTy = A->getTypeHint();
    const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
    bool IsSignedInteger =
        HintQTy->isSignedIntegerType() ||
        (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
            CGM.getTypes().ConvertType(A->getTypeHint()))),
        llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
            llvm::IntegerType::get(Context, 32),
            llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
    Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("work_group_size_hint", llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("reqd_work_group_size", llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const OpenCLIntelReqdSubGroupSizeAttr *A =
          FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
    Fn->setMetadata("intel_reqd_sub_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }
}
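
// Illustrative result (a sketch; the printed metadata form is simplified):
// for a kernel such as
//   kernel void k(global float *out, read_only image2d_t img);
// the function gets, among others,
//   kernel_arg_addr_space  !{i32 1, i32 1}
//   kernel_arg_access_qual !{!"none", !"read_only"}
//   kernel_arg_type        !{!"float*", !"image2d_t"}
// with kernel_arg_name emitted only under -cl-kernel-arg-info.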

/// Determine whether the function F ends with a return stmt.
static bool endsWithReturn(const Decl* F) {
  const Stmt *Body = nullptr;
  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
    Body = FD->getBody();
  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
    Body = OMD->getBody();

  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
    auto LastStmt = CS->body_rbegin();
    if (LastStmt != CS->body_rend())
      return isa<ReturnStmt>(*LastStmt);
  }
  return false;
}

void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
  if (SanOpts.has(SanitizerKind::Thread)) {
    Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
    Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
  }
}

static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
  auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
  if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
      !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
      (MD->getNumParams() != 1 && MD->getNumParams() != 2))
    return false;

  if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
    return false;

  if (MD->getNumParams() == 2) {
    auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
    if (!PT || !PT->isVoidPointerType() ||
        !PT->getPointeeType().isConstQualified())
      return false;
  }

  return true;
}

/// Return the UBSan prologue signature for \p FD if one is available.
static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
                                            const FunctionDecl *FD) {
  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
    if (!MD->isStatic())
      return nullptr;
  return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
}
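
// matchesStlAllocatorFn accepts member functions shaped like the standard
// allocator interface (a sketch):
//   T *allocate(size_t n);
//   T *allocate(size_t n, const void *hint);
// so CFI's unrelated-cast check can tolerate the void*-to-T* cast they do.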

void CodeGenFunction::StartFunction(GlobalDecl GD,
                                    QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation Loc,
                                    SourceLocation StartLoc) {
  assert(!CurFn &&
         "Do not use a CodeGenFunction object for more than one function");

  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = D;
  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D))
    if (FD->usesSEHTry())
      CurSEHParent = FD;
  CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // If this function has been blacklisted for any of the enabled sanitizers,
  // disable the sanitizer for the function.
  do {
#define SANITIZER(NAME, ID)                                                    \
  if (SanOpts.empty())                                                         \
    break;                                                                     \
  if (SanOpts.has(SanitizerKind::ID))                                         \
    if (CGM.isInSanitizerBlacklist(SanitizerKind::ID, Fn, Loc))                \
      SanOpts.set(SanitizerKind::ID, false);

#include "clang/Basic/Sanitizers.def"
#undef SANITIZER
  } while (0);

  if (D) {
    // Apply the no_sanitize* attributes to SanOpts.
    for (auto Attr : D->specific_attrs<NoSanitizeAttr>()) {
      SanitizerMask mask = Attr->getMask();
      SanOpts.Mask &= ~mask;
      if (mask & SanitizerKind::Address)
        SanOpts.set(SanitizerKind::KernelAddress, false);
      if (mask & SanitizerKind::KernelAddress)
        SanOpts.set(SanitizerKind::Address, false);
      if (mask & SanitizerKind::HWAddress)
        SanOpts.set(SanitizerKind::KernelHWAddress, false);
      if (mask & SanitizerKind::KernelHWAddress)
        SanOpts.set(SanitizerKind::HWAddress, false);
    }
  }

  // Apply sanitizer attributes to the function.
  if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
  if (SanOpts.hasOneOf(SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress))
    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
  if (SanOpts.has(SanitizerKind::Thread))
    Fn->addFnAttr(llvm::Attribute::SanitizeThread);
  if (SanOpts.has(SanitizerKind::Memory))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
  if (SanOpts.has(SanitizerKind::SafeStack))
    Fn->addFnAttr(llvm::Attribute::SafeStack);
  if (SanOpts.has(SanitizerKind::ShadowCallStack))
    Fn->addFnAttr(llvm::Attribute::ShadowCallStack);

  // Apply fuzzing attribute to the function.
  if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
    Fn->addFnAttr(llvm::Attribute::OptForFuzzing);

  // Ignore TSan memory accesses from within ObjC/ObjC++ dealloc, initialize,
  // .cxx_destruct, __destroy_helper_block_ and all of their callees at run
  // time.
  if (SanOpts.has(SanitizerKind::Thread)) {
    if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
      IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
      if (OMD->getMethodFamily() == OMF_dealloc ||
          OMD->getMethodFamily() == OMF_initialize ||
          (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
        markAsIgnoreThreadCheckingAtRuntime(Fn);
      }
    }
  }

  // Ignore unrelated casts in STL allocate() since the allocator must cast
  // from void* to T* before object initialization completes. Don't match on
  // the namespace because not all allocators are in std::.
  if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
    if (matchesStlAllocatorFn(D, getContext()))
      SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
  }

  // Apply xray attributes to the function (as a string, for now)
  bool InstrumentXray = ShouldXRayInstrumentFunction() &&
                        CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
                            XRayInstrKind::Function);
  if (D && InstrumentXray) {
    if (const auto *XRayAttr = D->getAttr<XRayInstrumentAttr>()) {
      if (XRayAttr->alwaysXRayInstrument())
        Fn->addFnAttr("function-instrument", "xray-always");
      if (XRayAttr->neverXRayInstrument())
        Fn->addFnAttr("function-instrument", "xray-never");
      if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>()) {
        Fn->addFnAttr("xray-log-args",
                      llvm::utostr(LogArgs->getArgumentCount()));
      }
    } else {
      if (!CGM.imbueXRayAttrs(Fn, Loc))
        Fn->addFnAttr(
            "xray-instruction-threshold",
            llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
    }
  }

  // Add no-jump-tables value.
  Fn->addFnAttr("no-jump-tables",
                llvm::toStringRef(CGM.getCodeGenOpts().NoUseJumpTables));

  // Add profile-sample-accurate value.
  if (CGM.getCodeGenOpts().ProfileSampleAccurate)
    Fn->addFnAttr("profile-sample-accurate");

  if (getLangOpts().OpenCL) {
    // Add metadata for a kernel function.
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      EmitOpenCLKernelMetadata(FD, Fn);
  }

  // If we are checking function types, emit a function type signature as
  // prologue data.
  if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
        // Remove any (C++17) exception specifications, to allow calling e.g. a
        // noexcept function through a non-noexcept pointer.
        auto ProtoTy =
            getContext().getFunctionTypeWithExceptionSpec(FD->getType(),
                                                          EST_None);
        llvm::Constant *FTRTTIConst =
            CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
        llvm::Constant *FTRTTIConstEncoded =
            EncodeAddrForUseInPrologue(Fn, FTRTTIConst);
        llvm::Constant *PrologueStructElems[] = {PrologueSig,
                                                 FTRTTIConstEncoded};
        llvm::Constant *PrologueStructConst =
            llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
        Fn->setPrologueData(PrologueStructConst);
      }
    }
  }

  // If we're checking nullability, we need to know whether we can check the
  // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
  if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
    auto Nullability = FnRetTy->getNullability(getContext());
    if (Nullability && *Nullability == NullabilityKind::NonNull) {
      if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
            CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
        RetValNullabilityPrecondition =
            llvm::ConstantInt::getTrue(getLLVMContext());
    }
  }

  // If we're in C++ mode and the function name is "main", it is guaranteed
  // to be norecurse by the standard (3.6.1.3 "The function main shall not be
  // used within a program").
  if (getLangOpts().CPlusPlus)
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      if (FD->isMain())
        Fn->addFnAttr(llvm::Attribute::NoRecurse);

  // If a custom alignment is used, force realigning to this alignment on
  // any main function which certainly will need it.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
    if ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
        CGM.getCodeGenOpts().StackAlignment)
      Fn->addFnAttr("stackrealign");

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later. Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // If we're checking the return value, allocate space for a pointer to a
  // precise source location of the checked return statement.
  if (requiresReturnValueCheck()) {
    ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
    InitTempAlloca(ReturnLocation, llvm::ConstantPointerNull::get(Int8PtrTy));
  }

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    // Reconstruct the type from the argument list so that implicit parameters,
    // such as 'this' and 'vtt', show up in the debug info. Preserve the calling
    // convention.
    CallingConv CC = CallingConv::CC_C;
    if (auto *FD = dyn_cast_or_null<FunctionDecl>(D))
      if (const auto *SrcFnTy = FD->getType()->getAs<FunctionType>())
        CC = SrcFnTy->getCallConv();
    SmallVector<QualType, 16> ArgTypes;
    for (const VarDecl *VD : Args)
      ArgTypes.push_back(VD->getType());
    QualType FnType = getContext().getFunctionType(
        RetTy, ArgTypes, FunctionProtoType::ExtProtoInfo(CC));
    DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, CurFuncIsThunk,
                          Builder);
  }

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter_bare");
  }

  // Since emitting the mcount call here impacts optimizations such as function
  // inlining, we just add an attribute to insert an mcount call in the backend.
  // The attribute is set to the mcount function name, which is architecture
  // dependent.
  if (CGM.getCodeGenOpts().InstrumentForProfiling) {
    // Calls to fentry/mcount should not be generated if function has
    // the no_instrument_function attribute.
    if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
      if (CGM.getCodeGenOpts().CallFEntry)
        Fn->addFnAttr("fentry-call", "true");
      else {
        Fn->addFnAttr("instrument-function-entry-inlined",
                      getTarget().getMCountName());
      }
    }
  }

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = Address::invalid();

    // Count the implicit return.
    if (!endsWithReturn(D))
      ++NumReturnExprs;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Indirect aggregate return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    auto AI = CurFn->arg_begin();
    if (CurFnInfo->getReturnInfo().isSRetAfterThis())
      ++AI;
    ReturnValue = Address(&*AI, CurFnInfo->getReturnInfo().getIndirectAlign());
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Load the sret pointer from the argument struct and return into that.
    unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
    llvm::Function::arg_iterator EI = CurFn->arg_end();
    --EI;
    llvm::Value *Addr = Builder.CreateStructGEP(nullptr, &*EI, Idx);
    Addr = Builder.CreateAlignedLoad(Addr, getPointerAlign(), "agg.result");
    ReturnValue = Address(Addr, getNaturalTypeAlignment(RetTy));
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");

    // Tell the epilog emitter to autorelease the result. We do this
    // now so that various specialized functions can suppress it
    // during their IR-generation.
    if (getLangOpts().ObjCAutoRefCount &&
        !CurFnInfo->isReturnsRetained() &&
        RetTy->isObjCRetainableType())
      AutoreleaseResult = true;
  }

  EmitStartEHSpec(CurCodeDecl);

  PrologueCleanupDepth = EHStack.stable_begin();

  // Emit OpenMP specific initialization of the device functions.
  if (getLangOpts().OpenMP && CurCodeDecl)
    CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);

  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
    const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
    if (MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      // We're in a lambda; figure out the captures.
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
                                        LambdaThisCaptureField);
      if (LambdaThisCaptureField) {
        // If the lambda captures the object referred to by '*this', either by
        // value or by reference, make sure CXXThisValue points to the correct
        // object.

        // Get the lvalue for the field (which is a copy of the enclosing
        // object or contains the address of the enclosing object).
        LValue ThisFieldLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
        if (!LambdaThisCaptureField->getType()->isPointerType()) {
          // If the enclosing object was captured by value, just use its
          // address.
          CXXThisValue = ThisFieldLValue.getAddress().getPointer();
        } else {
          // Load the lvalue pointed to by the field, since '*this' was
          // captured by reference.
          CXXThisValue =
              EmitLoadOfLValue(ThisFieldLValue, SourceLocation()).getScalarVal();
        }
      }
      for (auto *FD : MD->getParent()->fields()) {
        if (FD->hasCapturedVLAType()) {
          auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
                                           SourceLocation()).getScalarVal();
          auto VAT = FD->getCapturedVLAType();
          VLASizeMap[VAT->getSizeExpr()] = ExprArg;
        }
      }
    } else {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'? The
      // fast register allocator would be happier...
      CXXThisValue = CXXABIThisValue;
    }

    // Check the 'this' pointer once per function, if it's available.
    if (CXXABIThisValue) {
      SanitizerSet SkippedChecks;
      SkippedChecks.set(SanitizerKind::ObjectSize, true);
      QualType ThisTy = MD->getThisType(getContext());

      // If this is the call operator of a lambda with no capture-default, it
      // may have a static invoker function, which may call this operator with
      // a null 'this' pointer.
      if (isLambdaCallOperator(MD) &&
          MD->getParent()->getLambdaCaptureDefault() == LCD_None)
        SkippedChecks.set(SanitizerKind::Null, true);

      EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall
                                                : TCK_MemberCall,
                    Loc, CXXABIThisValue, ThisTy,
                    getContext().getTypeAlignInChars(ThisTy->getPointeeType()),
                    SkippedChecks);
    }
  }

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    const VarDecl *VD = *i;

    // Dig out the type as written from ParmVarDecls; it's unclear whether
    // the standard (C99 6.9.1p10) requires this, but we're following the
    // precedent set by gcc.
    QualType Ty;
    if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
      Ty = PVD->getOriginalType();
    else
      Ty = VD->getType();

    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
  }
  // Emit a location at the end of the prologue.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, StartLoc);

  // TODO: Do we need to handle this in two places like we do with
  // target-features/target-cpu?
  if (CurFuncDecl)
    if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
      LargestVectorWidth = VecWidth->getVectorWidth();
}

void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args,
                                       const Stmt *Body) {
  incrementProfileCounter(Body);
  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
    EmitCompoundStmtWithoutScope(*S);
  else
    EmitStmt(Body);
}

/// When instrumenting to collect profile data, the counts for some blocks
/// such as switch cases need to not include the fall-through counts, so
/// emit a branch around the instrumentation code. When not instrumenting,
/// this just calls EmitBlock().
void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
                                               const Stmt *S) {
  llvm::BasicBlock *SkipCountBB = nullptr;
  if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr()) {
    // When instrumenting for profiling, the fallthrough to certain
    // statements needs to skip over the instrumentation code so that we
    // get an accurate count.
    SkipCountBB = createBasicBlock("skipcount");
    EmitBranch(SkipCountBB);
  }
  EmitBlock(BB);
  uint64_t CurrentCount = getCurrentProfileCount();
  incrementProfileCounter(S);
  setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
  if (SkipCountBB)
    EmitBlock(SkipCountBB);
}

/// Tries to mark the given function nounwind based on the
/// non-existence of any throwing calls within it. We believe this is
/// lightweight enough to do at -O0.
static void TryMarkNoThrow(llvm::Function *F) {
  // LLVM treats 'nounwind' on a function as part of the type, so we
  // can't do this on functions that can be overwritten.
  if (F->isInterposable()) return;

  for (llvm::BasicBlock &BB : *F)
    for (llvm::Instruction &I : BB)
      if (I.mayThrow())
        return;

  F->setDoesNotThrow();
}
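
// TryMarkNoThrow example (illustrative): at -O0 a leaf function like
//   int sq(int x) { return x * x; }
// contains no may-throw instruction, so it is marked 'nounwind' here rather
// than waiting for the optimizer's attribute inference.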

QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
                                               FunctionArgList &Args) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  QualType ResTy = FD->getReturnType();

  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
  if (MD && MD->isInstance()) {
    if (CGM.getCXXABI().HasThisReturn(GD))
      ResTy = MD->getThisType(getContext());
    else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
      ResTy = CGM.getContext().VoidPtrTy;
    CGM.getCXXABI().buildThisParam(*this, Args);
  }

  // The base version of an inheriting constructor whose constructed base is a
  // virtual base is not passed any arguments (because it doesn't actually call
  // the inherited constructor).
  bool PassedParams = true;
  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    if (auto Inherited = CD->getInheritedConstructor())
      PassedParams =
          getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());

  if (PassedParams) {
    for (auto *Param : FD->parameters()) {
      Args.push_back(Param);
      if (!Param->hasAttr<PassObjectSizeAttr>())
        continue;

      auto *Implicit = ImplicitParamDecl::Create(
          getContext(), Param->getDeclContext(), Param->getLocation(),
          /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamDecl::Other);
      SizeArguments[Param] = Implicit;
      Args.push_back(Implicit);
    }
  }

  if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
    CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);

  return ResTy;
}

static bool
shouldUseUndefinedBehaviorReturnOptimization(const FunctionDecl *FD,
                                             const ASTContext &Context) {
  QualType T = FD->getReturnType();
  // Avoid the optimization for functions that return a record type with a
  // trivial destructor or another trivially copyable type.
  if (const RecordType *RT = T.getCanonicalType()->getAs<RecordType>()) {
    if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      return !ClassDecl->hasTrivialDestructor();
  }
  return !T.isTriviallyCopyableType(Context);
}
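
// BuildFunctionArgList sketch for pass_object_size: given
//   void f(void *p __attribute__((pass_object_size(0))));
// Args becomes {p, <implicit size_t parameter>}, and SizeArguments records
// the implicit parameter so later codegen can find the size passed for 'p'.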

void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
                                   const CGFunctionInfo &FnInfo) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  CurGD = GD;

  FunctionArgList Args;
  QualType ResTy = BuildFunctionArgList(GD, Args);

  // Check if we should generate debug info for this function.
  if (FD->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  // The function might not have a body if we're generating thunks for a
  // function declaration.
  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody())
    BodyRange = Body->getSourceRange();
  else
    BodyRange = FD->getLocation();
  CurEHLocation = BodyRange.getEnd();

  // Use the location of the start of the function to determine where
  // the function definition is located. By default use the location
  // of the declaration as the location for the subprogram. A function
  // may lack a declaration in the source code if it is created by code
  // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
  SourceLocation Loc = FD->getLocation();

  // If this is a function specialization then use the pattern body
  // as the location for the function.
  if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
    if (SpecDecl->hasBody(SpecDecl))
      Loc = SpecDecl->getLocation();

  Stmt *Body = FD->getBody();

  // Initialize helper which will detect jumps which can cause invalid
  // lifetime markers.
  if (Body && ShouldEmitLifetimeMarkers)
    Bypasses.Init(Body);

  // Emit the standard function prologue.
  StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());

  // Generate the body of the function.
  PGO.assignRegionCounters(GD, CurFn);
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else if (getLangOpts().CUDA &&
           !getLangOpts().CUDAIsDevice &&
           FD->hasAttr<CUDAGlobalAttr>())
    CGM.getCUDARuntime().emitDeviceStub(*this, Args);
  else if (isa<CXXMethodDecl>(FD) &&
           cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
    // The lambda static invoker function is special, because it forwards or
    // clones the body of the function call operator (but is actually static).
    EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
  } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
             (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
              cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
    // Implicit copy-assignment gets the same special treatment as implicit
    // copy-constructors.
    emitImplicitAssignmentOperatorBody(Args);
  } else if (Body) {
    EmitFunctionBody(Args, Body);
  } else
    llvm_unreachable("no definition for emitted function");

  // C++11 [stmt.return]p2:
  //   Flowing off the end of a function [...] results in undefined behavior
  //   in a value-returning function.
  // C11 6.9.1p12:
  //   If the '}' that terminates a function is reached, and the value of the
  //   function call is used by the caller, the behavior is undefined.
  if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
      !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
    bool ShouldEmitUnreachable =
        CGM.getCodeGenOpts().StrictReturn ||
        shouldUseUndefinedBehaviorReturnOptimization(FD, getContext());
    if (SanOpts.has(SanitizerKind::Return)) {
      SanitizerScope SanScope(this);
      llvm::Value *IsFalse = Builder.getFalse();
      EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
                SanitizerHandler::MissingReturn,
                EmitCheckSourceLocation(FD->getLocation()), None);
    } else if (ShouldEmitUnreachable) {
      if (CGM.getCodeGenOpts().OptimizationLevel == 0)
        EmitTrapCall(llvm::Intrinsic::trap);
    }
    if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
      Builder.CreateUnreachable();
      Builder.ClearInsertionPoint();
    }
  }

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());

  // If we haven't marked the function nothrow through other means, do
  // a quick pass now to see if we can.
  if (!CurFn->doesNotThrow())
    TryMarkNoThrow(CurFn);
}
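
// Illustrative flow-off-the-end handling: for
//   int f(bool b) { if (b) return 1; }   // C++, one path lacks a return
// the fall-through path reaches the code above; with -fsanitize=return it
// calls the missing_return handler, otherwise at -O0 it emits a trap, and in
// either case the path ends in 'unreachable'.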

/// ContainsLabel - Return true if the statement contains a label in it. If
/// this statement is not executed normally, it not containing a label means
/// that we can just remove the code.
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
  // Null statement, not a label!
  if (!S) return false;

  // If this is a label, we have to emit the code, consider something like:
  // if (0) { ...  foo:  bar(); }  goto foo;
  //
  // TODO: If anyone cared, we could track __label__'s, since we know that you
  // can't jump to one from outside their declared region.
  if (isa<LabelStmt>(S))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we
  // have to emit the code.
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore cases below it.
  if (isa<SwitchStmt>(S))
    IgnoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  for (const Stmt *SubStmt : S->children())
    if (ContainsLabel(SubStmt, IgnoreCaseStmts))
      return true;

  return false;
}

/// containsBreak - Return true if the statement contains a break out of it.
/// If the statement (recursively) contains a switch or loop with a break
/// inside of it, this is fine.
bool CodeGenFunction::containsBreak(const Stmt *S) {
  // Null statement, not a break!
  if (!S) return false;

  // If this is a switch or loop that defines its own break scope, then we can
  // include it and anything inside of it.
  if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
      isa<ForStmt>(S))
    return false;

  if (isa<BreakStmt>(S))
    return true;

  // Scan subexpressions for verboten breaks.
  for (const Stmt *SubStmt : S->children())
    if (containsBreak(SubStmt))
      return true;

  return false;
}

bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) {
  if (!S) return false;

  // Some statement kinds add a scope and thus never add a decl to the current
  // scope. Note, this list is longer than the list of statements that might
  // have an unscoped decl nested within them, but this way is conservatively
  // correct even if more statement kinds are added.
  if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
      isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) ||
      isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) ||
      isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S))
    return false;

  if (isa<DeclStmt>(S))
    return true;

  for (const Stmt *SubStmt : S->children())
    if (mightAddDeclToScope(SubStmt))
      return true;

  return false;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false. If it
/// constant folds return true and set the boolean result in Result.
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
                                                   bool &ResultBool,
                                                   bool AllowLabels) {
  llvm::APSInt ResultInt;
  if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
    return false;

  ResultBool = ResultInt.getBoolValue();
  return true;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false. If it
/// constant folds return true and set the folded value.
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
                                                   llvm::APSInt &ResultInt,
                                                   bool AllowLabels) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  llvm::APSInt Int;
  if (!Cond->EvaluateAsInt(Int, getContext()))
    return false;  // Not foldable, not integer or not fully evaluatable.

  if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
    return false;  // Contains a label.

  ResultInt = Int;
  return true;
}
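
// e.g. ConstantFoldsToSimpleInteger on 'sizeof(int) == 4' folds to 1 with no
// label inside, letting callers such as EmitBranchOnBoolExpr below simplify
// "1 && X" to just "X" instead of emitting the dead test.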
/// Based on the condition, this might try to simplify the codegen of the
/// branch.
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
                                           llvm::BasicBlock *TrueBlock,
                                           llvm::BasicBlock *FalseBlock,
                                           uint64_t TrueCount) {
  Cond = Cond->IgnoreParens();

  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {

    // Handle X && Y in a condition.
    if (CondBOp->getOpcode() == BO_LAnd) {
      // If we have "1 && X", simplify the code.  "0 && X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          ConstantBool) {
        // br(1 && X) -> br(X).
        incrementProfileCounter(CondBOp);
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // If we have "X && 1", simplify the code to use an uncond branch.
      // "X && 0" would have been constant folded to 0.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          ConstantBool) {
        // br(X && 1) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is false, we
      // want to jump to the FalseBlock.
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
      // The counter tells us how often we evaluate RHS, and all of TrueCount
      // can be propagated to that branch.
      uint64_t RHSCount = getProfileCount(CondBOp->getRHS());

      ConditionalEvaluation eval(*this);
      {
        ApplyDebugLocation DL(*this, Cond);
        EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount);
        EmitBlock(LHSTrue);
      }

      incrementProfileCounter(CondBOp);
      setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount);
      eval.end(*this);

      return;
    }

    if (CondBOp->getOpcode() == BO_LOr) {
      // If we have "0 || X", simplify the code.  "1 || X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          !ConstantBool) {
        // br(0 || X) -> br(X).
        incrementProfileCounter(CondBOp);
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // If we have "X || 0", simplify the code to use an uncond branch.
      // "X || 1" would have been constant folded to 1.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          !ConstantBool) {
        // br(X || 0) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is true, we
      // want to jump to the TrueBlock.
      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
      // We have the count for entry to the RHS and for the whole expression
      // being true, so we can divvy up True count between the short circuit
      // and the RHS.
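      // Illustrative arithmetic (assumed counts, not from a real profile):
      // if 'a || b' is reached 10 times in total and the RHS 'b' is entered
      // 4 times, the LHS must have short-circuited true 10 - 4 = 6 times;
      // the remaining TrueCount - 6 true outcomes belong to the RHS.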
      uint64_t LHSCount =
          getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
      uint64_t RHSCount = TrueCount - LHSCount;

      ConditionalEvaluation eval(*this);
      {
        ApplyDebugLocation DL(*this, Cond);
        EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount);
        EmitBlock(LHSFalse);
      }

      incrementProfileCounter(CondBOp);
      setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount);

      eval.end(*this);

      return;
    }
  }

  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
    // br(!x, t, f) -> br(x, f, t)
    if (CondUOp->getOpcode() == UO_LNot) {
      // Negate the count.
      uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
      // Negate the condition and swap the destination blocks.
      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
                                  FalseCount);
    }
  }

  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
    // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
    llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
    llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");

    ConditionalEvaluation cond(*this);
    EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
                         getProfileCount(CondOp));

    // When computing PGO branch weights, we only know the overall count for
    // the true block. This code is essentially doing tail duplication of the
    // naive code-gen, introducing new edges for which counts are not
    // available. Divide the counts proportionally between the LHS and RHS of
    // the conditional operator.
    uint64_t LHSScaledTrueCount = 0;
    if (TrueCount) {
      double LHSRatio =
          getProfileCount(CondOp) / (double)getCurrentProfileCount();
      LHSScaledTrueCount = TrueCount * LHSRatio;
    }

    cond.begin(*this);
    EmitBlock(LHSBlock);
    incrementProfileCounter(CondOp);
    {
      ApplyDebugLocation DL(*this, Cond);
      EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
                           LHSScaledTrueCount);
    }
    cond.end(*this);

    cond.begin(*this);
    EmitBlock(RHSBlock);
    EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
                         TrueCount - LHSScaledTrueCount);
    cond.end(*this);

    return;
  }

  if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
    // Conditional operator handling can give us a throw expression as a
    // condition for a case like:
    //   br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f))
    // Fold this to:
    //   br(c, throw x, br(y, t, f))
    EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
    return;
  }

  // If the branch has a condition wrapped by __builtin_unpredictable,
  // create metadata that specifies that the branch is unpredictable.
  // Don't bother if not optimizing because that metadata would not be used.
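  // For example, 'if (__builtin_unpredictable(x > 1))' lowers to roughly:
  //   %cmp = icmp sgt i32 %x, 1
  //   br i1 %cmp, label %if.then, label %if.end, !unpredictable !0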
  llvm::MDNode *Unpredictable = nullptr;
  auto *Call = dyn_cast<CallExpr>(Cond);
  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
    auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
    if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
      llvm::MDBuilder MDHelper(getLLVMContext());
      Unpredictable = MDHelper.createUnpredictable();
    }
  }

  // Create branch weights based on the number of times we get here and the
  // number of times the condition should be true.
  uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
  llvm::MDNode *Weights =
      createProfileWeights(TrueCount, CurrentCount - TrueCount);

  // Emit the code with the fully general case.
  llvm::Value *CondV;
  {
    ApplyDebugLocation DL(*this, Cond);
    CondV = EvaluateExprAsBool(Cond);
  }
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable);
}

/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
  CGM.ErrorUnsupported(S, Type);
}

/// emitNonZeroVLAInit - Emit the "zero" initialization of a
/// variable-length array whose elements have a non-zero bit-pattern.
///
/// \param baseType the inner-most element type of the array
/// \param src a char* pointing to the bit-pattern for a single
///   base element of the array
/// \param sizeInChars the total size of the VLA, in chars
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
                               Address dest, Address src,
                               llvm::Value *sizeInChars) {
  CGBuilderTy &Builder = CGF.Builder;

  CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
  llvm::Value *baseSizeInChars
    = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());

  Address begin =
    Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin");
  llvm::Value *end =
    Builder.CreateInBoundsGEP(begin.getPointer(), sizeInChars, "vla.end");

  llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
  llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");

  // Make a loop over the VLA.  C99 guarantees that the VLA element
  // count must be nonzero.
  CGF.EmitBlock(loopBB);

  llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
  cur->addIncoming(begin.getPointer(), originBB);

  CharUnits curAlign =
    dest.getAlignment().alignmentOfArrayElement(baseSize);

  // memcpy the individual element bit-pattern.
  Builder.CreateMemCpy(Address(cur, curAlign), src, baseSizeInChars,
                       /*volatile*/ false);

  // Go to the next element.
  llvm::Value *next =
    Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");

  // Leave if that's the end of the VLA.
  llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
  Builder.CreateCondBr(done, contBB, loopBB);
  cur->addIncoming(next, loopBB);

  CGF.EmitBlock(contBB);
}

void
CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
  // Ignore empty classes in C++.
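  // An empty class still has size 1 for layout purposes, but that byte is
  // only padding; e.g. for 'struct Empty {};' there is nothing meaningful
  // to store, so null-initialization can be skipped entirely.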
  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
        return;
    }
  }

  // Cast the dest ptr to the appropriate i8 pointer type.
  if (DestPtr.getElementType() != Int8Ty)
    DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);

  // Get size and alignment info for this aggregate.
  CharUnits size = getContext().getTypeSizeInChars(Ty);

  llvm::Value *SizeVal;
  const VariableArrayType *vla;

  // Don't bother emitting a zero-byte memset.
  if (size.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (const VariableArrayType *vlaType =
            dyn_cast_or_null<VariableArrayType>(
                getContext().getAsArrayType(Ty))) {
      auto VlaSize = getVLASize(vlaType);
      SizeVal = VlaSize.NumElts;
      CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
      if (!eltSize.isOne())
        SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
      vla = vlaType;
    } else {
      return;
    }
  } else {
    SizeVal = CGM.getSize(size);
    vla = nullptr;
  }

  // If the type contains a pointer to a data member, we can't memset it to
  // zero.  Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  if (!CGM.getTypes().isZeroInitializable(Ty)) {
    // For a VLA, emit a single element, then splat that over the VLA.
    if (vla) Ty = getContext().getBaseElementType(vla);

    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);

    llvm::GlobalVariable *NullVariable =
      new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
                               /*isConstant=*/true,
                               llvm::GlobalVariable::PrivateLinkage,
                               NullConstant, Twine());
    CharUnits NullAlign = DestPtr.getAlignment();
    NullVariable->setAlignment(NullAlign.getQuantity());
    Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()),
                   NullAlign);

    if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);

    // Get and call the appropriate llvm.memcpy overload.
    Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
    return;
  }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
}

llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
  // Make sure that there is a block for the indirect goto.
  if (!IndirectBranch)
    GetIndirectGotoBlock();

  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();

  // Make sure the indirect branch includes all of the address-taken blocks.
  IndirectBranch->addDestination(BB);
  return llvm::BlockAddress::get(CurFn, BB);
}

llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
  // If we already made the indirect branch for indirect goto, return its
  // block.
  if (IndirectBranch) return IndirectBranch->getParent();

  CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));

  // Create the PHI node that indirect gotos will add entries to.
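  // The finished block looks roughly like:
  //   indirectgoto:
  //     %indirect.goto.dest = phi i8* [ %addr1, %bb1 ], [ %addr2, %bb2 ]
  //     indirectbr i8* %indirect.goto.dest, [label %L1, label %L2]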
  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
                                              "indirect.goto.dest");

  // Create the indirect branch instruction.
  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
  return IndirectBranch->getParent();
}

/// Computes the length of an array in elements, as well as the base
/// element type and a properly-typed first element pointer.
llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
                                              QualType &baseType,
                                              Address &addr) {
  const ArrayType *arrayType = origArrayType;

  // If it's a VLA, we have to load the stored size.  Note that this is the
  // number of elements in the VLA, not its size in bytes.
  llvm::Value *numVLAElements = nullptr;
  if (isa<VariableArrayType>(arrayType)) {
    numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts;

    // Walk into all VLAs.  This doesn't require changes to addr,
    // which has type T* where T is the first non-VLA element type.
    do {
      QualType elementType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(elementType);

      // If we only have VLA components, 'addr' requires no adjustment.
      if (!arrayType) {
        baseType = elementType;
        return numVLAElements;
      }
    } while (isa<VariableArrayType>(arrayType));

    // We get out here only if we find a constant array type
    // inside the VLA.
  }

  // We have some number of constant-length arrays, so addr should
  // have LLVM type [M x [N x [...]]]*.  Build a GEP that walks
  // down to the first element of addr.
  SmallVector<llvm::Value*, 8> gepIndices;

  // GEP down to the array type.
  llvm::ConstantInt *zero = Builder.getInt32(0);
  gepIndices.push_back(zero);

  uint64_t countFromCLAs = 1;
  QualType eltType;

  llvm::ArrayType *llvmArrayType =
    dyn_cast<llvm::ArrayType>(addr.getElementType());
  while (llvmArrayType) {
    assert(isa<ConstantArrayType>(arrayType));
    assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
             == llvmArrayType->getNumElements());

    gepIndices.push_back(zero);
    countFromCLAs *= llvmArrayType->getNumElements();
    eltType = arrayType->getElementType();

    llvmArrayType =
      dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
    arrayType = getContext().getAsArrayType(arrayType->getElementType());
    assert((!llvmArrayType || arrayType) &&
           "LLVM and Clang types are out of sync");
  }

  if (arrayType) {
    // From this point onwards, the Clang array type has been emitted
    // as some other type (probably a packed struct). Compute the array
    // size, and just emit the 'begin' expression as a bitcast.
    while (arrayType) {
      countFromCLAs *=
          cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
      eltType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(eltType);
    }

    llvm::Type *baseType = ConvertType(eltType);
    addr = Builder.CreateElementBitCast(addr, baseType, "array.begin");
  } else {
    // Create the actual GEP.
    addr = Address(Builder.CreateInBoundsGEP(addr.getPointer(),
                                             gepIndices, "array.begin"),
                   addr.getAlignment());
  }

  baseType = eltType;

  llvm::Value *numElements
    = llvm::ConstantInt::get(SizeTy, countFromCLAs);

  // If we had any VLA dimensions, factor them in.
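  // E.g. (illustrative) for 'int a[n][m][4]', numVLAElements is n*m and
  // countFromCLAs is 4, so the overall length is n*m*4 elements of 'int'.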
  if (numVLAElements)
    numElements = Builder.CreateNUWMul(numVLAElements, numElements);

  return numElements;
}

CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) {
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLASize(vla);
}

CodeGenFunction::VlaSizePair
CodeGenFunction::getVLASize(const VariableArrayType *type) {
  // The number of elements so far; always size_t.
  llvm::Value *numElements = nullptr;

  QualType elementType;
  do {
    elementType = type->getElementType();
    llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
    assert(vlaSize && "no size for VLA!");
    assert(vlaSize->getType() == SizeTy);

    if (!numElements) {
      numElements = vlaSize;
    } else {
      // It's undefined behavior if this wraps around, so mark it that way.
      // FIXME: Teach -fsanitize=undefined to trap this.
      numElements = Builder.CreateNUWMul(numElements, vlaSize);
    }
  } while ((type = getContext().getAsVariableArrayType(elementType)));

  return { numElements, elementType };
}

CodeGenFunction::VlaSizePair
CodeGenFunction::getVLAElements1D(QualType type) {
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLAElements1D(vla);
}

CodeGenFunction::VlaSizePair
CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) {
  llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
  assert(VlaSize && "no size for VLA!");
  assert(VlaSize->getType() == SizeTy);
  return { VlaSize, Vla->getElementType() };
}

void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
  assert(type->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVariablyModifiedType!");

  EnsureInsertPoint();

  // We're going to walk down into the type and look for VLA
  // expressions.
  do {
    assert(type->isVariablyModifiedType());

    const Type *ty = type.getTypePtr();
    switch (ty->getTypeClass()) {

#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("unexpected dependent type!");

    // These types are never variably-modified.
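    // For example, 'int (*p)[n]' is variably modified because its pointee
    // is a VLA, whereas none of the following type classes has any way to
    // embed a variable array bound.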
    case Type::Builtin:
    case Type::Complex:
    case Type::Vector:
    case Type::ExtVector:
    case Type::Record:
    case Type::Enum:
    case Type::Elaborated:
    case Type::TemplateSpecialization:
    case Type::ObjCTypeParam:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ObjCObjectPointer:
      llvm_unreachable("type class is never variably-modified!");

    case Type::Adjusted:
      type = cast<AdjustedType>(ty)->getAdjustedType();
      break;

    case Type::Decayed:
      type = cast<DecayedType>(ty)->getPointeeType();
      break;

    case Type::Pointer:
      type = cast<PointerType>(ty)->getPointeeType();
      break;

    case Type::BlockPointer:
      type = cast<BlockPointerType>(ty)->getPointeeType();
      break;

    case Type::LValueReference:
    case Type::RValueReference:
      type = cast<ReferenceType>(ty)->getPointeeType();
      break;

    case Type::MemberPointer:
      type = cast<MemberPointerType>(ty)->getPointeeType();
      break;

    case Type::ConstantArray:
    case Type::IncompleteArray:
      // Losing element qualification here is fine.
      type = cast<ArrayType>(ty)->getElementType();
      break;

    case Type::VariableArray: {
      // Losing element qualification here is fine.
      const VariableArrayType *vat = cast<VariableArrayType>(ty);

      // Unknown size indication requires no size computation.
      // Otherwise, evaluate and record it.
      if (const Expr *size = vat->getSizeExpr()) {
        // It's possible that we might have emitted this already,
        // e.g. with a typedef and a pointer to it.
        llvm::Value *&entry = VLASizeMap[size];
        if (!entry) {
          llvm::Value *Size = EmitScalarExpr(size);

          // C11 6.7.6.2p5:
          //   If the size is an expression that is not an integer constant
          //   expression [...] each time it is evaluated it shall have a
          //   value greater than zero.
          if (SanOpts.has(SanitizerKind::VLABound) &&
              size->getType()->isSignedIntegerType()) {
            SanitizerScope SanScope(this);
            llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
            llvm::Constant *StaticArgs[] = {
                EmitCheckSourceLocation(size->getBeginLoc()),
                EmitCheckTypeDescriptor(size->getType())};
            EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero),
                                     SanitizerKind::VLABound),
                      SanitizerHandler::VLABoundNotPositive, StaticArgs, Size);
          }

          // Always zexting here would be wrong if it weren't
          // undefined behavior to have a negative bound.
          entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
        }
      }
      type = vat->getElementType();
      break;
    }

    case Type::FunctionProto:
    case Type::FunctionNoProto:
      type = cast<FunctionType>(ty)->getReturnType();
      break;

    case Type::Paren:
    case Type::TypeOf:
    case Type::UnaryTransform:
    case Type::Attributed:
    case Type::SubstTemplateTypeParm:
    case Type::PackExpansion:
      // Keep walking after single level desugaring.
      type = type.getSingleStepDesugaredType(getContext());
      break;

    case Type::Typedef:
    case Type::Decltype:
    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      // Stop walking: nothing to do.
      return;

    case Type::TypeOfExpr:
      // Stop walking: emit typeof expression.
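      // This is only reached when the typeof operand has variably-modified
      // type, e.g. '__typeof__(*p)' where 'p' points to a VLA (an
      // illustrative case); the operand is emitted, with its value ignored,
      // so any VLA size expressions inside it are evaluated exactly once.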
      EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
      return;

    case Type::Atomic:
      type = cast<AtomicType>(ty)->getValueType();
      break;

    case Type::Pipe:
      type = cast<PipeType>(ty)->getElementType();
      break;
    }
  } while (type->isVariablyModifiedType());
}

Address CodeGenFunction::EmitVAListRef(const Expr *E) {
  if (getContext().getBuiltinVaListType()->isArrayType())
    return EmitPointerWithAlignment(E);
  return EmitLValue(E).getAddress();
}

Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
  return EmitLValue(E).getAddress();
}

void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
                                              const APValue &Init) {
  assert(!Init.isUninit() && "Invalid DeclRefExpr initializer!");
  if (CGDebugInfo *Dbg = getDebugInfo())
    if (CGM.getCodeGenOpts().getDebugInfo() >= codegenoptions::LimitedDebugInfo)
      Dbg->EmitGlobalVariable(E->getDecl(), Init);
}

CodeGenFunction::PeepholeProtection
CodeGenFunction::protectFromPeepholes(RValue rvalue) {
  // At the moment, the only aggressive peephole we do in IR gen
  // is trunc(zext) folding, but if we add more, we can easily
  // extend this protection.

  if (!rvalue.isScalar()) return PeepholeProtection();
  llvm::Value *value = rvalue.getScalarVal();
  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();

  // Just make an extra bitcast.
  assert(HaveInsertPoint());
  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
                                                  Builder.GetInsertBlock());

  PeepholeProtection protection;
  protection.Inst = inst;
  return protection;
}

void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
  if (!protection.Inst) return;

  // In theory, we could try to duplicate the peepholes now, but whatever.
  protection.Inst->eraseFromParent();
}

llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Value *AnnotationFn,
                                                 llvm::Value *AnnotatedVal,
                                                 StringRef AnnotationStr,
                                                 SourceLocation Location) {
  llvm::Value *Args[4] = {
    AnnotatedVal,
    Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
    Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
    CGM.EmitAnnotationLineNo(Location)
  };
  return Builder.CreateCall(AnnotationFn, Args);
}

void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  // FIXME We create a new bitcast for every annotation because that's what
  // llvm-gcc was doing.
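  // Each annotation becomes one intrinsic call, roughly:
  //   call void @llvm.var.annotation(i8* %v, i8* %ann, i8* %unit, i32 %line)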
  for (const auto *I : D->specific_attrs<AnnotateAttr>())
    EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
                       Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
                       I->getAnnotation(), D->getLocation());
}

Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
                                              Address Addr) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  llvm::Value *V = Addr.getPointer();
  llvm::Type *VTy = V->getType();
  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
                                    CGM.Int8PtrTy);

  for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
    // FIXME Always emit the cast inst so we can differentiate between
    // annotation on the first field of a struct and annotation on the struct
    // itself.
    if (VTy != CGM.Int8PtrTy)
      V = Builder.Insert(new llvm::BitCastInst(V, CGM.Int8PtrTy));
    V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation());
    V = Builder.CreateBitCast(V, VTy);
  }

  return Address(V, Addr.getAlignment());
}

CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }

CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
    : CGF(CGF) {
  assert(!CGF->IsSanitizerScope);
  CGF->IsSanitizerScope = true;
}

CodeGenFunction::SanitizerScope::~SanitizerScope() {
  CGF->IsSanitizerScope = false;
}

void CodeGenFunction::InsertHelper(llvm::Instruction *I,
                                   const llvm::Twine &Name,
                                   llvm::BasicBlock *BB,
                                   llvm::BasicBlock::iterator InsertPt) const {
  LoopStack.InsertHelper(I);
  if (IsSanitizerScope)
    CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I);
}

void CGBuilderInserter::InsertHelper(
    llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
    llvm::BasicBlock::iterator InsertPt) const {
  llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
  if (CGF)
    CGF->InsertHelper(I, Name, BB, InsertPt);
}

static bool hasRequiredFeatures(const SmallVectorImpl<StringRef> &ReqFeatures,
                                CodeGenModule &CGM, const FunctionDecl *FD,
                                std::string &FirstMissing) {
  // If there aren't any required features listed then go ahead and return
  // false.
  if (ReqFeatures.empty())
    return false;

  // Now build up the set of caller features and verify that all the required
  // features are there.
  llvm::StringMap<bool> CallerFeatureMap;
  CGM.getFunctionFeatureMap(CallerFeatureMap, FD);

  // Every entry in ReqFeatures must be satisfied; an entry of the form "A|B"
  // is satisfied when the caller has at least one of A or B.
  return std::all_of(
      ReqFeatures.begin(), ReqFeatures.end(), [&](StringRef Feature) {
        SmallVector<StringRef, 1> OrFeatures;
        Feature.split(OrFeatures, '|');
        return std::any_of(OrFeatures.begin(), OrFeatures.end(),
                           [&](StringRef Feature) {
                             if (!CallerFeatureMap.lookup(Feature)) {
                               FirstMissing = Feature.str();
                               return false;
                             }
                             return true;
                           });
      });
}

// Emits an error if we don't have a valid set of target features for the
// called function.
void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
                                          const FunctionDecl *TargetDecl) {
  // Early exit if this is an indirect call.
  if (!TargetDecl)
    return;

  // Get the current enclosing function if it exists. If it doesn't
  // we can't check the target features anyhow.
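  // For instance (illustrative), calling an AVX builtin such as
  // __builtin_ia32_vpermilvarps256 from a caller compiled without '+avx'
  // should be diagnosed here instead of crashing in the backend.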
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl);
  if (!FD)
    return;

  // Grab the required features for the call. For a builtin this is listed in
  // the .td file with the default CPU; for an always_inline function this is
  // any listed CPU and any listed features.
  unsigned BuiltinID = TargetDecl->getBuiltinID();
  std::string MissingFeature;
  if (BuiltinID) {
    SmallVector<StringRef, 1> ReqFeatures;
    const char *FeatureList =
        CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
    // Return if the builtin doesn't have any required features.
    if (!FeatureList || StringRef(FeatureList) == "")
      return;
    StringRef(FeatureList).split(ReqFeatures, ',');
    if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
      CGM.getDiags().Report(E->getBeginLoc(), diag::err_builtin_needs_feature)
          << TargetDecl->getDeclName()
          << CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);

  } else if (TargetDecl->hasAttr<TargetAttr>() ||
             TargetDecl->hasAttr<CPUSpecificAttr>()) {
    // Get the required features for the callee.

    const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
    TargetAttr::ParsedTargetAttr ParsedAttr = CGM.filterFunctionTargetAttrs(TD);

    SmallVector<StringRef, 1> ReqFeatures;
    llvm::StringMap<bool> CalleeFeatureMap;
    CGM.getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);

    for (const auto &F : ParsedAttr.Features) {
      if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
        ReqFeatures.push_back(StringRef(F).substr(1));
    }

    for (const auto &F : CalleeFeatureMap) {
      // Only positive features are "required".
      if (F.getValue())
        ReqFeatures.push_back(F.getKey());
    }
    if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
      CGM.getDiags().Report(E->getBeginLoc(), diag::err_function_needs_feature)
          << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
  }
}

void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
  if (!CGM.getCodeGenOpts().SanitizeStats)
    return;

  llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
  IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
  CGM.getSanStats().create(IRB, SSK);
}

llvm::Value *CodeGenFunction::FormResolverCondition(
    const TargetMultiVersionResolverOption &RO) {
  llvm::Value *TrueCondition = nullptr;
  if (!RO.ParsedAttribute.Architecture.empty())
    TrueCondition = EmitX86CpuIs(RO.ParsedAttribute.Architecture);

  if (!RO.ParsedAttribute.Features.empty()) {
    SmallVector<StringRef, 8> FeatureList;
    llvm::for_each(RO.ParsedAttribute.Features,
                   [&FeatureList](const std::string &Feature) {
                     FeatureList.push_back(StringRef{Feature}.substr(1));
                   });
    llvm::Value *FeatureCmp = EmitX86CpuSupports(FeatureList);
    TrueCondition = TrueCondition ?
        Builder.CreateAnd(TrueCondition, FeatureCmp) : FeatureCmp;
  }
  return TrueCondition;
}

void CodeGenFunction::EmitTargetMultiVersionResolver(
    llvm::Function *Resolver,
    ArrayRef<TargetMultiVersionResolverOption> Options) {
  assert((getContext().getTargetInfo().getTriple().getArch() ==
              llvm::Triple::x86 ||
          getContext().getTargetInfo().getTriple().getArch() ==
              llvm::Triple::x86_64) &&
         "Only implemented for x86 targets");

  // Main function's basic block.
  llvm::BasicBlock *CurBlock = createBasicBlock("entry", Resolver);
  Builder.SetInsertPoint(CurBlock);
  EmitX86CpuInit();

  llvm::Function *DefaultFunc = nullptr;
  for (const TargetMultiVersionResolverOption &RO : Options) {
    Builder.SetInsertPoint(CurBlock);
    llvm::Value *TrueCondition = FormResolverCondition(RO);

    if (!TrueCondition) {
      DefaultFunc = RO.Function;
    } else {
      llvm::BasicBlock *RetBlock = createBasicBlock("ro_ret", Resolver);
      llvm::IRBuilder<> RetBuilder(RetBlock);
      RetBuilder.CreateRet(RO.Function);
      CurBlock = createBasicBlock("ro_else", Resolver);
      Builder.CreateCondBr(TrueCondition, RetBlock, CurBlock);
    }
  }

  assert(DefaultFunc && "No default version?");
  // Emit the return from the final 'else' block.
  Builder.SetInsertPoint(CurBlock);
  Builder.CreateRet(DefaultFunc);
}

void CodeGenFunction::EmitCPUDispatchMultiVersionResolver(
    llvm::Function *Resolver,
    ArrayRef<CPUDispatchMultiVersionResolverOption> Options) {
  assert((getContext().getTargetInfo().getTriple().getArch() ==
              llvm::Triple::x86 ||
          getContext().getTargetInfo().getTriple().getArch() ==
              llvm::Triple::x86_64) &&
         "Only implemented for x86 targets");

  // Main function's basic block.
  llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
  Builder.SetInsertPoint(CurBlock);
  EmitX86CpuInit();

  for (const CPUDispatchMultiVersionResolverOption &RO : Options) {
    Builder.SetInsertPoint(CurBlock);

    // The "generic" case is a catch-all.
    if (RO.FeatureMask == 0) {
      Builder.CreateRet(RO.Function);
      return;
    }
    llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
    llvm::IRBuilder<> RetBuilder(RetBlock);
    RetBuilder.CreateRet(RO.Function);
    CurBlock = createBasicBlock("resolver_else", Resolver);
    llvm::Value *TrueCondition = EmitX86CpuSupports(RO.FeatureMask);
    Builder.CreateCondBr(TrueCondition, RetBlock, CurBlock);
  }

  Builder.SetInsertPoint(CurBlock);
  llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
  TrapCall->setDoesNotReturn();
  TrapCall->setDoesNotThrow();
  Builder.CreateUnreachable();
  Builder.ClearInsertionPoint();
}

llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
  if (CGDebugInfo *DI = getDebugInfo())
    return DI->SourceLocToDebugLoc(Location);

  return llvm::DebugLoc();
}