//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ParentMapContext.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/FixedPointBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GEPNoWrapFlags.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/TypeSize.h"
#include <cstdarg>
#include <optional>

using namespace clang;
using namespace CodeGen;
using llvm::Value;

//===----------------------------------------------------------------------===//
// Scalar Expression Emitter
//===----------------------------------------------------------------------===//

namespace llvm {
extern cl::opt<bool> EnableSingleByteCoverage;
} // namespace llvm

namespace {

/// Determine whether the given binary operation may overflow.
/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
/// the returned overflow check is precise. The returned value is 'true' for
/// all other opcodes, to be conservative.
bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
                            BinaryOperator::Opcode Opcode, bool Signed,
                            llvm::APInt &Result) {
  // Assume overflow is possible, unless we can prove otherwise.
  bool Overflow = true;
  const auto &LHSAP = LHS->getValue();
  const auto &RHSAP = RHS->getValue();
  if (Opcode == BO_Add) {
    Result = Signed ? LHSAP.sadd_ov(RHSAP, Overflow)
                    : LHSAP.uadd_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Sub) {
    Result = Signed ? LHSAP.ssub_ov(RHSAP, Overflow)
                    : LHSAP.usub_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Mul) {
    Result = Signed ? LHSAP.smul_ov(RHSAP, Overflow)
                    : LHSAP.umul_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Div || Opcode == BO_Rem) {
    if (Signed && !RHS->isZero())
      Result = LHSAP.sdiv_ov(RHSAP, Overflow);
    else
      return false;
  }
  return Overflow;
}

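// Illustrative sketch (not part of the emitter): the APInt *_ov helpers used
// above report overflow directly through their out-parameter, e.g.:
//   llvm::APInt A(/*numBits=*/8, 127), B(/*numBits=*/8, 1);
//   bool Ov = false;
//   llvm::APInt Sum = A.sadd_ov(B, Ov); // Ov == true: 127 + 1 wraps in i8
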
struct BinOpInfo {
  Value *LHS;
  Value *RHS;
  QualType Ty;  // Computation Type.
  BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
  FPOptions FPFeatures;
  const Expr *E; // Entire expr, used when reporting unsupported-operation
                 // errors. May not be a binop.

  /// Check if the binop can result in integer overflow.
  bool mayHaveIntegerOverflow() const {
    // Without constant input, we can't rule out overflow.
    auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
    auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
    if (!LHSCI || !RHSCI)
      return true;

    llvm::APInt Result;
    return ::mayHaveIntegerOverflow(
        LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
  }

  /// Check if the binop computes a division or a remainder.
  bool isDivremOp() const {
    return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
           Opcode == BO_RemAssign;
  }

  /// Check if the binop can result in an integer division by zero.
  bool mayHaveIntegerDivisionByZero() const {
    if (isDivremOp())
      if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
        return CI->isZero();
    return true;
  }

  /// Check if the binop can result in a float division by zero.
  bool mayHaveFloatDivisionByZero() const {
    if (isDivremOp())
      if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
        return CFP->isZero();
    return true;
  }

  /// Check if at least one operand is a fixed point type. In such cases,
  /// this operation did not follow the usual arithmetic conversions and both
  /// operands might not be of the same type.
  bool isFixedPointOp() const {
    // We cannot simply check the result type since comparison operations
    // return an int.
    if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
      QualType LHSType = BinOp->getLHS()->getType();
      QualType RHSType = BinOp->getRHS()->getType();
      return LHSType->isFixedPointType() || RHSType->isFixedPointType();
    }
    if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
      return UnOp->getSubExpr()->getType()->isFixedPointType();
    return false;
  }

  /// Check if the RHS has a signed integer representation.
  bool rhsHasSignedIntegerRepresentation() const {
    if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
      QualType RHSType = BinOp->getRHS()->getType();
      return RHSType->hasSignedIntegerRepresentation();
    }
    return false;
  }
};

static bool MustVisitNullValue(const Expr *E) {
  // If a null pointer expression's type is the C++0x nullptr_t, then
  // it's not necessarily a simple constant and it must be evaluated
  // for its potential side effects.
  return E->getType()->isNullPtrType();
}

/// If \p E is a widened promoted integer, get its base (unpromoted) type.
static std::optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
                                                       const Expr *E) {
  const Expr *Base = E->IgnoreImpCasts();
  if (E == Base)
    return std::nullopt;

  QualType BaseTy = Base->getType();
  if (!Ctx.isPromotableIntegerType(BaseTy) ||
      Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
    return std::nullopt;

  return BaseTy;
}

/// Check if \p E is a widened promoted integer.
static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
  return getUnwidenedIntegerType(Ctx, E).has_value();
}

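// Illustrative sketch: given
//   short a, b;
//   int c = a + b; // operands promoted to int before the add
// getUnwidenedIntegerType() on either promoted operand yields 'short', which
// lets CanElideOverflowCheck (below) prove the int-typed add cannot overflow.
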
/// Check if we can skip the overflow check for \p Op.
static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
  assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
         "Expected a unary or binary operator");

  // If the binop has constant inputs and we can prove there is no overflow,
  // we can elide the overflow check.
  if (!Op.mayHaveIntegerOverflow())
    return true;

  if (Op.Ty->isSignedIntegerType() &&
      Ctx.isTypeIgnoredBySanitizer(SanitizerKind::SignedIntegerOverflow,
                                   Op.Ty)) {
    return true;
  }

  if (Op.Ty->isUnsignedIntegerType() &&
      Ctx.isTypeIgnoredBySanitizer(SanitizerKind::UnsignedIntegerOverflow,
                                   Op.Ty)) {
    return true;
  }

  const UnaryOperator *UO = dyn_cast<UnaryOperator>(Op.E);

  if (UO && UO->getOpcode() == UO_Minus &&
      Ctx.getLangOpts().isOverflowPatternExcluded(
          LangOptions::OverflowPatternExclusionKind::NegUnsignedConst) &&
      UO->isIntegerConstantExpr(Ctx))
    return true;

  // If a unary op has a widened operand, the op cannot overflow.
  if (UO)
    return !UO->canOverflow();

  // We usually don't need overflow checks for binops with widened operands.
  // Multiplication with promoted unsigned operands is a special case.
  const auto *BO = cast<BinaryOperator>(Op.E);
  if (BO->hasExcludedOverflowPattern())
    return true;

  auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
  if (!OptionalLHSTy)
    return false;

  auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
  if (!OptionalRHSTy)
    return false;

  QualType LHSTy = *OptionalLHSTy;
  QualType RHSTy = *OptionalRHSTy;

  // This is the simple case: binops without unsigned multiplication, and with
  // widened operands. No overflow check is needed here.
  if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
      !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
    return true;

  // For unsigned multiplication the overflow check can be elided if either one
  // of the unpromoted types is less than half the size of the promoted type.
  unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
  return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
         (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
}

class ScalarExprEmitter
    : public StmtVisitor<ScalarExprEmitter, Value *> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  bool IgnoreResultAssign;
  llvm::LLVMContext &VMContext;
public:

  ScalarExprEmitter(CodeGenFunction &cgf, bool ira = false)
      : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
        VMContext(cgf.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  // Utilities
  //===--------------------------------------------------------------------===//

  bool TestAndClearIgnoreResultAssign() {
    bool I = IgnoreResultAssign;
    IgnoreResultAssign = false;
    return I;
  }

  llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
    return CGF.EmitCheckedLValue(E, TCK);
  }

  void EmitBinOpCheck(
      ArrayRef<std::pair<Value *, SanitizerKind::SanitizerOrdinal>> Checks,
      const BinOpInfo &Info);

  Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
    return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
  }

  void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
    const AlignValueAttr *AVAttr = nullptr;
    if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
      const ValueDecl *VD = DRE->getDecl();

      if (VD->getType()->isReferenceType()) {
        if (const auto *TTy =
                VD->getType().getNonReferenceType()->getAs<TypedefType>())
          AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
      } else {
        // Assumptions for function parameters are emitted at the start of the
        // function, so there is no need to repeat that here, unless the
        // alignment-assumption sanitizer is enabled; in that case we prefer
        // the assumption over the alignment attribute on the IR function
        // parameter.
        if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
          return;

        AVAttr = VD->getAttr<AlignValueAttr>();
      }
    }

    if (!AVAttr)
      if (const auto *TTy = E->getType()->getAs<TypedefType>())
        AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();

    if (!AVAttr)
      return;

    Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
    llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
    CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
  }

  /// EmitLoadOfLValue - Given an expression with scalar type that represents a
  /// value l-value, this method emits the address of the l-value, then loads
  /// and returns the result.
  Value *EmitLoadOfLValue(const Expr *E) {
    Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
                                E->getExprLoc());

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  /// EmitConversionToBool - Convert the specified expression value to a
  /// boolean (i1) truth value. This is equivalent to "Val != 0".
  Value *EmitConversionToBool(Value *Src, QualType DstTy);

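  // Illustrative sketch: EmitLValueAlignmentAssumption above fires for loads
  // through align_value-attributed pointers, e.g.
  //   typedef double *aligned_dp __attribute__((align_value(64)));
  //   double use(aligned_dp p) { return *p; } // load annotated: align 64
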
  /// Emit a check that a conversion from a floating-point type does not
  /// overflow.
  void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
                                Value *Src, QualType SrcType, QualType DstType,
                                llvm::Type *DstTy, SourceLocation Loc);

  /// Known implicit conversion check kinds.
  /// This is used for bitfield conversion checks as well.
  /// Keep in sync with the enum of the same name in ubsan_handlers.h.
  enum ImplicitConversionCheckKind : unsigned char {
    ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
    ICCK_UnsignedIntegerTruncation = 1,
    ICCK_SignedIntegerTruncation = 2,
    ICCK_IntegerSignChange = 3,
    ICCK_SignedIntegerTruncationOrSignChange = 4,
  };

  /// Emit a check that an [implicit] truncation of an integer does not
  /// discard any bits. It is not UB, so we use the value after truncation.
  void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a check that an [implicit] conversion of an integer does not change
  /// the sign of the value. It is not UB, so we use the value after conversion.
  /// NOTE: Src and Dst may be the exact same value! (They may point to the
  /// same thing.)
  void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a conversion from the specified type to the specified destination
  /// type, both of which are LLVM scalar types.
  struct ScalarConversionOpts {
    bool TreatBooleanAsSigned;
    bool EmitImplicitIntegerTruncationChecks;
    bool EmitImplicitIntegerSignChangeChecks;

    ScalarConversionOpts()
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(false),
          EmitImplicitIntegerSignChangeChecks(false) {}

    ScalarConversionOpts(clang::SanitizerSet SanOpts)
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(
              SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
          EmitImplicitIntegerSignChangeChecks(
              SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
  };
  Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
                        llvm::Type *SrcTy, llvm::Type *DstTy,
                        ScalarConversionOpts Opts);
  Value *
  EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
                       SourceLocation Loc,
                       ScalarConversionOpts Opts = ScalarConversionOpts());

  /// Convert between either a fixed point and other fixed point or fixed point
  /// and an integer.
  Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
                                  SourceLocation Loc);

  /// Emit a conversion from the specified complex type to the specified
  /// destination type, where the destination type is an LLVM scalar type.
  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                                       QualType SrcTy, QualType DstTy,
                                       SourceLocation Loc);

  /// EmitNullValue - Emit a value that corresponds to null for the given type.
  Value *EmitNullValue(QualType Ty);

  /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
  Value *EmitFloatToBoolConversion(Value *V) {
    // Compare against 0.0 for fp scalars.
    llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
    return Builder.CreateFCmpUNE(V, Zero, "tobool");
  }

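  // Illustrative note: FCmpUNE above is an *unordered* compare, so NaN
  // converts to 'true'. E.g. for 'double d = NAN; if (d) ...' the emitted IR
  // is roughly:
  //   %tobool = fcmp une double %d, 0.0   ; NaN != 0.0 -> true
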
  /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
  Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
    Value *Zero =
        CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);

    return Builder.CreateICmpNE(V, Zero, "tobool");
  }

  Value *EmitIntToBoolConversion(Value *V) {
    // Because of the type rules of C, we often end up computing a
    // logical value, then zero extending it to int, then wanting it
    // as a logical value again. Optimize this common case.
    if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
      if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
        Value *Result = ZI->getOperand(0);
        // If there aren't any more uses, zap the instruction to save space.
        // Note that there can be more uses, for example if this
        // is the result of an assignment.
        if (ZI->use_empty())
          ZI->eraseFromParent();
        return Result;
      }
    }

    return Builder.CreateIsNotNull(V, "tobool");
  }

  //===--------------------------------------------------------------------===//
  // Visitor Methods
  //===--------------------------------------------------------------------===//

  Value *Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    return StmtVisitor<ScalarExprEmitter, Value *>::Visit(E);
  }

  Value *VisitStmt(Stmt *S) {
    S->dump(llvm::errs(), CGF.getContext());
    llvm_unreachable("Stmt can't have complex result type!");
  }
  Value *VisitExpr(Expr *S);

  Value *VisitConstantExpr(ConstantExpr *E) {
    // A constant expression of type 'void' generates no code and produces no
    // value.
    if (E->getType()->isVoidType())
      return nullptr;

    if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      if (E->isGLValue())
        return CGF.EmitLoadOfScalar(
            Address(Result, CGF.convertTypeForLoadStore(E->getType()),
                    CGF.getContext().getTypeAlignInChars(E->getType())),
            /*Volatile*/ false, E->getType(), E->getExprLoc());
      return Result;
    }
    return Visit(E->getSubExpr());
  }
  Value *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }
  Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }
  Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    return Visit(GE->getResultExpr());
  }
  Value *VisitCoawaitExpr(CoawaitExpr *S) {
    return CGF.EmitCoawaitExpr(*S).getScalarVal();
  }
  Value *VisitCoyieldExpr(CoyieldExpr *S) {
    return CGF.EmitCoyieldExpr(*S).getScalarVal();
  }
  Value *VisitUnaryCoawait(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Leaves.
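  // Illustrative sketch: the leaf visitors below just materialize constants;
  // e.g. for 'int x = 42;' the initializer reaches VisitIntegerLiteral, which
  // returns Builder.getInt(E->getValue()) -- an 'i32 42' with no other IR.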
  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
    return llvm::ConstantFP::get(VMContext, E->getValue());
  }
  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
    if (E->getType()->isVoidType())
      return nullptr;

    return EmitNullValue(E->getType());
  }
  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
  Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
    llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
    return Builder.CreateBitCast(V, ConvertType(E->getType()));
  }

  Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()),
                                  E->getPackLength());
  }

  Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    return CGF.EmitPseudoObjectRValue(E).getScalarVal();
  }

  Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);
  Value *VisitEmbedExpr(EmbedExpr *E);

  Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
    if (E->isGLValue())
      return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
                              E->getExprLoc());

    // Otherwise, assume the mapping is the scalar directly.
    return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
  }

  Value *VisitOpenACCAsteriskSizeExpr(OpenACCAsteriskSizeExpr *E) {
    llvm_unreachable("Codegen for this isn't defined/implemented");
  }

  // l-values.
  Value *VisitDeclRefExpr(DeclRefExpr *E) {
    if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
      return CGF.emitScalarConstant(Constant, E);
    return EmitLoadOfLValue(E);
  }

  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
    return CGF.EmitObjCSelectorExpr(E);
  }
  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
    return CGF.EmitObjCProtocolExpr(E);
  }
  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
    if (E->getMethodDecl() &&
        E->getMethodDecl()->getReturnType()->isReferenceType())
      return EmitLoadOfLValue(E);
    return CGF.EmitObjCMessageExpr(E).getScalarVal();
  }

  Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
    LValue LV = CGF.EmitObjCIsaExpr(E);
    Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
    return V;
  }

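  // Illustrative sketch: the availability visitor below folds checks that the
  // deployment target already satisfies, e.g. with -mmacosx-version-min=11.0:
  //   if (@available(macOS 10.15, *)) ...   // folds to 'i1 true'
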
  Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
    VersionTuple Version = E->getVersion();

    // If we're checking for a platform older than our minimum deployment
    // target, we can fold the check away.
    if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
      return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);

    return CGF.EmitBuiltinAvailable(Version);
  }

  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
  Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
  Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
  Value *VisitMemberExpr(MemberExpr *E);
  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
    // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
    // literals aren't l-values in C++. We do so simply because that's the
    // cleanest way to handle compound literals in C++.
    // See the discussion here: https://reviews.llvm.org/D64464
    return EmitLoadOfLValue(E);
  }

  Value *VisitInitListExpr(InitListExpr *E);

  Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
    assert(CGF.getArrayInitIndex() &&
           "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
    return CGF.getArrayInitIndex();
  }

  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
    CGF.CGM.EmitExplicitCastExprType(E, &CGF);
    return VisitCastExpr(E);
  }
  Value *VisitCastExpr(CastExpr *E);

  Value *VisitCallExpr(const CallExpr *E) {
    if (E->getCallReturnType(CGF.getContext())->isReferenceType())
      return EmitLoadOfLValue(E);

    Value *V = CGF.EmitCallExpr(E).getScalarVal();

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  Value *VisitStmtExpr(const StmtExpr *E);

  // Unary Operators.
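  // Illustrative sketch: the four visitors below differ only in two flags.
  // E.g. for 'int i; i++;' we load i, add 1, store, and return the *old*
  // value (isPre = false); for '++i' the same sequence returns the *new* one.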
  Value *VisitUnaryPostDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, false);
  }
  Value *VisitUnaryPostInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, false);
  }
  Value *VisitUnaryPreDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, true);
  }
  Value *VisitUnaryPreInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, true);
  }

  llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
                                                  llvm::Value *InVal,
                                                  bool IsInc);

  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                       bool isInc, bool isPre);

  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
    if (isa<MemberPointerType>(E->getType())) // never sugared
      return CGF.CGM.getMemberPointerConstant(E);

    return EmitLValue(E->getSubExpr()).getPointer(CGF);
  }
  Value *VisitUnaryDeref(const UnaryOperator *E) {
    if (E->getType()->isVoidType())
      return Visit(E->getSubExpr()); // the actual value should be unused
    return EmitLoadOfLValue(E);
  }

  Value *VisitUnaryPlus(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitPlus(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryMinus(const UnaryOperator *E,
                         QualType PromotionType = QualType());
  Value *VisitMinus(const UnaryOperator *E, QualType PromotionType);

  Value *VisitUnaryNot(const UnaryOperator *E);
  Value *VisitUnaryLNot(const UnaryOperator *E);
  Value *VisitUnaryReal(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitReal(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryImag(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitImag(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // C++
  Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
    auto &Ctx = CGF.getContext();
    APValue Evaluated =
        SLE->EvaluateInContext(Ctx, CGF.CurSourceLocExprScope.getDefaultExpr());
    return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
                                             SLE->getType());
  }

  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    return Visit(DAE->getExpr());
  }
  Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    return Visit(DIE->getExpr());
  }
  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
    return CGF.LoadCXXThis();
  }

  Value *VisitExprWithCleanups(ExprWithCleanups *E);
  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
    return CGF.EmitCXXNewExpr(E);
  }
  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
    CGF.EmitCXXDeleteExpr(E);
    return nullptr;
  }

  Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }

  Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitRequiresExpr(const RequiresExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }

  Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
  }

  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    CGF.EmitScalarExpr(E->getBase());
    return nullptr;
  }

  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
    return EmitNullValue(E->getType());
  }

  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
    CGF.EmitCXXThrowExpr(E);
    return nullptr;
  }

  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
    return Builder.getInt1(E->getValue());
  }

  // Binary Operators.
  Value *EmitMul(const BinOpInfo &Ops) {
    if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
        [[fallthrough]];
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        [[fallthrough]];
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), Ops))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        return EmitOverflowCheckedBinOp(Ops);
      }
    }

    if (Ops.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder MB(Builder);
      // We need to check the types of the operands of the operator to get the
      // correct matrix dimensions.
      auto *BO = cast<BinaryOperator>(Ops.E);
      auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getLHS()->getType().getCanonicalType());
      auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getRHS()->getType().getCanonicalType());
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      if (LHSMatTy && RHSMatTy)
        return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS,
                                       LHSMatTy->getNumRows(),
                                       LHSMatTy->getNumColumns(),
                                       RHSMatTy->getNumColumns());
      return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
    }

    if (Ops.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), Ops))
      return EmitOverflowCheckedBinOp(Ops);

    if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
      // Preserve the old values.
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
    }
    if (Ops.isFixedPointOp())
      return EmitFixedPointBinOp(Ops);
    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
  }
  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);

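  // Illustrative sketch of EmitMul's signed-overflow modes above, for 'a * b':
  //   -fwrapv  (SOB_Defined):   mul i32 %a, %b       ; wraps
  //   default  (SOB_Undefined): mul nsw i32 %a, %b   ; UB on overflow
  //   -ftrapv  (SOB_Trapping):  overflow-checked path via
  //                             EmitOverflowCheckedBinOp (roughly
  //                             llvm.smul.with.overflow plus a trapping branch)
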
  // Check for undefined division and modulus behaviors.
  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
                                                  llvm::Value *Zero,
                                                  bool isDiv);
  // Common helper for getting how wide the LHS of a shift is.
  static Value *GetMaximumShiftAmount(Value *LHS, Value *RHS, bool RHSIsSigned);

  // Used for shifting constraints for OpenCL, do mask for powers of 2, URem for
  // non powers of two.
  Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);

  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
  Value *EmitAnd(const BinOpInfo &Ops) {
    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
  }
  Value *EmitXor(const BinOpInfo &Ops) {
    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
  }
  Value *EmitOr(const BinOpInfo &Ops) {
    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
  }

  // Helper functions for fixed point binary operations.
  Value *EmitFixedPointBinOp(const BinOpInfo &Ops);

  BinOpInfo EmitBinOps(const BinaryOperator *E,
                       QualType PromotionTy = QualType());

  Value *EmitPromotedValue(Value *result, QualType PromotionType);
  Value *EmitUnPromotedValue(Value *result, QualType ExprType);
  Value *EmitPromoted(const Expr *E, QualType PromotionType);

  LValue EmitCompoundAssignLValue(
      const CompoundAssignOperator *E,
      Value *(ScalarExprEmitter::*F)(const BinOpInfo &), Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));

  QualType getPromotionType(QualType Ty) {
    const auto &Ctx = CGF.getContext();
    if (auto *CT = Ty->getAs<ComplexType>()) {
      QualType ElementType = CT->getElementType();
      if (ElementType.UseExcessPrecision(Ctx))
        return Ctx.getComplexType(Ctx.FloatTy);
    }

    if (Ty.UseExcessPrecision(Ctx)) {
      if (auto *VT = Ty->getAs<VectorType>()) {
        unsigned NumElements = VT->getNumElements();
        return Ctx.getVectorType(Ctx.FloatTy, NumElements,
                                 VT->getVectorKind());
      }
      return Ctx.FloatTy;
    }

    return QualType();
  }

  // Binary operators and binary compound assignment operators.
#define HANDLEBINOP(OP)                                                        \
  Value *VisitBin##OP(const BinaryOperator *E) {                               \
    QualType promotionTy = getPromotionType(E->getType());                     \
    auto result = Emit##OP(EmitBinOps(E, promotionTy));                        \
    if (result && !promotionTy.isNull())                                       \
      result = EmitUnPromotedValue(result, E->getType());                      \
    return result;                                                             \
  }                                                                            \
  Value *VisitBin##OP##Assign(const CompoundAssignOperator *E) {               \
    return EmitCompoundAssign(E, &ScalarExprEmitter::Emit##OP);                \
  }
  HANDLEBINOP(Mul)
  HANDLEBINOP(Div)
  HANDLEBINOP(Rem)
  HANDLEBINOP(Add)
  HANDLEBINOP(Sub)
  HANDLEBINOP(Shl)
  HANDLEBINOP(Shr)
  HANDLEBINOP(And)
  HANDLEBINOP(Xor)
  HANDLEBINOP(Or)
#undef HANDLEBINOP

  // Comparisons.
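  // Illustrative note: the FP predicates below are *ordered* for relational
  // and equality compares but *unordered* for '!='. E.g. with a NaN operand:
  //   a < b  -> fcmp olt -> false        a != b -> fcmp une -> true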
  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
                     llvm::CmpInst::Predicate SICmpOpc,
                     llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
#define VISITCOMP(CODE, UI, SI, FP, SIG)                                       \
  Value *VisitBin##CODE(const BinaryOperator *E) {                             \
    return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI,              \
                       llvm::FCmpInst::FP, SIG); }
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
#undef VISITCOMP

  Value *VisitBinAssign(const BinaryOperator *E);

  Value *VisitBinLAnd(const BinaryOperator *E);
  Value *VisitBinLOr(const BinaryOperator *E);
  Value *VisitBinComma(const BinaryOperator *E);

  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }

  Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    return Visit(E->getSemanticForm());
  }

  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
    return CGF.EmitObjCBoxedExpr(E);
  }
  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
    return CGF.EmitObjCArrayLiteral(E);
  }
  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
    return CGF.EmitObjCDictionaryLiteral(E);
  }
  Value *VisitAsTypeExpr(AsTypeExpr *CE);
  Value *VisitAtomicExpr(AtomicExpr *AE);
  Value *VisitPackIndexingExpr(PackIndexingExpr *E) {
    return Visit(E->getSelectedExpr());
  }
};
} // end anonymous namespace.

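// Illustrative sketch (assuming the usual CodeGenFunction plumbing, which
// lives elsewhere in this file): the emitter above is typically driven as
//   Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResult) {
//     ...
//     return ScalarExprEmitter(*this, IgnoreResult)
//         .Visit(const_cast<Expr *>(E));
//   }
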
//===----------------------------------------------------------------------===//
// Utilities
//===----------------------------------------------------------------------===//

/// EmitConversionToBool - Convert the specified expression value to a
/// boolean (i1) truth value. This is equivalent to "Val != 0".
Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
  assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");

  if (SrcType->isRealFloatingType())
    return EmitFloatToBoolConversion(Src);

  if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);

  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
         "Unknown scalar type to convert");

  if (isa<llvm::IntegerType>(Src->getType()))
    return EmitIntToBoolConversion(Src);

  assert(isa<llvm::PointerType>(Src->getType()));
  return EmitPointerToBoolConversion(Src, SrcType);
}

void ScalarExprEmitter::EmitFloatConversionCheck(
    Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
    QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
  assert(SrcType->isFloatingType() && "not a conversion from floating point");
  if (!isa<llvm::IntegerType>(DstTy))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);
  using llvm::APFloat;
  using llvm::APSInt;

  llvm::Value *Check = nullptr;
  const llvm::fltSemantics &SrcSema =
      CGF.getContext().getFloatTypeSemantics(OrigSrcType);

  // Floating-point to integer. This has undefined behavior if the source is
  // +-Inf, NaN, or doesn't fit into the destination type (after truncation
  // to an integer).
  unsigned Width = CGF.getContext().getIntWidth(DstType);
  bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();

  APSInt Min = APSInt::getMinValue(Width, Unsigned);
  APFloat MinSrc(SrcSema, APFloat::uninitialized);
  if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for the lower bound. Just check for
    // -Inf/NaN.
    MinSrc = APFloat::getInf(SrcSema, true);
  else
    // Find the largest value which is too small to represent (before
    // truncation toward zero).
    MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);

  APSInt Max = APSInt::getMaxValue(Width, Unsigned);
  APFloat MaxSrc(SrcSema, APFloat::uninitialized);
  if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for the upper bound. Just check for
    // +Inf/NaN.
    MaxSrc = APFloat::getInf(SrcSema, false);
  else
    // Find the smallest value which is too large to represent (before
    // truncation toward zero).
    MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);

  // If we're converting from __half, convert the range to float to match
  // the type of src.
  if (OrigSrcType->isHalfType()) {
    const llvm::fltSemantics &Sema =
        CGF.getContext().getFloatTypeSemantics(SrcType);
    bool IsInexact;
    MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
    MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
  }

  llvm::Value *GE =
      Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
  llvm::Value *LE =
      Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
  Check = Builder.CreateAnd(GE, LE);

  llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
                                  CGF.EmitCheckTypeDescriptor(OrigSrcType),
                                  CGF.EmitCheckTypeDescriptor(DstType)};
  CGF.EmitCheck(std::make_pair(Check, SanitizerKind::SO_FloatCastOverflow),
                SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the truncation Src -> Dst was lossy.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();
  (void)DstTy; // Only used in assert()

  // This should be truncation of integral types.
  assert(Src != Dst);
  assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();

  // If both (src and dst) types are unsigned, then it's an unsigned
  // truncation. Else, it is a signed truncation.
  ScalarExprEmitter::ImplicitConversionCheckKind Kind;
  SanitizerKind::SanitizerOrdinal Ordinal;
  if (!SrcSigned && !DstSigned) {
    Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
    Ordinal = SanitizerKind::SO_ImplicitUnsignedIntegerTruncation;
  } else {
    Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
    Ordinal = SanitizerKind::SO_ImplicitSignedIntegerTruncation;
  }

  llvm::Value *Check = nullptr;
  // 1. Extend the truncated value back to the same width as the Src.
  Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
  // 2. Equality-compare with the original source value.
  Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
  // If the comparison result is 'i1 false', then the truncation was lossy.
  return std::make_pair(Kind, std::make_pair(Check, Ordinal));
}

static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
    QualType SrcType, QualType DstType) {
  return SrcType->isIntegerType() && DstType->isIntegerType();
}

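// Illustrative sketch: for 'uint8_t y = x;' with a 32-bit unsigned x, the
// helper above emits (roughly):
//   %anyext    = zext i8 %y to i32
//   %truncheck = icmp eq i32 %anyext, %x   ; false -> lossy, handler fires
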
void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
    return;

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  unsigned SrcBits = Src->getType()->getScalarSizeInBits();
  unsigned DstBits = Dst->getType()->getScalarSizeInBits();
  // This must be truncation. Else we do not care.
  if (SrcBits <= DstBits)
    return;

  assert(!DstType->isBooleanType() && "we should not get here with booleans.");

  // If the integer sign change sanitizer is enabled,
  // and we are truncating from a larger unsigned type to a smaller signed
  // type, let that next sanitizer deal with it.
  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
      (!SrcSigned && DstSigned))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
      Check =
          EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
  // If the comparison result is 'i1 false', then the truncation was lossy.

  // Do we care about this type of truncation?
  if (!CGF.SanOpts.has(Check.second.second))
    return;

  // Does some SSCL ignore this type?
  if (CGF.getContext().isTypeIgnoredBySanitizer(
          SanitizerMask::bitPosToMask(Check.second.second), DstType))
    return;

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first),
      llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};

  CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}

static llvm::Value *EmitIsNegativeTestHelper(Value *V, QualType VType,
                                             const char *Name,
                                             CGBuilderTy &Builder) {
  bool VSigned = VType->isSignedIntegerOrEnumerationType();
  llvm::Type *VTy = V->getType();
  if (!VSigned) {
    // If the value is unsigned, then it is never negative.
    return llvm::ConstantInt::getFalse(VTy->getContext());
  }
  llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
  return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
                            llvm::Twine(Name) + "." + V->getName() +
                                ".negativitycheck");
}

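// Illustrative sketch: for 'unsigned u = x;' with a signed 32-bit x holding
// -1, the sign-change check below compares negativity before and after:
//   %src.neg = icmp slt i32 %x, 0      ; true for -1
//   %dst.neg = false                   ; unsigned is never negative
//   %signchangecheck = icmp eq i1 %src.neg, %dst.neg  ; false -> sign changed
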
// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  (void)SrcSigned; // Only used in assert()
  (void)DstSigned; // Only used in assert()
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();
  (void)SrcBits; // Only used in assert()
  (void)DstBits; // Only used in assert()

  assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
         "either the widths should be different, or the signednesses.");

  // 1. Was the old Value negative?
  llvm::Value *SrcIsNegative =
      EmitIsNegativeTestHelper(Src, SrcType, "src", Builder);
  // 2. Is the new Value negative?
  llvm::Value *DstIsNegative =
      EmitIsNegativeTestHelper(Dst, DstType, "dst", Builder);
  // 3. Now, was the 'negativity status' preserved during the conversion?
  // NOTE: conversion from negative to zero is considered to change the sign.
  // (We want to get 'false' when the conversion changed the sign.)
  // So we should just equality-compare the negativity statuses.
  llvm::Value *Check = nullptr;
  Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
  // If the comparison result is 'false', then the conversion changed the sign.
  return std::make_pair(
      ScalarExprEmitter::ICCK_IntegerSignChange,
      std::make_pair(Check, SanitizerKind::SO_ImplicitIntegerSignChange));
}

void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.has(SanitizerKind::SO_ImplicitIntegerSignChange))
    return;

  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();

  // Now, we do not need to emit the check in *all* of the cases.
  // We can avoid emitting it in some obvious cases where it would always have
  // been dropped by the opt passes (instcombine) anyway.
  // If it's a cast between effectively the same type, no check.
  // NOTE: this is *not* equivalent to checking the canonical types.
  if (SrcSigned == DstSigned && SrcBits == DstBits)
    return;
  // At least one of the values needs to have signed type.
  // If both are unsigned, then obviously, neither of them can be negative.
  if (!SrcSigned && !DstSigned)
    return;
  // If the conversion is to a *larger* *signed* type, then no check is needed,
  // because either sign-extension happens (so the sign will remain),
  // or zero-extension will happen (the sign bit will be zero).
  if ((DstBits > SrcBits) && DstSigned)
    return;
  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && SrcSigned) {
    // If the signed integer truncation sanitizer is enabled,
    // and this is a truncation from a signed type, then no check is needed,
    // because here the sign change check is interchangeable with the
    // truncation check.
    return;
  }
  // Does an SSCL have an entry for the DstType under its respective sanitizer
  // section?
  if (DstSigned && CGF.getContext().isTypeIgnoredBySanitizer(
                       SanitizerKind::ImplicitSignedIntegerTruncation, DstType))
    return;
  if (!DstSigned &&
      CGF.getContext().isTypeIgnoredBySanitizer(
          SanitizerKind::ImplicitUnsignedIntegerTruncation, DstType))
    return;
  // That's it. We can't rule out any more cases with the data we have.

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
      Check;

  // Each of these checks needs to return 'false' when an issue was detected.
  ImplicitConversionCheckKind CheckKind;
  llvm::SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>,
                    2>
      Checks;
  // So we can 'and' all the checks together, and still get 'false',
  // if at least one of the checks detected an issue.

  Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
  CheckKind = Check.first;
  Checks.emplace_back(Check.second);

  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && !SrcSigned && DstSigned) {
    // If the signed integer truncation sanitizer was enabled,
    // and we are truncating from a larger unsigned type to a smaller signed
    // type, let's handle the case we skipped in that check.
    Check =
        EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
    CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
    Checks.emplace_back(Check.second);
    // If the comparison result is 'i1 false', then the truncation was lossy.
  }

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
      llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
  // EmitCheck() will 'and' all the checks together.
  CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}

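// Illustrative sketch: for 'int8_t s = u;' with a 32-bit unsigned u and both
// implicit-conversion sanitizers enabled, the function above emits *two*
// checks that EmitCheck() ANDs together, reported as
// ICCK_SignedIntegerTruncationOrSignChange:
//   sign change: negativity of %u (always false) vs. negativity of %s
//   truncation:  sext(trunc %u) == %u
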
// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the truncation Src -> Dst was lossy.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
EmitBitfieldTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, CGBuilderTy &Builder) {
  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();

  ScalarExprEmitter::ImplicitConversionCheckKind Kind;
  if (!SrcSigned && !DstSigned)
    Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
  else
    Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;

  llvm::Value *Check = nullptr;
  // 1. Extend the truncated value back to the same width as the Src.
  Check = Builder.CreateIntCast(Dst, Src->getType(), DstSigned, "bf.anyext");
  // 2. Equality-compare with the original source value.
  Check = Builder.CreateICmpEQ(Check, Src, "bf.truncheck");
  // If the comparison result is 'i1 false', then the truncation was lossy.

  return std::make_pair(
      Kind,
      std::make_pair(Check, SanitizerKind::SO_ImplicitBitfieldConversion));
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
EmitBitfieldSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, CGBuilderTy &Builder) {
  // 1. Was the old Value negative?
  llvm::Value *SrcIsNegative =
      EmitIsNegativeTestHelper(Src, SrcType, "bf.src", Builder);
  // 2. Is the new Value negative?
  llvm::Value *DstIsNegative =
      EmitIsNegativeTestHelper(Dst, DstType, "bf.dst", Builder);
  // 3. Now, was the 'negativity status' preserved during the conversion?
  // NOTE: conversion from negative to zero is considered to change the sign.
  // (We want to get 'false' when the conversion changed the sign.)
  // So we should just equality-compare the negativity statuses.
  llvm::Value *Check = nullptr;
  Check =
      Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "bf.signchangecheck");
  // If the comparison result is 'false', then the conversion changed the sign.
  return std::make_pair(
      ScalarExprEmitter::ICCK_IntegerSignChange,
      std::make_pair(Check, SanitizerKind::SO_ImplicitBitfieldConversion));
}

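// Illustrative sketch: with -fsanitize=implicit-bitfield-conversion, a store
// into a narrow bit-field triggers the check below, e.g.
//   struct S { unsigned b : 3; };
//   void f(struct S *s, unsigned u) { s->b = u; } // checked: does u fit in 3 bits?
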
void CodeGenFunction::EmitBitfieldConversionCheck(Value *Src, QualType SrcType,
                                                  Value *Dst, QualType DstType,
                                                  const CGBitFieldInfo &Info,
                                                  SourceLocation Loc) {

  if (!SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
    return;

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  if (DstType->isBooleanType() || SrcType->isBooleanType())
    return;

  // This should be truncation of integral types.
  assert(isa<llvm::IntegerType>(Src->getType()) &&
         isa<llvm::IntegerType>(Dst->getType()) && "non-integer llvm type");

  // TODO: Calculate the src width to avoid emitting code
  // for unnecessary cases.
  unsigned SrcBits = ConvertType(SrcType)->getScalarSizeInBits();
  unsigned DstBits = Info.Size;

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();

  CodeGenFunction::SanitizerScope SanScope(this);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
      Check;

  // Truncation
  bool EmitTruncation = DstBits < SrcBits;
  // If Dst is signed and Src unsigned, we want to be more specific
  // about the CheckKind we emit, in this case we want to emit
  // ICCK_SignedIntegerTruncationOrSignChange.
  bool EmitTruncationFromUnsignedToSigned =
      EmitTruncation && DstSigned && !SrcSigned;
  // Sign change
  bool SameTypeSameSize = SrcSigned == DstSigned && SrcBits == DstBits;
  bool BothUnsigned = !SrcSigned && !DstSigned;
  bool LargerSigned = (DstBits > SrcBits) && DstSigned;
  // We can avoid emitting sign change checks in some obvious cases:
  //   1. If Src and Dst have the same signedness and size.
  //   2. If both are unsigned, a sign check is unnecessary.
  //   3. If Dst is signed and bigger than Src, either
  //      sign-extension or zero-extension will make sure
  //      the sign remains.
  bool EmitSignChange = !SameTypeSameSize && !BothUnsigned && !LargerSigned;

  if (EmitTruncation)
    Check =
        EmitBitfieldTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
  else if (EmitSignChange) {
    assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
           "either the widths should be different, or the signednesses.");
    Check =
        EmitBitfieldSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
  } else
    return;

  ScalarExprEmitter::ImplicitConversionCheckKind CheckKind = Check.first;
  if (EmitTruncationFromUnsignedToSigned)
    CheckKind = ScalarExprEmitter::ICCK_SignedIntegerTruncationOrSignChange;

  llvm::Constant *StaticArgs[] = {
      EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(SrcType),
      EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
      llvm::ConstantInt::get(Builder.getInt32Ty(), Info.Size)};

  EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
            {Src, Dst});
}

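// Illustrative sketch: with -fno-strict-float-cast-overflow, EmitScalarCast
// below lowers out-of-range float-to-int casts to saturating intrinsics, e.g.
//   int i = 1e30f;   // -> call i32 @llvm.fptosi.sat.i32.f32(float 1.0e+30)
//                    //    yields INT_MAX instead of undefined behavior
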
1443 llvm::Type *SrcElementTy; 1444 llvm::Type *DstElementTy; 1445 QualType SrcElementType; 1446 QualType DstElementType; 1447 if (SrcType->isMatrixType() && DstType->isMatrixType()) { 1448 SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType(); 1449 DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType(); 1450 SrcElementType = SrcType->castAs<MatrixType>()->getElementType(); 1451 DstElementType = DstType->castAs<MatrixType>()->getElementType(); 1452 } else { 1453 assert(!SrcType->isMatrixType() && !DstType->isMatrixType() && 1454 "cannot cast between matrix and non-matrix types"); 1455 SrcElementTy = SrcTy; 1456 DstElementTy = DstTy; 1457 SrcElementType = SrcType; 1458 DstElementType = DstType; 1459 } 1460 1461 if (isa<llvm::IntegerType>(SrcElementTy)) { 1462 bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType(); 1463 if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) { 1464 InputSigned = true; 1465 } 1466 1467 if (isa<llvm::IntegerType>(DstElementTy)) 1468 return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv"); 1469 if (InputSigned) 1470 return Builder.CreateSIToFP(Src, DstTy, "conv"); 1471 return Builder.CreateUIToFP(Src, DstTy, "conv"); 1472 } 1473 1474 if (isa<llvm::IntegerType>(DstElementTy)) { 1475 assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion"); 1476 bool IsSigned = DstElementType->isSignedIntegerOrEnumerationType(); 1477 1478 // If we can't recognize overflow as undefined behavior, assume that 1479 // overflow saturates. This protects against normal optimizations if we are 1480 // compiling with non-standard FP semantics. 1481 if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) { 1482 llvm::Intrinsic::ID IID = 1483 IsSigned ? llvm::Intrinsic::fptosi_sat : llvm::Intrinsic::fptoui_sat; 1484 return Builder.CreateCall(CGF.CGM.getIntrinsic(IID, {DstTy, SrcTy}), Src); 1485 } 1486 1487 if (IsSigned) 1488 return Builder.CreateFPToSI(Src, DstTy, "conv"); 1489 return Builder.CreateFPToUI(Src, DstTy, "conv"); 1490 } 1491 1492 if ((DstElementTy->is16bitFPTy() && SrcElementTy->is16bitFPTy())) { 1493 Value *FloatVal = Builder.CreateFPExt(Src, Builder.getFloatTy(), "fpext"); 1494 return Builder.CreateFPTrunc(FloatVal, DstTy, "fptrunc"); 1495 } 1496 if (DstElementTy->getTypeID() < SrcElementTy->getTypeID()) 1497 return Builder.CreateFPTrunc(Src, DstTy, "conv"); 1498 return Builder.CreateFPExt(Src, DstTy, "conv"); 1499 } 1500 1501 /// Emit a conversion from the specified type to the specified destination type, 1502 /// both of which are LLVM scalar types. 1503 Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType, 1504 QualType DstType, 1505 SourceLocation Loc, 1506 ScalarConversionOpts Opts) { 1507 // All conversions involving fixed point types should be handled by the 1508 // EmitFixedPoint family functions. This is done to prevent bloating up this 1509 // function more, and although fixed point numbers are represented by 1510 // integers, we do not want to follow any logic that assumes they should be 1511 // treated as integers. 1512 // TODO(leonardchan): When necessary, add another if statement checking for 1513 // conversions to fixed point types from other types. 1514 if (SrcType->isFixedPointType()) { 1515 if (DstType->isBooleanType()) 1516 // It is important that we check this before checking if the dest type is 1517 // an integer because booleans are technically integer types. 
1518 // We do not need to check the padding bit on unsigned types if unsigned 1519 // padding is enabled because overflow into this bit is undefined 1520 // behavior. 1521 return Builder.CreateIsNotNull(Src, "tobool"); 1522 if (DstType->isFixedPointType() || DstType->isIntegerType() || 1523 DstType->isRealFloatingType()) 1524 return EmitFixedPointConversion(Src, SrcType, DstType, Loc); 1525 1526 llvm_unreachable( 1527 "Unhandled scalar conversion from a fixed point type to another type."); 1528 } else if (DstType->isFixedPointType()) { 1529 if (SrcType->isIntegerType() || SrcType->isRealFloatingType()) 1530 // This also includes converting booleans and enums to fixed point types. 1531 return EmitFixedPointConversion(Src, SrcType, DstType, Loc); 1532 1533 llvm_unreachable( 1534 "Unhandled scalar conversion to a fixed point type from another type."); 1535 } 1536 1537 QualType NoncanonicalSrcType = SrcType; 1538 QualType NoncanonicalDstType = DstType; 1539 1540 SrcType = CGF.getContext().getCanonicalType(SrcType); 1541 DstType = CGF.getContext().getCanonicalType(DstType); 1542 if (SrcType == DstType) return Src; 1543 1544 if (DstType->isVoidType()) return nullptr; 1545 1546 llvm::Value *OrigSrc = Src; 1547 QualType OrigSrcType = SrcType; 1548 llvm::Type *SrcTy = Src->getType(); 1549 1550 // Handle conversions to bool first, they are special: comparisons against 0. 1551 if (DstType->isBooleanType()) 1552 return EmitConversionToBool(Src, SrcType); 1553 1554 llvm::Type *DstTy = ConvertType(DstType); 1555 1556 // Cast from half through float if half isn't a native type. 1557 if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) { 1558 // Cast to FP using the intrinsic if the half type itself isn't supported. 1559 if (DstTy->isFloatingPointTy()) { 1560 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) 1561 return Builder.CreateCall( 1562 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy), 1563 Src); 1564 } else { 1565 // Cast to other types through float, using either the intrinsic or FPExt, 1566 // depending on whether the half type itself is supported 1567 // (as opposed to operations on half, available with NativeHalfType). 1568 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) { 1569 Src = Builder.CreateCall( 1570 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, 1571 CGF.CGM.FloatTy), 1572 Src); 1573 } else { 1574 Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv"); 1575 } 1576 SrcType = CGF.getContext().FloatTy; 1577 SrcTy = CGF.FloatTy; 1578 } 1579 } 1580 1581 // Ignore conversions like int -> uint. 1582 if (SrcTy == DstTy) { 1583 if (Opts.EmitImplicitIntegerSignChangeChecks) 1584 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src, 1585 NoncanonicalDstType, Loc); 1586 1587 return Src; 1588 } 1589 1590 // Handle pointer conversions next: pointers can only be converted to/from 1591 // other pointers and integers. Check for pointer types in terms of LLVM, as 1592 // some native types (like Obj-C id) may map to a pointer type. 1593 if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) { 1594 // The source value may be an integer, or a pointer. 1595 if (isa<llvm::PointerType>(SrcTy)) 1596 return Src; 1597 1598 assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?"); 1599 // First, convert to the correct width so that we control the kind of 1600 // extension. 
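    // E.g. on a 64-bit target, converting a 32-bit int to a pointer widens
    // the i32 to the pointer-sized i64 first (sext here since the source is
    // signed, zext otherwise) and only then emits inttoptr.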
    llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
    bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
    llvm::Value *IntResult =
        Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
    // Then, cast to pointer.
    return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
  }

  if (isa<llvm::PointerType>(SrcTy)) {
    // Must be a ptr-to-int cast.
    assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
    return Builder.CreatePtrToInt(Src, DstTy, "conv");
  }

  // A scalar can be splatted to an extended vector of the same element type.
  if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
    // Sema should add casts to make sure that the source expression's type is
    // the same as the vector's element type (sans qualifiers).
    assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
               SrcType.getTypePtr() &&
           "Splatted expr doesn't match with vector element type?");

    // Splat the element across to all elements.
    unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
    return Builder.CreateVectorSplat(NumElements, Src, "splat");
  }

  if (SrcType->isMatrixType() && DstType->isMatrixType())
    return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);

  if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
    // Allow bitcast from vector to integer/fp of the same size.
    llvm::TypeSize SrcSize = SrcTy->getPrimitiveSizeInBits();
    llvm::TypeSize DstSize = DstTy->getPrimitiveSizeInBits();
    if (SrcSize == DstSize)
      return Builder.CreateBitCast(Src, DstTy, "conv");

    // Conversions between vectors of different sizes are not allowed except
    // when vectors of half are involved. Operations on storage-only half
    // vectors require promoting half vector operands to float vectors and
    // truncating the result, which is either an int or float vector, to a
    // short or half vector.

    // Source and destination are both expected to be vectors.
    llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
    llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
    (void)DstElementTy;

    assert(((SrcElementTy->isIntegerTy() &&
             DstElementTy->isIntegerTy()) ||
            (SrcElementTy->isFloatingPointTy() &&
             DstElementTy->isFloatingPointTy())) &&
           "unexpected conversion between a floating-point vector and an "
           "integer vector");

    // Truncate an i32 vector to an i16 vector.
    if (SrcElementTy->isIntegerTy())
      return Builder.CreateIntCast(Src, DstTy, false, "conv");

    // Truncate a float vector to a half vector.
    if (SrcSize > DstSize)
      return Builder.CreateFPTrunc(Src, DstTy, "conv");

    // Promote a half vector to a float vector.
    return Builder.CreateFPExt(Src, DstTy, "conv");
  }

  // Finally, we have the arithmetic types: real int/float.
  Value *Res = nullptr;
  llvm::Type *ResTy = DstTy;

  // An overflowing conversion has undefined behavior if either the source type
  // or the destination type is a floating-point type. However, we consider the
  // range of representable values for all floating-point types to be
  // [-inf,+inf], so no overflow can ever happen when the destination type is a
  // floating-point type.
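  // E.g. '(int)1e30f' is the kind of conversion checked below, while
  // '(float)UINT_MAX' merely rounds and is never flagged.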
1677 if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) && 1678 OrigSrcType->isFloatingType()) 1679 EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy, 1680 Loc); 1681 1682 // Cast to half through float if half isn't a native type. 1683 if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) { 1684 // Make sure we cast in a single step if from another FP type. 1685 if (SrcTy->isFloatingPointTy()) { 1686 // Use the intrinsic if the half type itself isn't supported 1687 // (as opposed to operations on half, available with NativeHalfType). 1688 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) 1689 return Builder.CreateCall( 1690 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src); 1691 // If the half type is supported, just use an fptrunc. 1692 return Builder.CreateFPTrunc(Src, DstTy); 1693 } 1694 DstTy = CGF.FloatTy; 1695 } 1696 1697 Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts); 1698 1699 if (DstTy != ResTy) { 1700 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) { 1701 assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion"); 1702 Res = Builder.CreateCall( 1703 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy), 1704 Res); 1705 } else { 1706 Res = Builder.CreateFPTrunc(Res, ResTy, "conv"); 1707 } 1708 } 1709 1710 if (Opts.EmitImplicitIntegerTruncationChecks) 1711 EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res, 1712 NoncanonicalDstType, Loc); 1713 1714 if (Opts.EmitImplicitIntegerSignChangeChecks) 1715 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res, 1716 NoncanonicalDstType, Loc); 1717 1718 return Res; 1719 } 1720 1721 Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy, 1722 QualType DstTy, 1723 SourceLocation Loc) { 1724 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder); 1725 llvm::Value *Result; 1726 if (SrcTy->isRealFloatingType()) 1727 Result = FPBuilder.CreateFloatingToFixed(Src, 1728 CGF.getContext().getFixedPointSemantics(DstTy)); 1729 else if (DstTy->isRealFloatingType()) 1730 Result = FPBuilder.CreateFixedToFloating(Src, 1731 CGF.getContext().getFixedPointSemantics(SrcTy), 1732 ConvertType(DstTy)); 1733 else { 1734 auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy); 1735 auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy); 1736 1737 if (DstTy->isIntegerType()) 1738 Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema, 1739 DstFPSema.getWidth(), 1740 DstFPSema.isSigned()); 1741 else if (SrcTy->isIntegerType()) 1742 Result = FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(), 1743 DstFPSema); 1744 else 1745 Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema); 1746 } 1747 return Result; 1748 } 1749 1750 /// Emit a conversion from the specified complex type to the specified 1751 /// destination type, where the destination type is an LLVM scalar type. 1752 Value *ScalarExprEmitter::EmitComplexToScalarConversion( 1753 CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy, 1754 SourceLocation Loc) { 1755 // Get the source element type. 1756 SrcTy = SrcTy->castAs<ComplexType>()->getElementType(); 1757 1758 // Handle conversions to bool first, they are special: comparisons against 0. 
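  // E.g. '(bool)(0.0 + 1.0i)' is true: a nonzero real or imaginary part
  // suffices; only 0.0 + 0.0i converts to false.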
  if (DstTy->isBooleanType()) {
    // Complex != 0 -> (Real != 0) | (Imag != 0)
    Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
    Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
    return Builder.CreateOr(Src.first, Src.second, "tobool");
  }

  // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
  // the imaginary part of the complex value is discarded and the value of the
  // real part is converted according to the conversion rules for the
  // corresponding real type."
  return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
}

Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
  return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
}

/// Emit a sanitization check for the given "binary" operation (which
/// might actually be a unary increment which has been lowered to a binary
/// operation). The check passes if all values in \p Checks (which are \c i1)
/// are \c true.
void ScalarExprEmitter::EmitBinOpCheck(
    ArrayRef<std::pair<Value *, SanitizerKind::SanitizerOrdinal>> Checks,
    const BinOpInfo &Info) {
  assert(CGF.IsSanitizerScope);
  SanitizerHandler Check;
  SmallVector<llvm::Constant *, 4> StaticData;
  SmallVector<llvm::Value *, 2> DynamicData;

  BinaryOperatorKind Opcode = Info.Opcode;
  if (BinaryOperator::isCompoundAssignmentOp(Opcode))
    Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode);

  StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
  const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
  if (UO && UO->getOpcode() == UO_Minus) {
    Check = SanitizerHandler::NegateOverflow;
    StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
    DynamicData.push_back(Info.RHS);
  } else {
    if (BinaryOperator::isShiftOp(Opcode)) {
      // Shift LHS negative or too large, or RHS out of bounds.
      Check = SanitizerHandler::ShiftOutOfBounds;
      const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
      StaticData.push_back(
          CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
      StaticData.push_back(
          CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
    } else if (Opcode == BO_Div || Opcode == BO_Rem) {
      // Divide or modulo by zero, or signed overflow (e.g. INT_MAX / -1).
      Check = SanitizerHandler::DivremOverflow;
      StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
    } else {
      // Arithmetic overflow (+, -, *).
1814 switch (Opcode) { 1815 case BO_Add: Check = SanitizerHandler::AddOverflow; break; 1816 case BO_Sub: Check = SanitizerHandler::SubOverflow; break; 1817 case BO_Mul: Check = SanitizerHandler::MulOverflow; break; 1818 default: llvm_unreachable("unexpected opcode for bin op check"); 1819 } 1820 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty)); 1821 } 1822 DynamicData.push_back(Info.LHS); 1823 DynamicData.push_back(Info.RHS); 1824 } 1825 1826 CGF.EmitCheck(Checks, Check, StaticData, DynamicData); 1827 } 1828 1829 //===----------------------------------------------------------------------===// 1830 // Visitor Methods 1831 //===----------------------------------------------------------------------===// 1832 1833 Value *ScalarExprEmitter::VisitExpr(Expr *E) { 1834 CGF.ErrorUnsupported(E, "scalar expression"); 1835 if (E->getType()->isVoidType()) 1836 return nullptr; 1837 return llvm::PoisonValue::get(CGF.ConvertType(E->getType())); 1838 } 1839 1840 Value * 1841 ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) { 1842 ASTContext &Context = CGF.getContext(); 1843 unsigned AddrSpace = 1844 Context.getTargetAddressSpace(CGF.CGM.GetGlobalConstantAddressSpace()); 1845 llvm::Constant *GlobalConstStr = Builder.CreateGlobalString( 1846 E->ComputeName(Context), "__usn_str", AddrSpace); 1847 1848 llvm::Type *ExprTy = ConvertType(E->getType()); 1849 return Builder.CreatePointerBitCastOrAddrSpaceCast(GlobalConstStr, ExprTy, 1850 "usn_addr_cast"); 1851 } 1852 1853 Value *ScalarExprEmitter::VisitEmbedExpr(EmbedExpr *E) { 1854 assert(E->getDataElementCount() == 1); 1855 auto It = E->begin(); 1856 return Builder.getInt((*It)->getValue()); 1857 } 1858 1859 Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) { 1860 // Vector Mask Case 1861 if (E->getNumSubExprs() == 2) { 1862 Value *LHS = CGF.EmitScalarExpr(E->getExpr(0)); 1863 Value *RHS = CGF.EmitScalarExpr(E->getExpr(1)); 1864 Value *Mask; 1865 1866 auto *LTy = cast<llvm::FixedVectorType>(LHS->getType()); 1867 unsigned LHSElts = LTy->getNumElements(); 1868 1869 Mask = RHS; 1870 1871 auto *MTy = cast<llvm::FixedVectorType>(Mask->getType()); 1872 1873 // Mask off the high bits of each shuffle index. 1874 Value *MaskBits = 1875 llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1); 1876 Mask = Builder.CreateAnd(Mask, MaskBits, "mask"); 1877 1878 // newv = undef 1879 // mask = mask & maskbits 1880 // for each elt 1881 // n = extract mask i 1882 // x = extract val n 1883 // newv = insert newv, x, i 1884 auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(), 1885 MTy->getNumElements()); 1886 Value* NewV = llvm::PoisonValue::get(RTy); 1887 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) { 1888 Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i); 1889 Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx"); 1890 1891 Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt"); 1892 NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins"); 1893 } 1894 return NewV; 1895 } 1896 1897 Value* V1 = CGF.EmitScalarExpr(E->getExpr(0)); 1898 Value* V2 = CGF.EmitScalarExpr(E->getExpr(1)); 1899 1900 SmallVector<int, 32> Indices; 1901 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) { 1902 llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2); 1903 // Check for -1 and output it as undef in the IR. 
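    // E.g. '__builtin_shufflevector(a, b, 3, -1)' yields a shufflevector
    // with mask <3, -1>, leaving the second result element unspecified.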
1904 if (Idx.isSigned() && Idx.isAllOnes()) 1905 Indices.push_back(-1); 1906 else 1907 Indices.push_back(Idx.getZExtValue()); 1908 } 1909 1910 return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle"); 1911 } 1912 1913 Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) { 1914 QualType SrcType = E->getSrcExpr()->getType(), 1915 DstType = E->getType(); 1916 1917 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr()); 1918 1919 SrcType = CGF.getContext().getCanonicalType(SrcType); 1920 DstType = CGF.getContext().getCanonicalType(DstType); 1921 if (SrcType == DstType) return Src; 1922 1923 assert(SrcType->isVectorType() && 1924 "ConvertVector source type must be a vector"); 1925 assert(DstType->isVectorType() && 1926 "ConvertVector destination type must be a vector"); 1927 1928 llvm::Type *SrcTy = Src->getType(); 1929 llvm::Type *DstTy = ConvertType(DstType); 1930 1931 // Ignore conversions like int -> uint. 1932 if (SrcTy == DstTy) 1933 return Src; 1934 1935 QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(), 1936 DstEltType = DstType->castAs<VectorType>()->getElementType(); 1937 1938 assert(SrcTy->isVectorTy() && 1939 "ConvertVector source IR type must be a vector"); 1940 assert(DstTy->isVectorTy() && 1941 "ConvertVector destination IR type must be a vector"); 1942 1943 llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(), 1944 *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType(); 1945 1946 if (DstEltType->isBooleanType()) { 1947 assert((SrcEltTy->isFloatingPointTy() || 1948 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion"); 1949 1950 llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy); 1951 if (SrcEltTy->isFloatingPointTy()) { 1952 return Builder.CreateFCmpUNE(Src, Zero, "tobool"); 1953 } else { 1954 return Builder.CreateICmpNE(Src, Zero, "tobool"); 1955 } 1956 } 1957 1958 // We have the arithmetic types: real int/float. 
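  // E.g. '__builtin_convertvector(f, int4)' for a 'float4 f' takes the
  // FPToSI branch below; between two FP vectors, fptrunc vs. fpext is
  // chosen by comparing type IDs, which for the FP types are ordered
  // roughly by size.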
  Value *Res = nullptr;

  if (isa<llvm::IntegerType>(SrcEltTy)) {
    bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
    if (isa<llvm::IntegerType>(DstEltTy))
      Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    else if (InputSigned)
      Res = Builder.CreateSIToFP(Src, DstTy, "conv");
    else
      Res = Builder.CreateUIToFP(Src, DstTy, "conv");
  } else if (isa<llvm::IntegerType>(DstEltTy)) {
    assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
    if (DstEltType->isSignedIntegerOrEnumerationType())
      Res = Builder.CreateFPToSI(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPToUI(Src, DstTy, "conv");
  } else {
    assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
           "Unknown real conversion");
    if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
      Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPExt(Src, DstTy, "conv");
  }

  return Res;
}

Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
  if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
    CGF.EmitIgnoredExpr(E->getBase());
    return CGF.emitScalarConstant(Constant, E);
  } else {
    Expr::EvalResult Result;
    if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) {
      llvm::APSInt Value = Result.Val.getInt();
      CGF.EmitIgnoredExpr(E->getBase());
      return Builder.getInt(Value);
    }
  }

  llvm::Value *Result = EmitLoadOfLValue(E);

  // If -fdebug-info-for-profiling is specified, emit a pseudo variable and its
  // debug info for the pointer, even if there is no variable associated with
  // the pointer's expression.
  if (CGF.CGM.getCodeGenOpts().DebugInfoForProfiling && CGF.getDebugInfo()) {
    if (llvm::LoadInst *Load = dyn_cast<llvm::LoadInst>(Result)) {
      if (llvm::GetElementPtrInst *GEP =
              dyn_cast<llvm::GetElementPtrInst>(Load->getPointerOperand())) {
        if (llvm::Instruction *Pointer =
                dyn_cast<llvm::Instruction>(GEP->getPointerOperand())) {
          QualType Ty = E->getBase()->getType();
          if (!E->isArrow())
            Ty = CGF.getContext().getPointerType(Ty);
          CGF.getDebugInfo()->EmitPseudoVariable(Builder, Pointer, Ty);
        }
      }
    }
  }
  return Result;
}

Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
  TestAndClearIgnoreResultAssign();

  // Emit subscript expressions in rvalue contexts. For most cases, this just
  // loads the lvalue formed by the subscript expr. However, we have to be
  // careful, because the base of a vector subscript is occasionally an rvalue,
  // so we can't get it as an lvalue.
  if (!E->getBase()->getType()->isVectorType() &&
      !E->getBase()->getType()->isSveVLSBuiltinType())
    return EmitLoadOfLValue(E);

  // Handle the vector case. The base must be a vector, the index must be an
  // integer value.
  Value *Base = Visit(E->getBase());
  Value *Idx = Visit(E->getIdx());
  QualType IdxTy = E->getIdx()->getType();

  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
    CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/ true);

  return Builder.CreateExtractElement(Base, Idx, "vecext");
}

Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
  TestAndClearIgnoreResultAssign();

  // Handle the vector case.
The base must be a vector, the index must be an 2049 // integer value. 2050 Value *RowIdx = CGF.EmitMatrixIndexExpr(E->getRowIdx()); 2051 Value *ColumnIdx = CGF.EmitMatrixIndexExpr(E->getColumnIdx()); 2052 2053 const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>(); 2054 unsigned NumRows = MatrixTy->getNumRows(); 2055 llvm::MatrixBuilder MB(Builder); 2056 Value *Idx = MB.CreateIndex(RowIdx, ColumnIdx, NumRows); 2057 if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0) 2058 MB.CreateIndexAssumption(Idx, MatrixTy->getNumElementsFlattened()); 2059 2060 Value *Matrix = Visit(E->getBase()); 2061 2062 // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds? 2063 return Builder.CreateExtractElement(Matrix, Idx, "matrixext"); 2064 } 2065 2066 static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx, 2067 unsigned Off) { 2068 int MV = SVI->getMaskValue(Idx); 2069 if (MV == -1) 2070 return -1; 2071 return Off + MV; 2072 } 2073 2074 static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) { 2075 assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) && 2076 "Index operand too large for shufflevector mask!"); 2077 return C->getZExtValue(); 2078 } 2079 2080 Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { 2081 bool Ignore = TestAndClearIgnoreResultAssign(); 2082 (void)Ignore; 2083 assert (Ignore == false && "init list ignored"); 2084 unsigned NumInitElements = E->getNumInits(); 2085 2086 if (E->hadArrayRangeDesignator()) 2087 CGF.ErrorUnsupported(E, "GNU array range designator extension"); 2088 2089 llvm::VectorType *VType = 2090 dyn_cast<llvm::VectorType>(ConvertType(E->getType())); 2091 2092 if (!VType) { 2093 if (NumInitElements == 0) { 2094 // C++11 value-initialization for the scalar. 2095 return EmitNullValue(E->getType()); 2096 } 2097 // We have a scalar in braces. Just use the first element. 2098 return Visit(E->getInit(0)); 2099 } 2100 2101 if (isa<llvm::ScalableVectorType>(VType)) { 2102 if (NumInitElements == 0) { 2103 // C++11 value-initialization for the vector. 2104 return EmitNullValue(E->getType()); 2105 } 2106 2107 if (NumInitElements == 1) { 2108 Expr *InitVector = E->getInit(0); 2109 2110 // Initialize from another scalable vector of the same type. 2111 if (InitVector->getType().getCanonicalType() == 2112 E->getType().getCanonicalType()) 2113 return Visit(InitVector); 2114 } 2115 2116 llvm_unreachable("Unexpected initialization of a scalable vector!"); 2117 } 2118 2119 unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements(); 2120 2121 // Loop over initializers collecting the Value for each, and remembering 2122 // whether the source was swizzle (ExtVectorElementExpr). This will allow 2123 // us to fold the shuffle for the swizzle into the shuffle for the vector 2124 // initializer, since LLVM optimizers generally do not want to touch 2125 // shuffles. 2126 unsigned CurIdx = 0; 2127 bool VIsPoisonShuffle = false; 2128 llvm::Value *V = llvm::PoisonValue::get(VType); 2129 for (unsigned i = 0; i != NumInitElements; ++i) { 2130 Expr *IE = E->getInit(i); 2131 Value *Init = Visit(IE); 2132 SmallVector<int, 16> Args; 2133 2134 llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType()); 2135 2136 // Handle scalar elements. If the scalar initializer is actually one 2137 // element of a different vector of the same width, use shuffle instead of 2138 // extract+insert. 
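    // E.g. given 'float4 w', the initializer '(float4)(w.x, w.y, 0.f, 0.f)'
    // can fold the w.x/w.y swizzles into a single shufflevector of w rather
    // than a chain of extractelement/insertelement pairs.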
2139 if (!VVT) { 2140 if (isa<ExtVectorElementExpr>(IE)) { 2141 llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init); 2142 2143 if (cast<llvm::FixedVectorType>(EI->getVectorOperandType()) 2144 ->getNumElements() == ResElts) { 2145 llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand()); 2146 Value *LHS = nullptr, *RHS = nullptr; 2147 if (CurIdx == 0) { 2148 // insert into poison -> shuffle (src, poison) 2149 // shufflemask must use an i32 2150 Args.push_back(getAsInt32(C, CGF.Int32Ty)); 2151 Args.resize(ResElts, -1); 2152 2153 LHS = EI->getVectorOperand(); 2154 RHS = V; 2155 VIsPoisonShuffle = true; 2156 } else if (VIsPoisonShuffle) { 2157 // insert into poison shuffle && size match -> shuffle (v, src) 2158 llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V); 2159 for (unsigned j = 0; j != CurIdx; ++j) 2160 Args.push_back(getMaskElt(SVV, j, 0)); 2161 Args.push_back(ResElts + C->getZExtValue()); 2162 Args.resize(ResElts, -1); 2163 2164 LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0); 2165 RHS = EI->getVectorOperand(); 2166 VIsPoisonShuffle = false; 2167 } 2168 if (!Args.empty()) { 2169 V = Builder.CreateShuffleVector(LHS, RHS, Args); 2170 ++CurIdx; 2171 continue; 2172 } 2173 } 2174 } 2175 V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx), 2176 "vecinit"); 2177 VIsPoisonShuffle = false; 2178 ++CurIdx; 2179 continue; 2180 } 2181 2182 unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements(); 2183 2184 // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's 2185 // input is the same width as the vector being constructed, generate an 2186 // optimized shuffle of the swizzle input into the result. 2187 unsigned Offset = (CurIdx == 0) ? 0 : ResElts; 2188 if (isa<ExtVectorElementExpr>(IE)) { 2189 llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init); 2190 Value *SVOp = SVI->getOperand(0); 2191 auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType()); 2192 2193 if (OpTy->getNumElements() == ResElts) { 2194 for (unsigned j = 0; j != CurIdx; ++j) { 2195 // If the current vector initializer is a shuffle with poison, merge 2196 // this shuffle directly into it. 2197 if (VIsPoisonShuffle) { 2198 Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0)); 2199 } else { 2200 Args.push_back(j); 2201 } 2202 } 2203 for (unsigned j = 0, je = InitElts; j != je; ++j) 2204 Args.push_back(getMaskElt(SVI, j, Offset)); 2205 Args.resize(ResElts, -1); 2206 2207 if (VIsPoisonShuffle) 2208 V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0); 2209 2210 Init = SVOp; 2211 } 2212 } 2213 2214 // Extend init to result vector length, and then shuffle its contribution 2215 // to the vector initializer into V. 2216 if (Args.empty()) { 2217 for (unsigned j = 0; j != InitElts; ++j) 2218 Args.push_back(j); 2219 Args.resize(ResElts, -1); 2220 Init = Builder.CreateShuffleVector(Init, Args, "vext"); 2221 2222 Args.clear(); 2223 for (unsigned j = 0; j != CurIdx; ++j) 2224 Args.push_back(j); 2225 for (unsigned j = 0; j != InitElts; ++j) 2226 Args.push_back(j + Offset); 2227 Args.resize(ResElts, -1); 2228 } 2229 2230 // If V is poison, make sure it ends up on the RHS of the shuffle to aid 2231 // merging subsequent shuffles into this one. 2232 if (CurIdx == 0) 2233 std::swap(V, Init); 2234 V = Builder.CreateShuffleVector(V, Init, Args, "vecinit"); 2235 VIsPoisonShuffle = isa<llvm::PoisonValue>(Init); 2236 CurIdx += InitElts; 2237 } 2238 2239 // FIXME: evaluate codegen vs. shuffling against constant null vector. 
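  // E.g. 'int4 v = {1, 2};' reaches this point with CurIdx == 2; the loop
  // below zero-fills the two remaining lanes.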
  // Emit remaining default initializers.
  llvm::Type *EltTy = VType->getElementType();

  for (/* Do not initialize i */; CurIdx < ResElts; ++CurIdx) {
    Value *Idx = Builder.getInt32(CurIdx);
    llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
    V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
  }
  return V;
}

bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
  const Expr *E = CE->getSubExpr();

  if (CE->getCastKind() == CK_UncheckedDerivedToBase)
    return false;

  if (isa<CXXThisExpr>(E->IgnoreParens())) {
    // We always assume that 'this' is never null.
    return false;
  }

  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
    // And that glvalue casts are never null.
    if (ICE->isGLValue())
      return false;
  }

  return true;
}

// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
// have to handle a broader range of conversions than explicit casts, as they
// handle things like function to ptr-to-function decay etc.
Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
  Expr *E = CE->getSubExpr();
  QualType DestTy = CE->getType();
  CastKind Kind = CE->getCastKind();
  CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, CE);

  // These cases are generally not written to ignore the result of
  // evaluating their sub-expressions, so we clear this now.
  bool Ignored = TestAndClearIgnoreResultAssign();

  // Since almost all cast kinds apply to scalars, this switch doesn't have
  // a default case, so the compiler will warn on a missing case. The cases
  // are in the same order as in the CastKind enum.
2288 switch (Kind) { 2289 case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!"); 2290 case CK_BuiltinFnToFnPtr: 2291 llvm_unreachable("builtin functions are handled elsewhere"); 2292 2293 case CK_LValueBitCast: 2294 case CK_ObjCObjectLValueCast: { 2295 Address Addr = EmitLValue(E).getAddress(); 2296 Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy)); 2297 LValue LV = CGF.MakeAddrLValue(Addr, DestTy); 2298 return EmitLoadOfLValue(LV, CE->getExprLoc()); 2299 } 2300 2301 case CK_LValueToRValueBitCast: { 2302 LValue SourceLVal = CGF.EmitLValue(E); 2303 Address Addr = 2304 SourceLVal.getAddress().withElementType(CGF.ConvertTypeForMem(DestTy)); 2305 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy); 2306 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo()); 2307 return EmitLoadOfLValue(DestLV, CE->getExprLoc()); 2308 } 2309 2310 case CK_CPointerToObjCPointerCast: 2311 case CK_BlockPointerToObjCPointerCast: 2312 case CK_AnyPointerToBlockPointerCast: 2313 case CK_BitCast: { 2314 Value *Src = Visit(const_cast<Expr*>(E)); 2315 llvm::Type *SrcTy = Src->getType(); 2316 llvm::Type *DstTy = ConvertType(DestTy); 2317 assert( 2318 (!SrcTy->isPtrOrPtrVectorTy() || !DstTy->isPtrOrPtrVectorTy() || 2319 SrcTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace()) && 2320 "Address-space cast must be used to convert address spaces"); 2321 2322 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) { 2323 if (auto *PT = DestTy->getAs<PointerType>()) { 2324 CGF.EmitVTablePtrCheckForCast( 2325 PT->getPointeeType(), 2326 Address(Src, 2327 CGF.ConvertTypeForMem( 2328 E->getType()->castAs<PointerType>()->getPointeeType()), 2329 CGF.getPointerAlign()), 2330 /*MayBeNull=*/true, CodeGenFunction::CFITCK_UnrelatedCast, 2331 CE->getBeginLoc()); 2332 } 2333 } 2334 2335 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) { 2336 const QualType SrcType = E->getType(); 2337 2338 if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) { 2339 // Casting to pointer that could carry dynamic information (provided by 2340 // invariant.group) requires launder. 2341 Src = Builder.CreateLaunderInvariantGroup(Src); 2342 } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) { 2343 // Casting to pointer that does not carry dynamic information (provided 2344 // by invariant.group) requires stripping it. Note that we don't do it 2345 // if the source could not be dynamic type and destination could be 2346 // dynamic because dynamic information is already laundered. It is 2347 // because launder(strip(src)) == launder(src), so there is no need to 2348 // add extra strip before launder. 2349 Src = Builder.CreateStripInvariantGroup(Src); 2350 } 2351 } 2352 2353 // Update heapallocsite metadata when there is an explicit pointer cast. 2354 if (auto *CI = dyn_cast<llvm::CallBase>(Src)) { 2355 if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE) && 2356 !isa<CastExpr>(E)) { 2357 QualType PointeeType = DestTy->getPointeeType(); 2358 if (!PointeeType.isNull()) 2359 CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType, 2360 CE->getExprLoc()); 2361 } 2362 } 2363 2364 // If Src is a fixed vector and Dst is a scalable vector, and both have the 2365 // same element type, use the llvm.vector.insert intrinsic to perform the 2366 // bitcast. 
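    // E.g. with -msve-vector-bits=512, casting a fixed <16 x i32> VLST value
    // to its scalable VLAT counterpart emits (roughly):
    //   %r = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(
    //            <vscale x 4 x i32> poison, <16 x i32> %src, i64 0)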
2367 if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) { 2368 if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(DstTy)) { 2369 // If we are casting a fixed i8 vector to a scalable i1 predicate 2370 // vector, use a vector insert and bitcast the result. 2371 if (ScalableDstTy->getElementType()->isIntegerTy(1) && 2372 ScalableDstTy->getElementCount().isKnownMultipleOf(8) && 2373 FixedSrcTy->getElementType()->isIntegerTy(8)) { 2374 ScalableDstTy = llvm::ScalableVectorType::get( 2375 FixedSrcTy->getElementType(), 2376 ScalableDstTy->getElementCount().getKnownMinValue() / 8); 2377 } 2378 if (FixedSrcTy->getElementType() == ScalableDstTy->getElementType()) { 2379 llvm::Value *PoisonVec = llvm::PoisonValue::get(ScalableDstTy); 2380 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty); 2381 llvm::Value *Result = Builder.CreateInsertVector( 2382 ScalableDstTy, PoisonVec, Src, Zero, "cast.scalable"); 2383 if (Result->getType() != DstTy) 2384 Result = Builder.CreateBitCast(Result, DstTy); 2385 return Result; 2386 } 2387 } 2388 } 2389 2390 // If Src is a scalable vector and Dst is a fixed vector, and both have the 2391 // same element type, use the llvm.vector.extract intrinsic to perform the 2392 // bitcast. 2393 if (auto *ScalableSrcTy = dyn_cast<llvm::ScalableVectorType>(SrcTy)) { 2394 if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(DstTy)) { 2395 // If we are casting a scalable i1 predicate vector to a fixed i8 2396 // vector, bitcast the source and use a vector extract. 2397 if (ScalableSrcTy->getElementType()->isIntegerTy(1) && 2398 ScalableSrcTy->getElementCount().isKnownMultipleOf(8) && 2399 FixedDstTy->getElementType()->isIntegerTy(8)) { 2400 ScalableSrcTy = llvm::ScalableVectorType::get( 2401 FixedDstTy->getElementType(), 2402 ScalableSrcTy->getElementCount().getKnownMinValue() / 8); 2403 Src = Builder.CreateBitCast(Src, ScalableSrcTy); 2404 } 2405 if (ScalableSrcTy->getElementType() == FixedDstTy->getElementType()) { 2406 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty); 2407 return Builder.CreateExtractVector(DstTy, Src, Zero, "cast.fixed"); 2408 } 2409 } 2410 } 2411 2412 // Perform VLAT <-> VLST bitcast through memory. 2413 // TODO: since the llvm.vector.{insert,extract} intrinsics 2414 // require the element types of the vectors to be the same, we 2415 // need to keep this around for bitcasts between VLAT <-> VLST where 2416 // the element types of the vectors are not the same, until we figure 2417 // out a better way of doing these casts. 2418 if ((isa<llvm::FixedVectorType>(SrcTy) && 2419 isa<llvm::ScalableVectorType>(DstTy)) || 2420 (isa<llvm::ScalableVectorType>(SrcTy) && 2421 isa<llvm::FixedVectorType>(DstTy))) { 2422 Address Addr = CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-value"); 2423 LValue LV = CGF.MakeAddrLValue(Addr, E->getType()); 2424 CGF.EmitStoreOfScalar(Src, LV); 2425 Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy)); 2426 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy); 2427 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo()); 2428 return EmitLoadOfLValue(DestLV, CE->getExprLoc()); 2429 } 2430 2431 llvm::Value *Result = Builder.CreateBitCast(Src, DstTy); 2432 return CGF.authPointerToPointerCast(Result, E->getType(), DestTy); 2433 } 2434 case CK_AddressSpaceConversion: { 2435 Expr::EvalResult Result; 2436 if (E->EvaluateAsRValue(Result, CGF.getContext()) && 2437 Result.Val.isNullPointer()) { 2438 // If E has side effect, it is emitted even if its final result is a 2439 // null pointer. 
In that case, a DCE pass should be able to 2440 // eliminate the useless instructions emitted during translating E. 2441 if (Result.HasSideEffects) 2442 Visit(E); 2443 return CGF.CGM.getNullPointer(cast<llvm::PointerType>( 2444 ConvertType(DestTy)), DestTy); 2445 } 2446 // Since target may map different address spaces in AST to the same address 2447 // space, an address space conversion may end up as a bitcast. 2448 return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast( 2449 CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(), 2450 DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy)); 2451 } 2452 case CK_AtomicToNonAtomic: 2453 case CK_NonAtomicToAtomic: 2454 case CK_UserDefinedConversion: 2455 return Visit(const_cast<Expr*>(E)); 2456 2457 case CK_NoOp: { 2458 return CE->changesVolatileQualification() ? EmitLoadOfLValue(CE) 2459 : Visit(const_cast<Expr *>(E)); 2460 } 2461 2462 case CK_BaseToDerived: { 2463 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl(); 2464 assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!"); 2465 2466 Address Base = CGF.EmitPointerWithAlignment(E); 2467 Address Derived = 2468 CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl, 2469 CE->path_begin(), CE->path_end(), 2470 CGF.ShouldNullCheckClassCastValue(CE)); 2471 2472 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is 2473 // performed and the object is not of the derived type. 2474 if (CGF.sanitizePerformTypeCheck()) 2475 CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(), 2476 Derived, DestTy->getPointeeType()); 2477 2478 if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast)) 2479 CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(), Derived, 2480 /*MayBeNull=*/true, 2481 CodeGenFunction::CFITCK_DerivedCast, 2482 CE->getBeginLoc()); 2483 2484 return CGF.getAsNaturalPointerTo(Derived, CE->getType()->getPointeeType()); 2485 } 2486 case CK_UncheckedDerivedToBase: 2487 case CK_DerivedToBase: { 2488 // The EmitPointerWithAlignment path does this fine; just discard 2489 // the alignment. 2490 return CGF.getAsNaturalPointerTo(CGF.EmitPointerWithAlignment(CE), 2491 CE->getType()->getPointeeType()); 2492 } 2493 2494 case CK_Dynamic: { 2495 Address V = CGF.EmitPointerWithAlignment(E); 2496 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE); 2497 return CGF.EmitDynamicCast(V, DCE); 2498 } 2499 2500 case CK_ArrayToPointerDecay: 2501 return CGF.getAsNaturalPointerTo(CGF.EmitArrayToPointerDecay(E), 2502 CE->getType()->getPointeeType()); 2503 case CK_FunctionToPointerDecay: 2504 return EmitLValue(E).getPointer(CGF); 2505 2506 case CK_NullToPointer: 2507 if (MustVisitNullValue(E)) 2508 CGF.EmitIgnoredExpr(E); 2509 2510 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)), 2511 DestTy); 2512 2513 case CK_NullToMemberPointer: { 2514 if (MustVisitNullValue(E)) 2515 CGF.EmitIgnoredExpr(E); 2516 2517 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>(); 2518 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT); 2519 } 2520 2521 case CK_ReinterpretMemberPointer: 2522 case CK_BaseToDerivedMemberPointer: 2523 case CK_DerivedToBaseMemberPointer: { 2524 Value *Src = Visit(E); 2525 2526 // Note that the AST doesn't distinguish between checked and 2527 // unchecked member pointer conversions, so we always have to 2528 // implement checked conversions here. 
This is inefficient when
    // actual control flow may be required in order to perform the
    // check, which it is for data member pointers (but not member
    // function pointers on Itanium and ARM).
    return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
  }

  case CK_ARCProduceObject:
    return CGF.EmitARCRetainScalarExpr(E);
  case CK_ARCConsumeObject:
    return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
  case CK_ARCReclaimReturnedObject:
    return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
  case CK_ARCExtendBlockObject:
    return CGF.EmitARCExtendBlockObject(E);

  case CK_CopyAndAutoreleaseBlockObject:
    return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());

  case CK_FloatingRealToComplex:
  case CK_FloatingComplexCast:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_FloatingComplexToIntegralComplex:
  case CK_ConstructorConversion:
  case CK_ToUnion:
  case CK_HLSLArrayRValue:
    llvm_unreachable("scalar cast to non-scalar value");

  case CK_LValueToRValue:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
    assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
    return Visit(const_cast<Expr*>(E));

  case CK_IntegralToPointer: {
    Value *Src = Visit(const_cast<Expr*>(E));

    // First, convert to the correct width so that we control the kind of
    // extension.
    auto DestLLVMTy = ConvertType(DestTy);
    llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
    bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
    llvm::Value *IntResult =
        Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");

    auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);

    if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
      // Going from integer to pointer that could be dynamic requires reloading
      // dynamic information from invariant.group.
      if (DestTy.mayBeDynamicClass())
        IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
    }

    IntToPtr = CGF.authPointerToPointerCast(IntToPtr, E->getType(), DestTy);
    return IntToPtr;
  }
  case CK_PointerToIntegral: {
    assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
    auto *PtrExpr = Visit(E);

    if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
      const QualType SrcType = E->getType();

      // Casting to integer requires stripping dynamic information, as it does
      // not carry it.
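      // E.g. under -fstrict-vtable-pointers, '(uintptr_t)p' for a pointer to
      // a polymorphic class emits llvm.strip.invariant.group on the pointer
      // before the ptrtoint below.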
2595 if (SrcType.mayBeDynamicClass()) 2596 PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr); 2597 } 2598 2599 PtrExpr = CGF.authPointerToPointerCast(PtrExpr, E->getType(), DestTy); 2600 return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy)); 2601 } 2602 case CK_ToVoid: { 2603 CGF.EmitIgnoredExpr(E); 2604 return nullptr; 2605 } 2606 case CK_MatrixCast: { 2607 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2608 CE->getExprLoc()); 2609 } 2610 case CK_VectorSplat: { 2611 llvm::Type *DstTy = ConvertType(DestTy); 2612 Value *Elt = Visit(const_cast<Expr *>(E)); 2613 // Splat the element across to all elements 2614 llvm::ElementCount NumElements = 2615 cast<llvm::VectorType>(DstTy)->getElementCount(); 2616 return Builder.CreateVectorSplat(NumElements, Elt, "splat"); 2617 } 2618 2619 case CK_FixedPointCast: 2620 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2621 CE->getExprLoc()); 2622 2623 case CK_FixedPointToBoolean: 2624 assert(E->getType()->isFixedPointType() && 2625 "Expected src type to be fixed point type"); 2626 assert(DestTy->isBooleanType() && "Expected dest type to be boolean type"); 2627 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2628 CE->getExprLoc()); 2629 2630 case CK_FixedPointToIntegral: 2631 assert(E->getType()->isFixedPointType() && 2632 "Expected src type to be fixed point type"); 2633 assert(DestTy->isIntegerType() && "Expected dest type to be an integer"); 2634 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2635 CE->getExprLoc()); 2636 2637 case CK_IntegralToFixedPoint: 2638 assert(E->getType()->isIntegerType() && 2639 "Expected src type to be an integer"); 2640 assert(DestTy->isFixedPointType() && 2641 "Expected dest type to be fixed point type"); 2642 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2643 CE->getExprLoc()); 2644 2645 case CK_IntegralCast: { 2646 if (E->getType()->isExtVectorType() && DestTy->isExtVectorType()) { 2647 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType(); 2648 return Builder.CreateIntCast(Visit(E), ConvertType(DestTy), 2649 SrcElTy->isSignedIntegerOrEnumerationType(), 2650 "conv"); 2651 } 2652 ScalarConversionOpts Opts; 2653 if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) { 2654 if (!ICE->isPartOfExplicitCast()) 2655 Opts = ScalarConversionOpts(CGF.SanOpts); 2656 } 2657 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2658 CE->getExprLoc(), Opts); 2659 } 2660 case CK_IntegralToFloating: { 2661 if (E->getType()->isVectorType() && DestTy->isVectorType()) { 2662 // TODO: Support constrained FP intrinsics. 2663 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType(); 2664 if (SrcElTy->isSignedIntegerOrEnumerationType()) 2665 return Builder.CreateSIToFP(Visit(E), ConvertType(DestTy), "conv"); 2666 return Builder.CreateUIToFP(Visit(E), ConvertType(DestTy), "conv"); 2667 } 2668 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE); 2669 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2670 CE->getExprLoc()); 2671 } 2672 case CK_FloatingToIntegral: { 2673 if (E->getType()->isVectorType() && DestTy->isVectorType()) { 2674 // TODO: Support constrained FP intrinsics. 
2675 QualType DstElTy = DestTy->castAs<VectorType>()->getElementType(); 2676 if (DstElTy->isSignedIntegerOrEnumerationType()) 2677 return Builder.CreateFPToSI(Visit(E), ConvertType(DestTy), "conv"); 2678 return Builder.CreateFPToUI(Visit(E), ConvertType(DestTy), "conv"); 2679 } 2680 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE); 2681 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2682 CE->getExprLoc()); 2683 } 2684 case CK_FloatingCast: { 2685 if (E->getType()->isVectorType() && DestTy->isVectorType()) { 2686 // TODO: Support constrained FP intrinsics. 2687 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType(); 2688 QualType DstElTy = DestTy->castAs<VectorType>()->getElementType(); 2689 if (DstElTy->castAs<BuiltinType>()->getKind() < 2690 SrcElTy->castAs<BuiltinType>()->getKind()) 2691 return Builder.CreateFPTrunc(Visit(E), ConvertType(DestTy), "conv"); 2692 return Builder.CreateFPExt(Visit(E), ConvertType(DestTy), "conv"); 2693 } 2694 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE); 2695 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2696 CE->getExprLoc()); 2697 } 2698 case CK_FixedPointToFloating: 2699 case CK_FloatingToFixedPoint: { 2700 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE); 2701 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2702 CE->getExprLoc()); 2703 } 2704 case CK_BooleanToSignedIntegral: { 2705 ScalarConversionOpts Opts; 2706 Opts.TreatBooleanAsSigned = true; 2707 return EmitScalarConversion(Visit(E), E->getType(), DestTy, 2708 CE->getExprLoc(), Opts); 2709 } 2710 case CK_IntegralToBoolean: 2711 return EmitIntToBoolConversion(Visit(E)); 2712 case CK_PointerToBoolean: 2713 return EmitPointerToBoolConversion(Visit(E), E->getType()); 2714 case CK_FloatingToBoolean: { 2715 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE); 2716 return EmitFloatToBoolConversion(Visit(E)); 2717 } 2718 case CK_MemberPointerToBoolean: { 2719 llvm::Value *MemPtr = Visit(E); 2720 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>(); 2721 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT); 2722 } 2723 2724 case CK_FloatingComplexToReal: 2725 case CK_IntegralComplexToReal: 2726 return CGF.EmitComplexExpr(E, false, true).first; 2727 2728 case CK_FloatingComplexToBoolean: 2729 case CK_IntegralComplexToBoolean: { 2730 CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E); 2731 2732 // TODO: kill this function off, inline appropriate case here 2733 return EmitComplexToScalarConversion(V, E->getType(), DestTy, 2734 CE->getExprLoc()); 2735 } 2736 2737 case CK_ZeroToOCLOpaqueType: { 2738 assert((DestTy->isEventT() || DestTy->isQueueT() || 2739 DestTy->isOCLIntelSubgroupAVCType()) && 2740 "CK_ZeroToOCLEvent cast on non-event type"); 2741 return llvm::Constant::getNullValue(ConvertType(DestTy)); 2742 } 2743 2744 case CK_IntToOCLSampler: 2745 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF); 2746 2747 case CK_HLSLVectorTruncation: { 2748 assert((DestTy->isVectorType() || DestTy->isBuiltinType()) && 2749 "Destination type must be a vector or builtin type."); 2750 Value *Vec = Visit(const_cast<Expr *>(E)); 2751 if (auto *VecTy = DestTy->getAs<VectorType>()) { 2752 SmallVector<int> Mask; 2753 unsigned NumElts = VecTy->getNumElements(); 2754 for (unsigned I = 0; I != NumElts; ++I) 2755 Mask.push_back(I); 2756 2757 return Builder.CreateShuffleVector(Vec, Mask, "trunc"); 2758 } 2759 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.SizeTy); 2760 return 
Builder.CreateExtractElement(Vec, Zero, "cast.vtrunc"); 2761 } 2762 2763 } // end of switch 2764 2765 llvm_unreachable("unknown scalar cast"); 2766 } 2767 2768 Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) { 2769 CodeGenFunction::StmtExprEvaluation eval(CGF); 2770 Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(), 2771 !E->getType()->isVoidType()); 2772 if (!RetAlloca.isValid()) 2773 return nullptr; 2774 return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()), 2775 E->getExprLoc()); 2776 } 2777 2778 Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { 2779 CodeGenFunction::RunCleanupsScope Scope(CGF); 2780 Value *V = Visit(E->getSubExpr()); 2781 // Defend against dominance problems caused by jumps out of expression 2782 // evaluation through the shared cleanup block. 2783 Scope.ForceCleanup({&V}); 2784 return V; 2785 } 2786 2787 //===----------------------------------------------------------------------===// 2788 // Unary Operators 2789 //===----------------------------------------------------------------------===// 2790 2791 static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E, 2792 llvm::Value *InVal, bool IsInc, 2793 FPOptions FPFeatures) { 2794 BinOpInfo BinOp; 2795 BinOp.LHS = InVal; 2796 BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false); 2797 BinOp.Ty = E->getType(); 2798 BinOp.Opcode = IsInc ? BO_Add : BO_Sub; 2799 BinOp.FPFeatures = FPFeatures; 2800 BinOp.E = E; 2801 return BinOp; 2802 } 2803 2804 llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior( 2805 const UnaryOperator *E, llvm::Value *InVal, bool IsInc) { 2806 llvm::Value *Amount = 2807 llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true); 2808 StringRef Name = IsInc ? "inc" : "dec"; 2809 switch (CGF.getLangOpts().getSignedOverflowBehavior()) { 2810 case LangOptions::SOB_Defined: 2811 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) 2812 return Builder.CreateAdd(InVal, Amount, Name); 2813 [[fallthrough]]; 2814 case LangOptions::SOB_Undefined: 2815 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) 2816 return Builder.CreateNSWAdd(InVal, Amount, Name); 2817 [[fallthrough]]; 2818 case LangOptions::SOB_Trapping: 2819 BinOpInfo Info = createBinOpInfoFromIncDec( 2820 E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts())); 2821 if (!E->canOverflow() || CanElideOverflowCheck(CGF.getContext(), Info)) 2822 return Builder.CreateNSWAdd(InVal, Amount, Name); 2823 return EmitOverflowCheckedBinOp(Info); 2824 } 2825 llvm_unreachable("Unknown SignedOverflowBehaviorTy"); 2826 } 2827 2828 /// For the purposes of overflow pattern exclusion, does this match the 2829 /// "while(i--)" pattern? 2830 static bool matchesPostDecrInWhile(const UnaryOperator *UO, bool isInc, 2831 bool isPre, ASTContext &Ctx) { 2832 if (isInc || isPre) 2833 return false; 2834 2835 // -fsanitize-undefined-ignore-overflow-pattern=unsigned-post-decr-while 2836 if (!Ctx.getLangOpts().isOverflowPatternExcluded( 2837 LangOptions::OverflowPatternExclusionKind::PostDecrInWhile)) 2838 return false; 2839 2840 // all Parents (usually just one) must be a WhileStmt 2841 for (const auto &Parent : Ctx.getParentMapContext().getParents(*UO)) 2842 if (!Parent.get<WhileStmt>()) 2843 return false; 2844 2845 return true; 2846 } 2847 2848 namespace { 2849 /// Handles check and update for lastprivate conditional variables. 
2850 class OMPLastprivateConditionalUpdateRAII { 2851 private: 2852 CodeGenFunction &CGF; 2853 const UnaryOperator *E; 2854 2855 public: 2856 OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF, 2857 const UnaryOperator *E) 2858 : CGF(CGF), E(E) {} 2859 ~OMPLastprivateConditionalUpdateRAII() { 2860 if (CGF.getLangOpts().OpenMP) 2861 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional( 2862 CGF, E->getSubExpr()); 2863 } 2864 }; 2865 } // namespace 2866 2867 llvm::Value * 2868 ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, 2869 bool isInc, bool isPre) { 2870 OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E); 2871 QualType type = E->getSubExpr()->getType(); 2872 llvm::PHINode *atomicPHI = nullptr; 2873 llvm::Value *value; 2874 llvm::Value *input; 2875 llvm::Value *Previous = nullptr; 2876 QualType SrcType = E->getType(); 2877 2878 int amount = (isInc ? 1 : -1); 2879 bool isSubtraction = !isInc; 2880 2881 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) { 2882 type = atomicTy->getValueType(); 2883 if (isInc && type->isBooleanType()) { 2884 llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type); 2885 if (isPre) { 2886 Builder.CreateStore(True, LV.getAddress(), LV.isVolatileQualified()) 2887 ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent); 2888 return Builder.getTrue(); 2889 } 2890 // For atomic bool increment we just store true: a plain store suffices 2891 // for pre-increment (above); post-increment does an atomic swap with true. 2892 return Builder.CreateAtomicRMW( 2893 llvm::AtomicRMWInst::Xchg, LV.getAddress(), True, 2894 llvm::AtomicOrdering::SequentiallyConsistent); 2895 } 2896 // Special case for atomic increment/decrement on integers: emit 2897 // atomicrmw instructions. We skip this when overflow checking is 2898 // requested, and fall into the slow path with the atomic cmpxchg loop. 2899 if (!type->isBooleanType() && type->isIntegerType() && 2900 !(type->isUnsignedIntegerType() && 2901 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) && 2902 CGF.getLangOpts().getSignedOverflowBehavior() != 2903 LangOptions::SOB_Trapping) { 2904 llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add : 2905 llvm::AtomicRMWInst::Sub; 2906 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add : 2907 llvm::Instruction::Sub; 2908 llvm::Value *amt = CGF.EmitToMemory( 2909 llvm::ConstantInt::get(ConvertType(type), 1, true), type); 2910 llvm::Value *old = 2911 Builder.CreateAtomicRMW(aop, LV.getAddress(), amt, 2912 llvm::AtomicOrdering::SequentiallyConsistent); 2913 return isPre ? Builder.CreateBinOp(op, old, amt) : old; 2914 } 2915 // Special case for atomic increment/decrement on floats. 2916 // Bail out on non-power-of-two-sized floating-point types (e.g., x86_fp80). 2917 if (type->isFloatingType()) { 2918 llvm::Type *Ty = ConvertType(type); 2919 if (llvm::has_single_bit(Ty->getScalarSizeInBits())) { 2920 llvm::AtomicRMWInst::BinOp aop = 2921 isInc ? llvm::AtomicRMWInst::FAdd : llvm::AtomicRMWInst::FSub; 2922 llvm::Instruction::BinaryOps op = 2923 isInc ? llvm::Instruction::FAdd : llvm::Instruction::FSub; 2924 llvm::Value *amt = llvm::ConstantFP::get(Ty, 1.0); 2925 llvm::AtomicRMWInst *old = 2926 CGF.emitAtomicRMWInst(aop, LV.getAddress(), amt, 2927 llvm::AtomicOrdering::SequentiallyConsistent); 2928 2929 return isPre ?
Builder.CreateBinOp(op, old, amt) : old; 2930 } 2931 } 2932 value = EmitLoadOfLValue(LV, E->getExprLoc()); 2933 input = value; 2934 // For every other atomic operation, we need to emit a load-op-cmpxchg loop. 2935 llvm::BasicBlock *startBB = Builder.GetInsertBlock(); 2936 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn); 2937 value = CGF.EmitToMemory(value, type); 2938 Builder.CreateBr(opBB); 2939 Builder.SetInsertPoint(opBB); 2940 atomicPHI = Builder.CreatePHI(value->getType(), 2); 2941 atomicPHI->addIncoming(value, startBB); 2942 value = atomicPHI; 2943 } else { 2944 value = EmitLoadOfLValue(LV, E->getExprLoc()); 2945 input = value; 2946 } 2947 2948 // Special case of integer increment that we have to check first: bool++. 2949 // Due to promotion rules, we get: 2950 // bool++ -> bool = bool + 1 2951 // -> bool = (int)bool + 1 2952 // -> bool = ((int)bool + 1 != 0) 2953 // An interesting aspect of this is that increment is always true. 2954 // Decrement does not have this property. 2955 if (isInc && type->isBooleanType()) { 2956 value = Builder.getTrue(); 2957 2958 // Most common case by far: integer increment. 2959 } else if (type->isIntegerType()) { 2960 QualType promotedType; 2961 bool canPerformLossyDemotionCheck = false; 2962 2963 bool excludeOverflowPattern = 2964 matchesPostDecrInWhile(E, isInc, isPre, CGF.getContext()); 2965 2966 if (CGF.getContext().isPromotableIntegerType(type)) { 2967 promotedType = CGF.getContext().getPromotedIntegerType(type); 2968 assert(promotedType != type && "Shouldn't promote to the same type."); 2969 canPerformLossyDemotionCheck = true; 2970 canPerformLossyDemotionCheck &= 2971 CGF.getContext().getCanonicalType(type) != 2972 CGF.getContext().getCanonicalType(promotedType); 2973 canPerformLossyDemotionCheck &= 2974 PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck( 2975 type, promotedType); 2976 assert((!canPerformLossyDemotionCheck || 2977 type->isSignedIntegerOrEnumerationType() || 2978 promotedType->isSignedIntegerOrEnumerationType() || 2979 ConvertType(type)->getScalarSizeInBits() == 2980 ConvertType(promotedType)->getScalarSizeInBits()) && 2981 "The following check expects that if we do promotion to different " 2982 "underlying canonical type, at least one of the types (either " 2983 "base or promoted) will be signed, or the bitwidths will match."); 2984 } 2985 if (CGF.SanOpts.hasOneOf( 2986 SanitizerKind::ImplicitIntegerArithmeticValueChange | 2987 SanitizerKind::ImplicitBitfieldConversion) && 2988 canPerformLossyDemotionCheck) { 2989 // `x += 1` (for `x` with width less than int) is modeled as 2990 // promotion + arithmetic + demotion, so a lossy demotion is easy to 2991 // catch there. Inc/dec with width less than int can't overflow because 2992 // of promotion rules, so we normally omit the promotion and demotion, 2993 // which means we cannot catch a lossy "demotion". Because we still want 2994 // to catch these cases when the sanitizer is enabled, we perform the 2995 // promotion, then the increment/decrement in the wider type, and 2996 // finally the demotion. This will catch lossy demotions. 2997 2998 // We have a special case for bitfields defined using all the bits of the 2999 // type. In this case we need to do the same trick as for the integer 3000 // sanitizer checks, i.e., promotion -> increment/decrement -> demotion.
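// For instance (an illustrative sketch, not tied to a particular target),
// 'unsigned char c; c++;' under the implicit-conversion sanitizer is
// emitted roughly as:
//   %wide   = zext i8 %c to i32
//   %inc    = add i32 %wide, 1
//   %narrow = trunc i32 %inc to i8   ; plus a check that re-widening
//                                    ; %narrow reproduces %inc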
3001 3002 value = EmitScalarConversion(value, type, promotedType, E->getExprLoc()); 3003 Value *amt = llvm::ConstantInt::get(value->getType(), amount, true); 3004 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec"); 3005 // Pass non-default ScalarConversionOpts so that the sanitizer check is 3006 // emitted if LV is not a bitfield; otherwise the bitfield sanitizer 3007 // checks will take care of the conversion. 3008 ScalarConversionOpts Opts; 3009 if (!LV.isBitField()) 3010 Opts = ScalarConversionOpts(CGF.SanOpts); 3011 else if (CGF.SanOpts.has(SanitizerKind::ImplicitBitfieldConversion)) { 3012 Previous = value; 3013 SrcType = promotedType; 3014 } 3015 3016 value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(), 3017 Opts); 3018 3019 // Note that signed integer inc/dec with width less than int can't 3020 // overflow because of promotion rules; we're just eliding a few steps 3021 // here. 3022 } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) { 3023 value = EmitIncDecConsiderOverflowBehavior(E, value, isInc); 3024 } else if (E->canOverflow() && type->isUnsignedIntegerType() && 3025 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) && 3026 !excludeOverflowPattern && 3027 !CGF.getContext().isTypeIgnoredBySanitizer( 3028 SanitizerKind::UnsignedIntegerOverflow, E->getType())) { 3029 value = EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec( 3030 E, value, isInc, E->getFPFeaturesInEffect(CGF.getLangOpts()))); 3031 } else { 3032 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true); 3033 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec"); 3034 } 3035 3036 // Next most common: pointer increment. 3037 } else if (const PointerType *ptr = type->getAs<PointerType>()) { 3038 QualType type = ptr->getPointeeType(); 3039 3040 // VLA types don't have constant size. 3041 if (const VariableArrayType *vla 3042 = CGF.getContext().getAsVariableArrayType(type)) { 3043 llvm::Value *numElts = CGF.getVLASize(vla).NumElts; 3044 if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize"); 3045 llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType()); 3046 if (CGF.getLangOpts().PointerOverflowDefined) 3047 value = Builder.CreateGEP(elemTy, value, numElts, "vla.inc"); 3048 else 3049 value = CGF.EmitCheckedInBoundsGEP( 3050 elemTy, value, numElts, /*SignedIndices=*/false, isSubtraction, 3051 E->getExprLoc(), "vla.inc"); 3052 3053 // Arithmetic on function pointers (!) is just +-1. 3054 } else if (type->isFunctionType()) { 3055 llvm::Value *amt = Builder.getInt32(amount); 3056 3057 if (CGF.getLangOpts().PointerOverflowDefined) 3058 value = Builder.CreateGEP(CGF.Int8Ty, value, amt, "incdec.funcptr"); 3059 else 3060 value = 3061 CGF.EmitCheckedInBoundsGEP(CGF.Int8Ty, value, amt, 3062 /*SignedIndices=*/false, isSubtraction, 3063 E->getExprLoc(), "incdec.funcptr"); 3064 3065 // For everything else, we can just do a simple increment. 3066 } else { 3067 llvm::Value *amt = Builder.getInt32(amount); 3068 llvm::Type *elemTy = CGF.ConvertTypeForMem(type); 3069 if (CGF.getLangOpts().PointerOverflowDefined) 3070 value = Builder.CreateGEP(elemTy, value, amt, "incdec.ptr"); 3071 else 3072 value = CGF.EmitCheckedInBoundsGEP( 3073 elemTy, value, amt, /*SignedIndices=*/false, isSubtraction, 3074 E->getExprLoc(), "incdec.ptr"); 3075 } 3076 3077 // Vector increment/decrement.
3078 } else if (type->isVectorType()) { 3079 if (type->hasIntegerRepresentation()) { 3080 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount); 3081 3082 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec"); 3083 } else { 3084 value = Builder.CreateFAdd( 3085 value, 3086 llvm::ConstantFP::get(value->getType(), amount), 3087 isInc ? "inc" : "dec"); 3088 } 3089 3090 // Floating point. 3091 } else if (type->isRealFloatingType()) { 3092 // Add the inc/dec to the real part. 3093 llvm::Value *amt; 3094 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); 3095 3096 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) { 3097 // Another special case: half FP increment should be done via float 3098 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) { 3099 value = Builder.CreateCall( 3100 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, 3101 CGF.CGM.FloatTy), 3102 input, "incdec.conv"); 3103 } else { 3104 value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv"); 3105 } 3106 } 3107 3108 if (value->getType()->isFloatTy()) 3109 amt = llvm::ConstantFP::get(VMContext, 3110 llvm::APFloat(static_cast<float>(amount))); 3111 else if (value->getType()->isDoubleTy()) 3112 amt = llvm::ConstantFP::get(VMContext, 3113 llvm::APFloat(static_cast<double>(amount))); 3114 else { 3115 // Remaining types are Half, Bfloat16, LongDouble, __ibm128 or __float128. 3116 // Convert from float. 3117 llvm::APFloat F(static_cast<float>(amount)); 3118 bool ignored; 3119 const llvm::fltSemantics *FS; 3120 // Don't use getFloatTypeSemantics because Half isn't 3121 // necessarily represented using the "half" LLVM type. 3122 if (value->getType()->isFP128Ty()) 3123 FS = &CGF.getTarget().getFloat128Format(); 3124 else if (value->getType()->isHalfTy()) 3125 FS = &CGF.getTarget().getHalfFormat(); 3126 else if (value->getType()->isBFloatTy()) 3127 FS = &CGF.getTarget().getBFloat16Format(); 3128 else if (value->getType()->isPPC_FP128Ty()) 3129 FS = &CGF.getTarget().getIbm128Format(); 3130 else 3131 FS = &CGF.getTarget().getLongDoubleFormat(); 3132 F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored); 3133 amt = llvm::ConstantFP::get(VMContext, F); 3134 } 3135 value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec"); 3136 3137 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) { 3138 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) { 3139 value = Builder.CreateCall( 3140 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, 3141 CGF.CGM.FloatTy), 3142 value, "incdec.conv"); 3143 } else { 3144 value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv"); 3145 } 3146 } 3147 3148 // Fixed-point types. 3149 } else if (type->isFixedPointType()) { 3150 // Fixed-point types are tricky. In some cases, it isn't possible to 3151 // represent a 1 or a -1 in the type at all. Piggyback off of 3152 // EmitFixedPointBinOp to avoid having to reimplement saturation. 3153 BinOpInfo Info; 3154 Info.E = E; 3155 Info.Ty = E->getType(); 3156 Info.Opcode = isInc ? BO_Add : BO_Sub; 3157 Info.LHS = value; 3158 Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false); 3159 // If the type is signed, it's better to represent this as +(-1) or -(-1), 3160 // since -1 is guaranteed to be representable. 3161 if (type->isSignedFixedPointType()) { 3162 Info.Opcode = isInc ? BO_Sub : BO_Add; 3163 Info.RHS = Builder.CreateNeg(Info.RHS); 3164 } 3165 // Now, convert from our invented integer literal to the type of the unary 3166 // op. 
This will upscale and saturate if necessary. This value can become 3167 // undef in some cases. 3168 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder); 3169 auto DstSema = CGF.getContext().getFixedPointSemantics(Info.Ty); 3170 Info.RHS = FPBuilder.CreateIntegerToFixed(Info.RHS, true, DstSema); 3171 value = EmitFixedPointBinOp(Info); 3172 3173 // Objective-C pointer types. 3174 } else { 3175 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>(); 3176 3177 CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType()); 3178 if (!isInc) size = -size; 3179 llvm::Value *sizeValue = 3180 llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity()); 3181 3182 if (CGF.getLangOpts().PointerOverflowDefined) 3183 value = Builder.CreateGEP(CGF.Int8Ty, value, sizeValue, "incdec.objptr"); 3184 else 3185 value = CGF.EmitCheckedInBoundsGEP( 3186 CGF.Int8Ty, value, sizeValue, /*SignedIndices=*/false, isSubtraction, 3187 E->getExprLoc(), "incdec.objptr"); 3188 value = Builder.CreateBitCast(value, input->getType()); 3189 } 3190 3191 if (atomicPHI) { 3192 llvm::BasicBlock *curBlock = Builder.GetInsertBlock(); 3193 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn); 3194 auto Pair = CGF.EmitAtomicCompareExchange( 3195 LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc()); 3196 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type); 3197 llvm::Value *success = Pair.second; 3198 atomicPHI->addIncoming(old, curBlock); 3199 Builder.CreateCondBr(success, contBB, atomicPHI->getParent()); 3200 Builder.SetInsertPoint(contBB); 3201 return isPre ? value : input; 3202 } 3203 3204 // Store the updated result through the lvalue. 3205 if (LV.isBitField()) { 3206 Value *Src = Previous ? Previous : value; 3207 CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value); 3208 CGF.EmitBitfieldConversionCheck(Src, SrcType, value, E->getType(), 3209 LV.getBitFieldInfo(), E->getExprLoc()); 3210 } else 3211 CGF.EmitStoreThroughLValue(RValue::get(value), LV); 3212 3213 // If this is a postinc, return the value read from memory, otherwise use the 3214 // updated value. 3215 return isPre ? value : input; 3216 } 3217 3218 3219 Value *ScalarExprEmitter::VisitUnaryPlus(const UnaryOperator *E, 3220 QualType PromotionType) { 3221 QualType promotionTy = PromotionType.isNull() 3222 ? getPromotionType(E->getSubExpr()->getType()) 3223 : PromotionType; 3224 Value *result = VisitPlus(E, promotionTy); 3225 if (result && !promotionTy.isNull()) 3226 result = EmitUnPromotedValue(result, E->getType()); 3227 return result; 3228 } 3229 3230 Value *ScalarExprEmitter::VisitPlus(const UnaryOperator *E, 3231 QualType PromotionType) { 3232 // This differs from gcc, though, most likely due to a bug in gcc. 3233 TestAndClearIgnoreResultAssign(); 3234 if (!PromotionType.isNull()) 3235 return CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType); 3236 return Visit(E->getSubExpr()); 3237 } 3238 3239 Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E, 3240 QualType PromotionType) { 3241 QualType promotionTy = PromotionType.isNull() 3242 ? 
getPromotionType(E->getSubExpr()->getType()) 3243 : PromotionType; 3244 Value *result = VisitMinus(E, promotionTy); 3245 if (result && !promotionTy.isNull()) 3246 result = EmitUnPromotedValue(result, E->getType()); 3247 return result; 3248 } 3249 3250 Value *ScalarExprEmitter::VisitMinus(const UnaryOperator *E, 3251 QualType PromotionType) { 3252 TestAndClearIgnoreResultAssign(); 3253 Value *Op; 3254 if (!PromotionType.isNull()) 3255 Op = CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType); 3256 else 3257 Op = Visit(E->getSubExpr()); 3258 3259 // Generate a unary FNeg for FP ops. 3260 if (Op->getType()->isFPOrFPVectorTy()) 3261 return Builder.CreateFNeg(Op, "fneg"); 3262 3263 // Emit unary minus with EmitSub so we handle overflow cases etc. 3264 BinOpInfo BinOp; 3265 BinOp.RHS = Op; 3266 BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType()); 3267 BinOp.Ty = E->getType(); 3268 BinOp.Opcode = BO_Sub; 3269 BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts()); 3270 BinOp.E = E; 3271 return EmitSub(BinOp); 3272 } 3273 3274 Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) { 3275 TestAndClearIgnoreResultAssign(); 3276 Value *Op = Visit(E->getSubExpr()); 3277 return Builder.CreateNot(Op, "not"); 3278 } 3279 3280 Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) { 3281 // Perform vector logical not on comparison with zero vector. 3282 if (E->getType()->isVectorType() && 3283 E->getType()->castAs<VectorType>()->getVectorKind() == 3284 VectorKind::Generic) { 3285 Value *Oper = Visit(E->getSubExpr()); 3286 Value *Zero = llvm::Constant::getNullValue(Oper->getType()); 3287 Value *Result; 3288 if (Oper->getType()->isFPOrFPVectorTy()) { 3289 CodeGenFunction::CGFPOptionsRAII FPOptsRAII( 3290 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts())); 3291 Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp"); 3292 } else 3293 Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp"); 3294 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext"); 3295 } 3296 3297 // Compare operand to zero. 3298 Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr()); 3299 3300 // Invert value. 3301 // TODO: Could dynamically modify easy computations here. For example, if 3302 // the operand is an icmp ne, turn into icmp eq. 3303 BoolVal = Builder.CreateNot(BoolVal, "lnot"); 3304 3305 // ZExt result to the expr type. 3306 return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext"); 3307 } 3308 3309 Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) { 3310 // Try folding the offsetof to a constant. 3311 Expr::EvalResult EVResult; 3312 if (E->EvaluateAsInt(EVResult, CGF.getContext())) { 3313 llvm::APSInt Value = EVResult.Val.getInt(); 3314 return Builder.getInt(Value); 3315 } 3316 3317 // Loop over the components of the offsetof to compute the value. 
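// For example (illustrative only), a non-constant
//   __builtin_offsetof(struct S, a[i].f)
// decomposes into an Array component contributing i * sizeof(S::a[0]) and
// a Field component contributing the constant byte offset of 'f'; the
// loop below simply accumulates each component's offset into Result.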
3318 unsigned n = E->getNumComponents(); 3319 llvm::Type* ResultType = ConvertType(E->getType()); 3320 llvm::Value* Result = llvm::Constant::getNullValue(ResultType); 3321 QualType CurrentType = E->getTypeSourceInfo()->getType(); 3322 for (unsigned i = 0; i != n; ++i) { 3323 OffsetOfNode ON = E->getComponent(i); 3324 llvm::Value *Offset = nullptr; 3325 switch (ON.getKind()) { 3326 case OffsetOfNode::Array: { 3327 // Compute the index 3328 Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex()); 3329 llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr); 3330 bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType(); 3331 Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv"); 3332 3333 // Save the element type 3334 CurrentType = 3335 CGF.getContext().getAsArrayType(CurrentType)->getElementType(); 3336 3337 // Compute the element size 3338 llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType, 3339 CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity()); 3340 3341 // Multiply out to compute the result 3342 Offset = Builder.CreateMul(Idx, ElemSize); 3343 break; 3344 } 3345 3346 case OffsetOfNode::Field: { 3347 FieldDecl *MemberDecl = ON.getField(); 3348 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl(); 3349 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD); 3350 3351 // Compute the index of the field in its parent. 3352 unsigned i = 0; 3353 // FIXME: It would be nice if we didn't have to loop here! 3354 for (RecordDecl::field_iterator Field = RD->field_begin(), 3355 FieldEnd = RD->field_end(); 3356 Field != FieldEnd; ++Field, ++i) { 3357 if (*Field == MemberDecl) 3358 break; 3359 } 3360 assert(i < RL.getFieldCount() && "offsetof field in wrong type"); 3361 3362 // Compute the offset to the field 3363 int64_t OffsetInt = RL.getFieldOffset(i) / 3364 CGF.getContext().getCharWidth(); 3365 Offset = llvm::ConstantInt::get(ResultType, OffsetInt); 3366 3367 // Save the element type. 3368 CurrentType = MemberDecl->getType(); 3369 break; 3370 } 3371 3372 case OffsetOfNode::Identifier: 3373 llvm_unreachable("dependent __builtin_offsetof"); 3374 3375 case OffsetOfNode::Base: { 3376 if (ON.getBase()->isVirtual()) { 3377 CGF.ErrorUnsupported(E, "virtual base in offsetof"); 3378 continue; 3379 } 3380 3381 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl(); 3382 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD); 3383 3384 // Save the element type. 3385 CurrentType = ON.getBase()->getType(); 3386 3387 // Compute the offset to the base. 3388 auto *BaseRT = CurrentType->castAs<RecordType>(); 3389 auto *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl()); 3390 CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD); 3391 Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity()); 3392 break; 3393 } 3394 } 3395 Result = Builder.CreateAdd(Result, Offset); 3396 } 3397 return Result; 3398 } 3399 3400 /// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of 3401 /// argument of the sizeof expression as an integer. 3402 Value * 3403 ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr( 3404 const UnaryExprOrTypeTraitExpr *E) { 3405 QualType TypeToSize = E->getTypeOfArgument(); 3406 if (auto Kind = E->getKind(); 3407 Kind == UETT_SizeOf || Kind == UETT_DataSizeOf) { 3408 if (const VariableArrayType *VAT = 3409 CGF.getContext().getAsVariableArrayType(TypeToSize)) { 3410 if (E->isArgumentType()) { 3411 // sizeof(type) - make sure to emit the VLA size. 
3412 CGF.EmitVariablyModifiedType(TypeToSize); 3413 } else { 3414 // C99 6.5.3.4p2: If the argument is an expression of type 3415 // VLA, it is evaluated. 3416 CGF.EmitIgnoredExpr(E->getArgumentExpr()); 3417 } 3418 3419 auto VlaSize = CGF.getVLASize(VAT); 3420 llvm::Value *size = VlaSize.NumElts; 3421 3422 // Scale the number of non-VLA elements by the non-VLA element size. 3423 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type); 3424 if (!eltSize.isOne()) 3425 size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), size); 3426 3427 return size; 3428 } 3429 } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) { 3430 auto Alignment = 3431 CGF.getContext() 3432 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign( 3433 E->getTypeOfArgument()->getPointeeType())) 3434 .getQuantity(); 3435 return llvm::ConstantInt::get(CGF.SizeTy, Alignment); 3436 } else if (E->getKind() == UETT_VectorElements) { 3437 auto *VecTy = cast<llvm::VectorType>(ConvertType(E->getTypeOfArgument())); 3438 return Builder.CreateElementCount(CGF.SizeTy, VecTy->getElementCount()); 3439 } 3440 3441 // If this isn't sizeof(vla), the result must be constant; use the constant 3442 // folding logic so we don't have to duplicate it here. 3443 return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext())); 3444 } 3445 3446 Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E, 3447 QualType PromotionType) { 3448 QualType promotionTy = PromotionType.isNull() 3449 ? getPromotionType(E->getSubExpr()->getType()) 3450 : PromotionType; 3451 Value *result = VisitReal(E, promotionTy); 3452 if (result && !promotionTy.isNull()) 3453 result = EmitUnPromotedValue(result, E->getType()); 3454 return result; 3455 } 3456 3457 Value *ScalarExprEmitter::VisitReal(const UnaryOperator *E, 3458 QualType PromotionType) { 3459 Expr *Op = E->getSubExpr(); 3460 if (Op->getType()->isAnyComplexType()) { 3461 // If it's an l-value, load through the appropriate subobject l-value. 3462 // Note that we have to ask E because Op might be an l-value that 3463 // this won't work for, e.g. an Obj-C property. 3464 if (E->isGLValue()) { 3465 if (!PromotionType.isNull()) { 3466 CodeGenFunction::ComplexPairTy result = CGF.EmitComplexExpr( 3467 Op, /*IgnoreReal*/ IgnoreResultAssign, /*IgnoreImag*/ true); 3468 if (result.first) 3469 result.first = CGF.EmitPromotedValue(result, PromotionType).first; 3470 return result.first; 3471 } else { 3472 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc()) 3473 .getScalarVal(); 3474 } 3475 } 3476 // Otherwise, calculate and project. 3477 return CGF.EmitComplexExpr(Op, false, true).first; 3478 } 3479 3480 if (!PromotionType.isNull()) 3481 return CGF.EmitPromotedScalarExpr(Op, PromotionType); 3482 return Visit(Op); 3483 } 3484 3485 Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E, 3486 QualType PromotionType) { 3487 QualType promotionTy = PromotionType.isNull() 3488 ? getPromotionType(E->getSubExpr()->getType()) 3489 : PromotionType; 3490 Value *result = VisitImag(E, promotionTy); 3491 if (result && !promotionTy.isNull()) 3492 result = EmitUnPromotedValue(result, E->getType()); 3493 return result; 3494 } 3495 3496 Value *ScalarExprEmitter::VisitImag(const UnaryOperator *E, 3497 QualType PromotionType) { 3498 Expr *Op = E->getSubExpr(); 3499 if (Op->getType()->isAnyComplexType()) { 3500 // If it's an l-value, load through the appropriate subobject l-value. 3501 // Note that we have to ask E because Op might be an l-value that 3502 // this won't work for, e.g. 
an Obj-C property. 3503 if (Op->isGLValue()) { 3504 if (!PromotionType.isNull()) { 3505 CodeGenFunction::ComplexPairTy result = CGF.EmitComplexExpr( 3506 Op, /*IgnoreReal*/ true, /*IgnoreImag*/ IgnoreResultAssign); 3507 if (result.second) 3508 result.second = CGF.EmitPromotedValue(result, PromotionType).second; 3509 return result.second; 3510 } else { 3511 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc()) 3512 .getScalarVal(); 3513 } 3514 } 3515 // Otherwise, calculate and project. 3516 return CGF.EmitComplexExpr(Op, true, false).second; 3517 } 3518 3519 // __imag on a scalar returns zero. Emit the subexpr to ensure side 3520 // effects are evaluated, but not the actual value. 3521 if (Op->isGLValue()) 3522 CGF.EmitLValue(Op); 3523 else if (!PromotionType.isNull()) 3524 CGF.EmitPromotedScalarExpr(Op, PromotionType); 3525 else 3526 CGF.EmitScalarExpr(Op, true); 3527 if (!PromotionType.isNull()) 3528 return llvm::Constant::getNullValue(ConvertType(PromotionType)); 3529 return llvm::Constant::getNullValue(ConvertType(E->getType())); 3530 } 3531 3532 //===----------------------------------------------------------------------===// 3533 // Binary Operators 3534 //===----------------------------------------------------------------------===// 3535 3536 Value *ScalarExprEmitter::EmitPromotedValue(Value *result, 3537 QualType PromotionType) { 3538 return CGF.Builder.CreateFPExt(result, ConvertType(PromotionType), "ext"); 3539 } 3540 3541 Value *ScalarExprEmitter::EmitUnPromotedValue(Value *result, 3542 QualType ExprType) { 3543 return CGF.Builder.CreateFPTrunc(result, ConvertType(ExprType), "unpromotion"); 3544 } 3545 3546 Value *ScalarExprEmitter::EmitPromoted(const Expr *E, QualType PromotionType) { 3547 E = E->IgnoreParens(); 3548 if (auto BO = dyn_cast<BinaryOperator>(E)) { 3549 switch (BO->getOpcode()) { 3550 #define HANDLE_BINOP(OP) \ 3551 case BO_##OP: \ 3552 return Emit##OP(EmitBinOps(BO, PromotionType)); 3553 HANDLE_BINOP(Add) 3554 HANDLE_BINOP(Sub) 3555 HANDLE_BINOP(Mul) 3556 HANDLE_BINOP(Div) 3557 #undef HANDLE_BINOP 3558 default: 3559 break; 3560 } 3561 } else if (auto UO = dyn_cast<UnaryOperator>(E)) { 3562 switch (UO->getOpcode()) { 3563 case UO_Imag: 3564 return VisitImag(UO, PromotionType); 3565 case UO_Real: 3566 return VisitReal(UO, PromotionType); 3567 case UO_Minus: 3568 return VisitMinus(UO, PromotionType); 3569 case UO_Plus: 3570 return VisitPlus(UO, PromotionType); 3571 default: 3572 break; 3573 } 3574 } 3575 auto result = Visit(const_cast<Expr *>(E)); 3576 if (result) { 3577 if (!PromotionType.isNull()) 3578 return EmitPromotedValue(result, PromotionType); 3579 else 3580 return EmitUnPromotedValue(result, E->getType()); 3581 } 3582 return result; 3583 } 3584 3585 BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E, 3586 QualType PromotionType) { 3587 TestAndClearIgnoreResultAssign(); 3588 BinOpInfo Result; 3589 Result.LHS = CGF.EmitPromotedScalarExpr(E->getLHS(), PromotionType); 3590 Result.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionType); 3591 if (!PromotionType.isNull()) 3592 Result.Ty = PromotionType; 3593 else 3594 Result.Ty = E->getType(); 3595 Result.Opcode = E->getOpcode(); 3596 Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts()); 3597 Result.E = E; 3598 return Result; 3599 } 3600 3601 LValue ScalarExprEmitter::EmitCompoundAssignLValue( 3602 const CompoundAssignOperator *E, 3603 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &), 3604 Value *&Result) { 3605 QualType LHSTy = E->getLHS()->getType(); 3606 BinOpInfo 
OpInfo; 3607 3608 if (E->getComputationResultType()->isAnyComplexType()) 3609 return CGF.EmitScalarCompoundAssignWithComplex(E, Result); 3610 3611 // Emit the RHS first. __block variables need to have the RHS evaluated 3612 // first; this should also improve codegen a little. 3613 3614 QualType PromotionTypeCR; 3615 PromotionTypeCR = getPromotionType(E->getComputationResultType()); 3616 if (PromotionTypeCR.isNull()) 3617 PromotionTypeCR = E->getComputationResultType(); 3618 QualType PromotionTypeLHS = getPromotionType(E->getComputationLHSType()); 3619 QualType PromotionTypeRHS = getPromotionType(E->getRHS()->getType()); 3620 if (!PromotionTypeRHS.isNull()) 3621 OpInfo.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionTypeRHS); 3622 else 3623 OpInfo.RHS = Visit(E->getRHS()); 3624 OpInfo.Ty = PromotionTypeCR; 3625 OpInfo.Opcode = E->getOpcode(); 3626 OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts()); 3627 OpInfo.E = E; 3628 // Load/convert the LHS. 3629 LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store); 3630 3631 llvm::PHINode *atomicPHI = nullptr; 3632 if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) { 3633 QualType type = atomicTy->getValueType(); 3634 if (!type->isBooleanType() && type->isIntegerType() && 3635 !(type->isUnsignedIntegerType() && 3636 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) && 3637 CGF.getLangOpts().getSignedOverflowBehavior() != 3638 LangOptions::SOB_Trapping) { 3639 llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP; 3640 llvm::Instruction::BinaryOps Op; 3641 switch (OpInfo.Opcode) { 3642 // We don't have atomicrmw operations for *, %, /, <<, >> 3643 case BO_MulAssign: case BO_DivAssign: 3644 case BO_RemAssign: 3645 case BO_ShlAssign: 3646 case BO_ShrAssign: 3647 break; 3648 case BO_AddAssign: 3649 AtomicOp = llvm::AtomicRMWInst::Add; 3650 Op = llvm::Instruction::Add; 3651 break; 3652 case BO_SubAssign: 3653 AtomicOp = llvm::AtomicRMWInst::Sub; 3654 Op = llvm::Instruction::Sub; 3655 break; 3656 case BO_AndAssign: 3657 AtomicOp = llvm::AtomicRMWInst::And; 3658 Op = llvm::Instruction::And; 3659 break; 3660 case BO_XorAssign: 3661 AtomicOp = llvm::AtomicRMWInst::Xor; 3662 Op = llvm::Instruction::Xor; 3663 break; 3664 case BO_OrAssign: 3665 AtomicOp = llvm::AtomicRMWInst::Or; 3666 Op = llvm::Instruction::Or; 3667 break; 3668 default: 3669 llvm_unreachable("Invalid compound assignment type"); 3670 } 3671 if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) { 3672 llvm::Value *Amt = CGF.EmitToMemory( 3673 EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy, 3674 E->getExprLoc()), 3675 LHSTy); 3676 3677 llvm::AtomicRMWInst *OldVal = 3678 CGF.emitAtomicRMWInst(AtomicOp, LHSLV.getAddress(), Amt); 3679 3680 // Since the operation is atomic, the result type is guaranteed to be 3681 // the same as the input in LLVM terms. 3682 Result = Builder.CreateBinOp(Op, OldVal, Amt); 3683 return LHSLV; 3684 } 3685 } 3686 // FIXME: For floating point types, we should be saving and restoring the 3687 // floating point environment in the loop.
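// The slow path that follows is a classic compare-and-swap retry loop;
// schematically (a sketch of the emitted IR, names abbreviated):
//   entry:      %orig = load ...
//               br label %atomic_op
//   atomic_op:  %cur  = phi [ %orig, %entry ], [ %old, %atomic_op ]
//               %new  = <op> %cur, %rhs
//               %pair = cmpxchg ptr %addr, %cur, %new seq_cst seq_cst
//               %old  = extractvalue %pair, 0
//               ... retry on failure, continue on success.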
3688 llvm::BasicBlock *startBB = Builder.GetInsertBlock(); 3689 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn); 3690 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc()); 3691 OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type); 3692 Builder.CreateBr(opBB); 3693 Builder.SetInsertPoint(opBB); 3694 atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2); 3695 atomicPHI->addIncoming(OpInfo.LHS, startBB); 3696 OpInfo.LHS = atomicPHI; 3697 } 3698 else 3699 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc()); 3700 3701 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures); 3702 SourceLocation Loc = E->getExprLoc(); 3703 if (!PromotionTypeLHS.isNull()) 3704 OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy, PromotionTypeLHS, 3705 E->getExprLoc()); 3706 else 3707 OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy, 3708 E->getComputationLHSType(), Loc); 3709 3710 // Expand the binary operator. 3711 Result = (this->*Func)(OpInfo); 3712 3713 // Convert the result back to the LHS type, 3714 // potentially with Implicit Conversion sanitizer check. 3715 // If LHSLV is a bitfield, use default ScalarConversionOpts 3716 // to avoid emit any implicit integer checks. 3717 Value *Previous = nullptr; 3718 if (LHSLV.isBitField()) { 3719 Previous = Result; 3720 Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc); 3721 } else 3722 Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc, 3723 ScalarConversionOpts(CGF.SanOpts)); 3724 3725 if (atomicPHI) { 3726 llvm::BasicBlock *curBlock = Builder.GetInsertBlock(); 3727 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn); 3728 auto Pair = CGF.EmitAtomicCompareExchange( 3729 LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc()); 3730 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy); 3731 llvm::Value *success = Pair.second; 3732 atomicPHI->addIncoming(old, curBlock); 3733 Builder.CreateCondBr(success, contBB, atomicPHI->getParent()); 3734 Builder.SetInsertPoint(contBB); 3735 return LHSLV; 3736 } 3737 3738 // Store the result value into the LHS lvalue. Bit-fields are handled 3739 // specially because the result is altered by the store, i.e., [C99 6.5.16p1] 3740 // 'An assignment expression has the value of the left operand after the 3741 // assignment...'. 3742 if (LHSLV.isBitField()) { 3743 Value *Src = Previous ? Previous : Result; 3744 QualType SrcType = E->getRHS()->getType(); 3745 QualType DstType = E->getLHS()->getType(); 3746 CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result); 3747 CGF.EmitBitfieldConversionCheck(Src, SrcType, Result, DstType, 3748 LHSLV.getBitFieldInfo(), E->getExprLoc()); 3749 } else 3750 CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV); 3751 3752 if (CGF.getLangOpts().OpenMP) 3753 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, 3754 E->getLHS()); 3755 return LHSLV; 3756 } 3757 3758 Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E, 3759 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) { 3760 bool Ignore = TestAndClearIgnoreResultAssign(); 3761 Value *RHS = nullptr; 3762 LValue LHS = EmitCompoundAssignLValue(E, Func, RHS); 3763 3764 // If the result is clearly ignored, return now. 3765 if (Ignore) 3766 return nullptr; 3767 3768 // The result of an assignment in C is the assigned r-value. 3769 if (!CGF.getLangOpts().CPlusPlus) 3770 return RHS; 3771 3772 // If the lvalue is non-volatile, return the computed value of the assignment. 
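// (In C++ the result of a volatile compound assignment is the volatile
// lvalue itself, so e.g. 'volatile int v; int x = (v += 1);' must re-read
// v rather than reuse the value computed above.)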
3773 if (!LHS.isVolatileQualified()) 3774 return RHS; 3775 3776 // Otherwise, reload the value. 3777 return EmitLoadOfLValue(LHS, E->getExprLoc()); 3778 } 3779 3780 void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck( 3781 const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) { 3782 SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>, 2> 3783 Checks; 3784 3785 if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) { 3786 Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero), 3787 SanitizerKind::SO_IntegerDivideByZero)); 3788 } 3789 3790 const auto *BO = cast<BinaryOperator>(Ops.E); 3791 if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) && 3792 Ops.Ty->hasSignedIntegerRepresentation() && 3793 !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) && 3794 Ops.mayHaveIntegerOverflow()) { 3795 llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType()); 3796 3797 llvm::Value *IntMin = 3798 Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth())); 3799 llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty); 3800 3801 llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin); 3802 llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne); 3803 llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or"); 3804 Checks.push_back( 3805 std::make_pair(NotOverflow, SanitizerKind::SO_SignedIntegerOverflow)); 3806 } 3807 3808 if (Checks.size() > 0) 3809 EmitBinOpCheck(Checks, Ops); 3810 } 3811 3812 Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) { 3813 { 3814 CodeGenFunction::SanitizerScope SanScope(&CGF); 3815 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) || 3816 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) && 3817 Ops.Ty->isIntegerType() && 3818 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) { 3819 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty)); 3820 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true); 3821 } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) && 3822 Ops.Ty->isRealFloatingType() && 3823 Ops.mayHaveFloatDivisionByZero()) { 3824 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty)); 3825 llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero); 3826 EmitBinOpCheck( 3827 std::make_pair(NonZero, SanitizerKind::SO_FloatDivideByZero), Ops); 3828 } 3829 } 3830 3831 if (Ops.Ty->isConstantMatrixType()) { 3832 llvm::MatrixBuilder MB(Builder); 3833 // We need to check the types of the operands of the operator to get the 3834 // correct matrix dimensions. 
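// Only matrix-by-scalar division reaches this path; e.g. (a sketch using
// the Clang matrix extension):
//   typedef float m4x4 __attribute__((matrix_type(4, 4)));
//   m4x4 m; float s;
//   m / s;   // divides each element of m by s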
3835 auto *BO = cast<BinaryOperator>(Ops.E); 3836 (void)BO; 3837 assert( 3838 isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) && 3839 "first operand must be a matrix"); 3840 assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() && 3841 "second operand must be an arithmetic type"); 3842 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures); 3843 return MB.CreateScalarDiv(Ops.LHS, Ops.RHS, 3844 Ops.Ty->hasUnsignedIntegerRepresentation()); 3845 } 3846 3847 if (Ops.LHS->getType()->isFPOrFPVectorTy()) { 3848 llvm::Value *Val; 3849 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures); 3850 Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div"); 3851 CGF.SetDivFPAccuracy(Val); 3852 return Val; 3853 } 3854 else if (Ops.isFixedPointOp()) 3855 return EmitFixedPointBinOp(Ops); 3856 else if (Ops.Ty->hasUnsignedIntegerRepresentation()) 3857 return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div"); 3858 else 3859 return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div"); 3860 } 3861 3862 Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) { 3863 // Rem in C can't be a floating point type: C99 6.5.5p2. 3864 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) || 3865 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) && 3866 Ops.Ty->isIntegerType() && 3867 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) { 3868 CodeGenFunction::SanitizerScope SanScope(&CGF); 3869 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty)); 3870 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false); 3871 } 3872 3873 if (Ops.Ty->hasUnsignedIntegerRepresentation()) 3874 return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem"); 3875 else 3876 return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem"); 3877 } 3878 3879 Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) { 3880 unsigned IID; 3881 unsigned OpID = 0; 3882 SanitizerHandler OverflowKind; 3883 3884 bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType(); 3885 switch (Ops.Opcode) { 3886 case BO_Add: 3887 case BO_AddAssign: 3888 OpID = 1; 3889 IID = isSigned ? llvm::Intrinsic::sadd_with_overflow : 3890 llvm::Intrinsic::uadd_with_overflow; 3891 OverflowKind = SanitizerHandler::AddOverflow; 3892 break; 3893 case BO_Sub: 3894 case BO_SubAssign: 3895 OpID = 2; 3896 IID = isSigned ? llvm::Intrinsic::ssub_with_overflow : 3897 llvm::Intrinsic::usub_with_overflow; 3898 OverflowKind = SanitizerHandler::SubOverflow; 3899 break; 3900 case BO_Mul: 3901 case BO_MulAssign: 3902 OpID = 3; 3903 IID = isSigned ? llvm::Intrinsic::smul_with_overflow : 3904 llvm::Intrinsic::umul_with_overflow; 3905 OverflowKind = SanitizerHandler::MulOverflow; 3906 break; 3907 default: 3908 llvm_unreachable("Unsupported operation for overflow detection"); 3909 } 3910 OpID <<= 1; 3911 if (isSigned) 3912 OpID |= 1; 3913 3914 CodeGenFunction::SanitizerScope SanScope(&CGF); 3915 llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty); 3916 3917 llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy); 3918 3919 Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS}); 3920 Value *result = Builder.CreateExtractValue(resultAndOverflow, 0); 3921 Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1); 3922 3923 // Handle overflow with llvm.trap if no custom handler has been specified. 
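// (A custom handler is named via -ftrapv-handler=; as the code below
// shows, it is declared roughly as 'i64 handler(i64 lhs, i64 rhs,
// i8 opcode, i8 bitwidth, ...)' and, if it returns, its truncated result
// replaces the overflowed value.)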
3924 const std::string *handlerName = 3925 &CGF.getLangOpts().OverflowHandler; 3926 if (handlerName->empty()) { 3927 // If the signed-integer-overflow sanitizer is enabled, emit a call to its 3928 // runtime. Otherwise, this is a -ftrapv check, so just emit a trap. 3929 if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) { 3930 llvm::Value *NotOverflow = Builder.CreateNot(overflow); 3931 SanitizerKind::SanitizerOrdinal Ordinal = 3932 isSigned ? SanitizerKind::SO_SignedIntegerOverflow 3933 : SanitizerKind::SO_UnsignedIntegerOverflow; 3934 EmitBinOpCheck(std::make_pair(NotOverflow, Ordinal), Ops); 3935 } else 3936 CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind); 3937 return result; 3938 } 3939 3940 // Branch in case of overflow. 3941 llvm::BasicBlock *initialBB = Builder.GetInsertBlock(); 3942 llvm::BasicBlock *continueBB = 3943 CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode()); 3944 llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn); 3945 3946 Builder.CreateCondBr(overflow, overflowBB, continueBB); 3947 3948 // If an overflow handler is set, then we want to call it and then use its 3949 // result, if it returns. 3950 Builder.SetInsertPoint(overflowBB); 3951 3952 // Get the overflow handler. 3953 llvm::Type *Int8Ty = CGF.Int8Ty; 3954 llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty }; 3955 llvm::FunctionType *handlerTy = 3956 llvm::FunctionType::get(CGF.Int64Ty, argTypes, true); 3957 llvm::FunctionCallee handler = 3958 CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName); 3959 3960 // Sign extend the args to 64-bit, so that we can use the same handler for 3961 // all types of overflow. 3962 llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty); 3963 llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty); 3964 3965 // Call the handler with the two arguments, the operation, and the size of 3966 // the result. 3967 llvm::Value *handlerArgs[] = { 3968 lhs, 3969 rhs, 3970 Builder.getInt8(OpID), 3971 Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth()) 3972 }; 3973 llvm::Value *handlerResult = 3974 CGF.EmitNounwindRuntimeCall(handler, handlerArgs); 3975 3976 // Truncate the result back to the desired size. 3977 handlerResult = Builder.CreateTrunc(handlerResult, opTy); 3978 Builder.CreateBr(continueBB); 3979 3980 Builder.SetInsertPoint(continueBB); 3981 llvm::PHINode *phi = Builder.CreatePHI(opTy, 2); 3982 phi->addIncoming(result, initialBB); 3983 phi->addIncoming(handlerResult, overflowBB); 3984 3985 return phi; 3986 } 3987 3988 /// Emit pointer + index arithmetic. 3989 static Value *emitPointerArithmetic(CodeGenFunction &CGF, 3990 const BinOpInfo &op, 3991 bool isSubtraction) { 3992 // Must have binary (not unary) expr here. Unary pointer 3993 // increment/decrement doesn't use this path. 3994 const BinaryOperator *expr = cast<BinaryOperator>(op.E); 3995 3996 Value *pointer = op.LHS; 3997 Expr *pointerOperand = expr->getLHS(); 3998 Value *index = op.RHS; 3999 Expr *indexOperand = expr->getRHS(); 4000 4001 // In a subtraction, the LHS is always the pointer. 
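// For additions the operands can arrive in either order ('p + i' and
// 'i + p' are both valid C), hence the swap below.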
4002 if (!isSubtraction && !pointer->getType()->isPointerTy()) { 4003 std::swap(pointer, index); 4004 std::swap(pointerOperand, indexOperand); 4005 } 4006 4007 bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType(); 4008 4009 unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth(); 4010 auto &DL = CGF.CGM.getDataLayout(); 4011 auto PtrTy = cast<llvm::PointerType>(pointer->getType()); 4012 4013 // Some versions of glibc and gcc use idioms (particularly in their malloc 4014 // routines) that add a pointer-sized integer (known to be a pointer value) 4015 // to a null pointer in order to cast the value back to an integer or as 4016 // part of a pointer alignment algorithm. This is undefined behavior, but 4017 // we'd like to be able to compile programs that use it. 4018 // 4019 // Normally, we'd generate a GEP with a null-pointer base here in response 4020 // to that code, but it's also UB to dereference a pointer created that 4021 // way. Instead (as an acknowledged hack to tolerate the idiom) we will 4022 // generate a direct cast of the integer value to a pointer. 4023 // 4024 // The idiom (p = nullptr + N) is not met if any of the following are true: 4025 // 4026 // The operation is subtraction. 4027 // The index is not pointer-sized. 4028 // The pointer type is not byte-sized. 4029 // 4030 if (BinaryOperator::isNullPointerArithmeticExtension(CGF.getContext(), 4031 op.Opcode, 4032 expr->getLHS(), 4033 expr->getRHS())) 4034 return CGF.Builder.CreateIntToPtr(index, pointer->getType()); 4035 4036 if (width != DL.getIndexTypeSizeInBits(PtrTy)) { 4037 // Zero-extend or sign-extend the pointer value according to 4038 // whether the index is signed or not. 4039 index = CGF.Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned, 4040 "idx.ext"); 4041 } 4042 4043 // If this is subtraction, negate the index. 4044 if (isSubtraction) 4045 index = CGF.Builder.CreateNeg(index, "idx.neg"); 4046 4047 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds)) 4048 CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(), 4049 /*Accessed*/ false); 4050 4051 const PointerType *pointerType 4052 = pointerOperand->getType()->getAs<PointerType>(); 4053 if (!pointerType) { 4054 QualType objectType = pointerOperand->getType() 4055 ->castAs<ObjCObjectPointerType>() 4056 ->getPointeeType(); 4057 llvm::Value *objectSize 4058 = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType)); 4059 4060 index = CGF.Builder.CreateMul(index, objectSize); 4061 4062 Value *result = 4063 CGF.Builder.CreateGEP(CGF.Int8Ty, pointer, index, "add.ptr"); 4064 return CGF.Builder.CreateBitCast(result, pointer->getType()); 4065 } 4066 4067 QualType elementType = pointerType->getPointeeType(); 4068 if (const VariableArrayType *vla 4069 = CGF.getContext().getAsVariableArrayType(elementType)) { 4070 // The element count here is the total number of non-VLA elements. 4071 llvm::Value *numElements = CGF.getVLASize(vla).NumElts; 4072 4073 // Effectively, the multiply by the VLA size is part of the GEP. 4074 // GEP indexes are signed, and scaling an index isn't permitted to 4075 // signed-overflow, so we use the same semantics for our explicit 4076 // multiply. We suppress this if overflow is not undefined behavior. 
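// E.g. given 'int (*p)[n]', 'p + i' scales the index by the runtime
// bound: the NSW multiply below computes i * n, and the GEP then indexes
// by the VLA's element type (a sketch; exact IR varies by target).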
4077 llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType()); 4078 if (CGF.getLangOpts().PointerOverflowDefined) { 4079 index = CGF.Builder.CreateMul(index, numElements, "vla.index"); 4080 pointer = CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr"); 4081 } else { 4082 index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index"); 4083 pointer = CGF.EmitCheckedInBoundsGEP( 4084 elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(), 4085 "add.ptr"); 4086 } 4087 return pointer; 4088 } 4089 4090 // Explicitly handle GNU void* and function pointer arithmetic extensions. The 4091 // GNU void* casts amount to no-ops since our void* type is i8*, but this is 4092 // future proof. 4093 llvm::Type *elemTy; 4094 if (elementType->isVoidType() || elementType->isFunctionType()) 4095 elemTy = CGF.Int8Ty; 4096 else 4097 elemTy = CGF.ConvertTypeForMem(elementType); 4098 4099 if (CGF.getLangOpts().PointerOverflowDefined) 4100 return CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr"); 4101 4102 return CGF.EmitCheckedInBoundsGEP( 4103 elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(), 4104 "add.ptr"); 4105 } 4106 4107 // Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and 4108 // Addend. Use negMul and negAdd to negate the first operand of the Mul or 4109 // the add operand respectively. This allows fmuladd to represent a*b-c, or 4110 // c-a*b. Patterns in LLVM should catch the negated forms and translate them to 4111 // efficient operations. 4112 static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend, 4113 const CodeGenFunction &CGF, CGBuilderTy &Builder, 4114 bool negMul, bool negAdd) { 4115 Value *MulOp0 = MulOp->getOperand(0); 4116 Value *MulOp1 = MulOp->getOperand(1); 4117 if (negMul) 4118 MulOp0 = Builder.CreateFNeg(MulOp0, "neg"); 4119 if (negAdd) 4120 Addend = Builder.CreateFNeg(Addend, "neg"); 4121 4122 Value *FMulAdd = nullptr; 4123 if (Builder.getIsFPConstrained()) { 4124 assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) && 4125 "Only constrained operation should be created when Builder is in FP " 4126 "constrained mode"); 4127 FMulAdd = Builder.CreateConstrainedFPCall( 4128 CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd, 4129 Addend->getType()), 4130 {MulOp0, MulOp1, Addend}); 4131 } else { 4132 FMulAdd = Builder.CreateCall( 4133 CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()), 4134 {MulOp0, MulOp1, Addend}); 4135 } 4136 MulOp->eraseFromParent(); 4137 4138 return FMulAdd; 4139 } 4140 4141 // Check whether it would be legal to emit an fmuladd intrinsic call to 4142 // represent op and if so, build the fmuladd. 4143 // 4144 // Checks that (a) the operation is fusable, and (b) -ffp-contract=on. 4145 // Does NOT check the type of the operation - it's assumed that this function 4146 // will be called from contexts where it's known that the type is contractable. 4147 static Value* tryEmitFMulAdd(const BinOpInfo &op, 4148 const CodeGenFunction &CGF, CGBuilderTy &Builder, 4149 bool isSub=false) { 4150 4151 assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign || 4152 op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) && 4153 "Only fadd/fsub can be the root of an fmuladd."); 4154 4155 // Check whether this op is marked as fusable. 4156 if (!op.FPFeatures.allowFPContractWithinStatement()) 4157 return nullptr; 4158 4159 Value *LHS = op.LHS; 4160 Value *RHS = op.RHS; 4161 4162 // Peek through fneg to look for fmul. 
Make sure fneg has no users, and that 4163 // it is the only use of its operand. 4164 bool NegLHS = false; 4165 if (auto *LHSUnOp = dyn_cast<llvm::UnaryOperator>(LHS)) { 4166 if (LHSUnOp->getOpcode() == llvm::Instruction::FNeg && 4167 LHSUnOp->use_empty() && LHSUnOp->getOperand(0)->hasOneUse()) { 4168 LHS = LHSUnOp->getOperand(0); 4169 NegLHS = true; 4170 } 4171 } 4172 4173 bool NegRHS = false; 4174 if (auto *RHSUnOp = dyn_cast<llvm::UnaryOperator>(RHS)) { 4175 if (RHSUnOp->getOpcode() == llvm::Instruction::FNeg && 4176 RHSUnOp->use_empty() && RHSUnOp->getOperand(0)->hasOneUse()) { 4177 RHS = RHSUnOp->getOperand(0); 4178 NegRHS = true; 4179 } 4180 } 4181 4182 // We have a potentially fusable op. Look for a mul on one of the operands. 4183 // Also, make sure that the mul result isn't used directly. In that case, 4184 // there's no point creating a muladd operation. 4185 if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(LHS)) { 4186 if (LHSBinOp->getOpcode() == llvm::Instruction::FMul && 4187 (LHSBinOp->use_empty() || NegLHS)) { 4188 // If we looked through fneg, erase it. 4189 if (NegLHS) 4190 cast<llvm::Instruction>(op.LHS)->eraseFromParent(); 4191 return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub); 4192 } 4193 } 4194 if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(RHS)) { 4195 if (RHSBinOp->getOpcode() == llvm::Instruction::FMul && 4196 (RHSBinOp->use_empty() || NegRHS)) { 4197 // If we looked through fneg, erase it. 4198 if (NegRHS) 4199 cast<llvm::Instruction>(op.RHS)->eraseFromParent(); 4200 return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false); 4201 } 4202 } 4203 4204 if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(LHS)) { 4205 if (LHSBinOp->getIntrinsicID() == 4206 llvm::Intrinsic::experimental_constrained_fmul && 4207 (LHSBinOp->use_empty() || NegLHS)) { 4208 // If we looked through fneg, erase it. 4209 if (NegLHS) 4210 cast<llvm::Instruction>(op.LHS)->eraseFromParent(); 4211 return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub); 4212 } 4213 } 4214 if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(RHS)) { 4215 if (RHSBinOp->getIntrinsicID() == 4216 llvm::Intrinsic::experimental_constrained_fmul && 4217 (RHSBinOp->use_empty() || NegRHS)) { 4218 // If we looked through fneg, erase it. 4219 if (NegRHS) 4220 cast<llvm::Instruction>(op.RHS)->eraseFromParent(); 4221 return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false); 4222 } 4223 } 4224 4225 return nullptr; 4226 } 4227 4228 Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) { 4229 if (op.LHS->getType()->isPointerTy() || 4230 op.RHS->getType()->isPointerTy()) 4231 return emitPointerArithmetic(CGF, op, CodeGenFunction::NotSubtraction); 4232 4233 if (op.Ty->isSignedIntegerOrEnumerationType()) { 4234 switch (CGF.getLangOpts().getSignedOverflowBehavior()) { 4235 case LangOptions::SOB_Defined: 4236 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) 4237 return Builder.CreateAdd(op.LHS, op.RHS, "add"); 4238 [[fallthrough]]; 4239 case LangOptions::SOB_Undefined: 4240 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) 4241 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add"); 4242 [[fallthrough]]; 4243 case LangOptions::SOB_Trapping: 4244 if (CanElideOverflowCheck(CGF.getContext(), op)) 4245 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add"); 4246 return EmitOverflowCheckedBinOp(op); 4247 } 4248 } 4249 4250 // For vector and matrix adds, try to fold into a fmuladd. 
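// E.g. with -ffp-contract=on, 'a * b + c' on floats becomes a single
//   call float @llvm.fmuladd.f32(float %a, float %b, float %c)
// instead of separate fmul and fadd instructions.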
4251 if (op.LHS->getType()->isFPOrFPVectorTy()) { 4252 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures); 4253 // Try to form an fmuladd. 4254 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder)) 4255 return FMulAdd; 4256 } 4257 4258 if (op.Ty->isConstantMatrixType()) { 4259 llvm::MatrixBuilder MB(Builder); 4260 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures); 4261 return MB.CreateAdd(op.LHS, op.RHS); 4262 } 4263 4264 if (op.Ty->isUnsignedIntegerType() && 4265 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) && 4266 !CanElideOverflowCheck(CGF.getContext(), op)) 4267 return EmitOverflowCheckedBinOp(op); 4268 4269 if (op.LHS->getType()->isFPOrFPVectorTy()) { 4270 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures); 4271 return Builder.CreateFAdd(op.LHS, op.RHS, "add"); 4272 } 4273 4274 if (op.isFixedPointOp()) 4275 return EmitFixedPointBinOp(op); 4276 4277 return Builder.CreateAdd(op.LHS, op.RHS, "add"); 4278 } 4279 4280 /// The resulting value must be calculated with exact precision, so the operands 4281 /// may not be the same type. 4282 Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) { 4283 using llvm::APSInt; 4284 using llvm::ConstantInt; 4285 4286 // This is either a binary operation where at least one of the operands is 4287 // a fixed-point type, or a unary operation where the operand is a fixed-point 4288 // type. The result type of a binary operation is determined by 4289 // Sema::handleFixedPointConversions(). 4290 QualType ResultTy = op.Ty; 4291 QualType LHSTy, RHSTy; 4292 if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) { 4293 RHSTy = BinOp->getRHS()->getType(); 4294 if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) { 4295 // For compound assignment, the effective type of the LHS at this point 4296 // is the computation LHS type, not the actual LHS type, and the final 4297 // result type is not the type of the expression but rather the 4298 // computation result type. 4299 LHSTy = CAO->getComputationLHSType(); 4300 ResultTy = CAO->getComputationResultType(); 4301 } else 4302 LHSTy = BinOp->getLHS()->getType(); 4303 } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) { 4304 LHSTy = UnOp->getSubExpr()->getType(); 4305 RHSTy = UnOp->getSubExpr()->getType(); 4306 } 4307 ASTContext &Ctx = CGF.getContext(); 4308 Value *LHS = op.LHS; 4309 Value *RHS = op.RHS; 4310 4311 auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy); 4312 auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy); 4313 auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy); 4314 auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema); 4315 4316 // Perform the actual operation. 
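// The operands keep their own fixed-point semantics here; for, say,
// 'short _Accum + _Fract', FixedPointBuilder aligns both sides to common
// semantics internally, and only the final result is converted back to
// ResultFixedSema at the bottom of this function.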
  Value *Result;
  llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
  switch (op.Opcode) {
  case BO_AddAssign:
  case BO_Add:
    Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_SubAssign:
  case BO_Sub:
    Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_MulAssign:
  case BO_Mul:
    Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_DivAssign:
  case BO_Div:
    Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_ShlAssign:
  case BO_Shl:
    Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS);
    break;
  case BO_ShrAssign:
  case BO_Shr:
    Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS);
    break;
  case BO_LT:
    return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_GT:
    return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_LE:
    return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_GE:
    return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_EQ:
    // For equality operations, we assume any padding bits on unsigned types
    // are zeroed out. They could be overwritten through non-saturating
    // operations that cause overflow, but this leads to undefined behavior.
    return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_NE:
    return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_Cmp:
  case BO_LAnd:
  case BO_LOr:
    llvm_unreachable("Found unimplemented fixed point binary operation");
  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Rem:
  case BO_Xor:
  case BO_And:
  case BO_Or:
  case BO_Assign:
  case BO_RemAssign:
  case BO_AndAssign:
  case BO_XorAssign:
  case BO_OrAssign:
  case BO_Comma:
    llvm_unreachable("Found unsupported binary operation for fixed point types.");
  }

  bool IsShift = BinaryOperator::isShiftOp(op.Opcode) ||
                 BinaryOperator::isShiftAssignOp(op.Opcode);
  // Convert to the result type.
  return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema
                                                      : CommonFixedSema,
                                      ResultFixedSema);
}

Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
  // The LHS is always a pointer if either side is.
  if (!op.LHS->getType()->isPointerTy()) {
    if (op.Ty->isSignedIntegerOrEnumerationType()) {
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateSub(op.LHS, op.RHS, "sub");
        [[fallthrough]];
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
        [[fallthrough]];
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), op))
          return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
        return EmitOverflowCheckedBinOp(op);
      }
    }

    // For vector and matrix subs, try to fold into a fmuladd.
    if (op.LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
      // Try to form an fmuladd.
      if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
        return FMulAdd;
    }

    if (op.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder MB(Builder);
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
      return MB.CreateSub(op.LHS, op.RHS);
    }

    if (op.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), op))
      return EmitOverflowCheckedBinOp(op);

    if (op.LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
      return Builder.CreateFSub(op.LHS, op.RHS, "sub");
    }

    if (op.isFixedPointOp())
      return EmitFixedPointBinOp(op);

    return Builder.CreateSub(op.LHS, op.RHS, "sub");
  }

  // If the RHS is not a pointer, then we have normal pointer
  // arithmetic.
  if (!op.RHS->getType()->isPointerTy())
    return emitPointerArithmetic(CGF, op, CodeGenFunction::IsSubtraction);

  // Otherwise, this is a pointer subtraction.

  // Do the raw subtraction part.
  llvm::Value *LHS
    = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
  llvm::Value *RHS
    = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
  Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");

  // Okay, figure out the element size.
  const BinaryOperator *expr = cast<BinaryOperator>(op.E);
  QualType elementType = expr->getLHS()->getType()->getPointeeType();

  llvm::Value *divisor = nullptr;

  // For a variable-length array, this is going to be non-constant.
  if (const VariableArrayType *vla
        = CGF.getContext().getAsVariableArrayType(elementType)) {
    auto VlaSize = CGF.getVLASize(vla);
    elementType = VlaSize.Type;
    divisor = VlaSize.NumElts;

    // Scale the number of non-VLA elements by the non-VLA element size.
    CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
    if (!eltSize.isOne())
      divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);

  // For everything else, we can just compute it, safe in the
  // assumption that Sema won't let anything through that we can't
  // safely compute the size of.
  } else {
    CharUnits elementSize;
    // Handle GCC extension for pointer arithmetic on void* and
    // function pointer types.
    if (elementType->isVoidType() || elementType->isFunctionType())
      elementSize = CharUnits::One();
    else
      elementSize = CGF.getContext().getTypeSizeInChars(elementType);

    // Don't even emit the divide for element size of 1.
    if (elementSize.isOne())
      return diffInChars;

    divisor = CGF.CGM.getSize(elementSize);
  }

  // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
  // pointer difference in C is only defined in the case where both operands
  // are pointing to elements of an array.
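  // Illustrative example: for `int *p, *q; ... p - q`, the byte difference is
  // divided by sizeof(int), e.g. on a typical 64-bit target:
  //   %diff = sub i64 %lhs, %rhs
  //   %res  = sdiv exact i64 %diff, 4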
  return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
}

Value *ScalarExprEmitter::GetMaximumShiftAmount(Value *LHS, Value *RHS,
                                                bool RHSIsSigned) {
  llvm::IntegerType *Ty;
  if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
    Ty = cast<llvm::IntegerType>(VT->getElementType());
  else
    Ty = cast<llvm::IntegerType>(LHS->getType());
  // For a given type of LHS the maximum shift amount is width(LHS)-1, however
  // it can occur that width(LHS)-1 > range(RHS). Since there is no check for
  // this in ConstantInt::get, this results in the value getting truncated.
  // Constrain the return value to be max(RHS) in this case.
  llvm::Type *RHSTy = RHS->getType();
  llvm::APInt RHSMax =
      RHSIsSigned ? llvm::APInt::getSignedMaxValue(RHSTy->getScalarSizeInBits())
                  : llvm::APInt::getMaxValue(RHSTy->getScalarSizeInBits());
  if (RHSMax.ult(Ty->getBitWidth()))
    return llvm::ConstantInt::get(RHSTy, RHSMax);
  return llvm::ConstantInt::get(RHSTy, Ty->getBitWidth() - 1);
}

Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
                                              const Twine &Name) {
  llvm::IntegerType *Ty;
  if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
    Ty = cast<llvm::IntegerType>(VT->getElementType());
  else
    Ty = cast<llvm::IntegerType>(LHS->getType());

  if (llvm::isPowerOf2_64(Ty->getBitWidth()))
    return Builder.CreateAnd(RHS, GetMaximumShiftAmount(LHS, RHS, false), Name);

  return Builder.CreateURem(
      RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name);
}

Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
  // TODO: This misses out on the sanitizer check below.
  if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(Ops);

  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");

  bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
                            Ops.Ty->hasSignedIntegerRepresentation() &&
                            !CGF.getLangOpts().isSignedOverflowDefined() &&
                            !CGF.getLangOpts().CPlusPlus20;
  bool SanitizeUnsignedBase =
      CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) &&
      Ops.Ty->hasUnsignedIntegerRepresentation();
  bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
  bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
  // OpenCL 6.3j: shift values are effectively % word size of LHS.
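  // Illustrative example: for a 32-bit LHS this effectively emits
  // `x << (y & 31)`:
  //   %mask = and i32 %y, 31
  //   %shl  = shl i32 %x, %mask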
  if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
    RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask");
  else if ((SanitizeBase || SanitizeExponent) &&
           isa<llvm::IntegerType>(Ops.LHS->getType())) {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    SmallVector<std::pair<Value *, SanitizerKind::SanitizerOrdinal>, 2> Checks;
    bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
    llvm::Value *WidthMinusOne =
        GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned);
    llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);

    if (SanitizeExponent) {
      Checks.push_back(
          std::make_pair(ValidExponent, SanitizerKind::SO_ShiftExponent));
    }

    if (SanitizeBase) {
      // Check whether we are shifting any non-zero bits off the top of the
      // integer. We only emit this check if the exponent is valid - otherwise
      // the instructions below will have undefined behavior themselves.
      llvm::BasicBlock *Orig = Builder.GetInsertBlock();
      llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
      llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
      Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
      llvm::Value *PromotedWidthMinusOne =
          (RHS == Ops.RHS) ? WidthMinusOne
                           : GetMaximumShiftAmount(Ops.LHS, RHS, RHSIsSigned);
      CGF.EmitBlock(CheckShiftBase);
      llvm::Value *BitsShiftedOff = Builder.CreateLShr(
          Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
                                     /*NUW*/ true, /*NSW*/ true),
          "shl.check");
      if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
        // In C99, we are not permitted to shift a 1 bit into the sign bit.
        // Under C++11's rules, shifting a 1 bit into the sign bit is
        // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
        // define signed left shifts, so we use the C99 and C++11 rules there).
        // Unsigned shifts can always shift into the top bit.
        llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
        BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
      }
      llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
      llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
      CGF.EmitBlock(Cont);
      llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
      BaseCheck->addIncoming(Builder.getTrue(), Orig);
      BaseCheck->addIncoming(ValidBase, CheckShiftBase);
      Checks.push_back(std::make_pair(
          BaseCheck, SanitizeSignedBase ? SanitizerKind::SO_ShiftBase
                                        : SanitizerKind::SO_UnsignedShiftBase));
    }

    assert(!Checks.empty());
    EmitBinOpCheck(Checks, Ops);
  }

  return Builder.CreateShl(Ops.LHS, RHS, "shl");
}

Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
  // TODO: This misses out on the sanitizer check below.
  if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(Ops);

  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");

  // OpenCL 6.3j: shift values are effectively % word size of LHS.
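  // As in EmitShl above, e.g. `x >> y` with a 32-bit LHS is effectively
  // emitted as `x >> (y & 31)` (illustrative).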
  if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
    RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask");
  else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
           isa<llvm::IntegerType>(Ops.LHS->getType())) {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
    llvm::Value *Valid = Builder.CreateICmpULE(
        Ops.RHS, GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned));
    EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::SO_ShiftExponent), Ops);
  }

  if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateLShr(Ops.LHS, RHS, "shr");
  return Builder.CreateAShr(Ops.LHS, RHS, "shr");
}

enum IntrinsicType { VCMPEQ, VCMPGT };
// return corresponding comparison intrinsic for given vector type
static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
                                        BuiltinType::Kind ElemKind) {
  switch (ElemKind) {
  default: llvm_unreachable("unexpected element type");
  case BuiltinType::Char_U:
  case BuiltinType::UChar:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
  case BuiltinType::Char_S:
  case BuiltinType::SChar:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
  case BuiltinType::UShort:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
  case BuiltinType::Short:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
  case BuiltinType::UInt:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
  case BuiltinType::Int:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
  case BuiltinType::ULong:
  case BuiltinType::ULongLong:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
  case BuiltinType::Long:
  case BuiltinType::LongLong:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
  case BuiltinType::Float:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
  case BuiltinType::Double:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
                            llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
  case BuiltinType::UInt128:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
  case BuiltinType::Int128:
    return (IT == VCMPEQ) ?
        llvm::Intrinsic::ppc_altivec_vcmpequq_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
  }
}

Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
                                      llvm::CmpInst::Predicate UICmpOpc,
                                      llvm::CmpInst::Predicate SICmpOpc,
                                      llvm::CmpInst::Predicate FCmpOpc,
                                      bool IsSignaling) {
  TestAndClearIgnoreResultAssign();
  Value *Result;
  QualType LHSTy = E->getLHS()->getType();
  QualType RHSTy = E->getRHS()->getType();
  if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
    assert(E->getOpcode() == BO_EQ ||
           E->getOpcode() == BO_NE);
    Value *LHS = CGF.EmitScalarExpr(E->getLHS());
    Value *RHS = CGF.EmitScalarExpr(E->getRHS());
    Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
        CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
  } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
    BinOpInfo BOInfo = EmitBinOps(E);
    Value *LHS = BOInfo.LHS;
    Value *RHS = BOInfo.RHS;

    // If AltiVec, the comparison results in a numeric type, so we use
    // intrinsics comparing vectors and giving 0 or 1 as a result.
    if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
      // Constants for mapping CR6 register bits to the predicate result.
      enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;

      llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;

      // In several cases the vector argument order will be reversed.
      Value *FirstVecArg = LHS,
            *SecondVecArg = RHS;

      QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
      BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();

      switch(E->getOpcode()) {
      default: llvm_unreachable("is not a comparison operation");
      case BO_EQ:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_NE:
        CR6 = CR6_EQ;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_LT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        std::swap(FirstVecArg, SecondVecArg);
        break;
      case BO_GT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        break;
      case BO_LE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
          std::swap(FirstVecArg, SecondVecArg);
        }
        else {
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
        }
        break;
      case BO_GE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
        }
        else {
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
          std::swap(FirstVecArg, SecondVecArg);
        }
        break;
      }

      Value *CR6Param = Builder.getInt32(CR6);
      llvm::Function *F = CGF.CGM.getIntrinsic(ID);
      Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});

      // The result type of the intrinsic may not be the same as E->getType().
      // If E->getType() is not BoolTy, EmitScalarConversion will do the
      // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
      // do nothing; if ResultTy is not i1 at the same time, it will cause a
      // crash later.
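      // E.g. (illustrative): the predicate intrinsics return i32; when the
      // source expression has type `bool`, the truncation below produces the
      // i1 that the rest of codegen expects.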
      llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
      if (ResultTy->getBitWidth() > 1 &&
          E->getType() == CGF.getContext().BoolTy)
        Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
      return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                                  E->getExprLoc());
    }

    if (BOInfo.isFixedPointOp()) {
      Result = EmitFixedPointBinOp(BOInfo);
    } else if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
      if (!IsSignaling)
        Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
      else
        Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp");
    } else if (LHSTy->hasSignedIntegerRepresentation()) {
      Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
    } else {
      // Unsigned integers and pointers.

      if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
          !isa<llvm::ConstantPointerNull>(LHS) &&
          !isa<llvm::ConstantPointerNull>(RHS)) {

        // Dynamic information is required to be stripped for comparisons,
        // because it could leak the dynamic information. Based on comparisons
        // of pointers to dynamic objects, the optimizer can replace one pointer
        // with another, which might be incorrect in the presence of invariant
        // groups. Comparison with null is safe because null does not carry any
        // dynamic information.
        if (LHSTy.mayBeDynamicClass())
          LHS = Builder.CreateStripInvariantGroup(LHS);
        if (RHSTy.mayBeDynamicClass())
          RHS = Builder.CreateStripInvariantGroup(RHS);
      }

      Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
    }

    // If this is a vector comparison, sign extend the result to the
    // appropriate vector integer type and return it (don't convert to bool).
    if (LHSTy->isVectorType())
      return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");

  } else {
    // Complex Comparison: can only be an equality comparison.
    CodeGenFunction::ComplexPairTy LHS, RHS;
    QualType CETy;
    if (auto *CTy = LHSTy->getAs<ComplexType>()) {
      LHS = CGF.EmitComplexExpr(E->getLHS());
      CETy = CTy->getElementType();
    } else {
      LHS.first = Visit(E->getLHS());
      LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
      CETy = LHSTy;
    }
    if (auto *CTy = RHSTy->getAs<ComplexType>()) {
      RHS = CGF.EmitComplexExpr(E->getRHS());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy,
                                                     CTy->getElementType()) &&
             "The element types must always match.");
      (void)CTy;
    } else {
      RHS.first = Visit(E->getRHS());
      RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
             "The element types must always match.");
    }

    Value *ResultR, *ResultI;
    if (CETy->isRealFloatingType()) {
      // As complex comparisons can only be equality comparisons, they
      // are never signaling comparisons.
      ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
    } else {
      // Complex comparisons can only be equality comparisons. As such, signed
      // and unsigned opcodes are the same.
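      // E.g. (illustrative): `z1 == z2` for _Complex int lowers to two
      // icmp eq instructions (one for the real parts, one for the imaginary
      // parts) that are combined with an `and` below.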
      ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
    }

    if (E->getOpcode() == BO_EQ) {
      Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
    } else {
      assert(E->getOpcode() == BO_NE &&
             "Complex comparison other than == or != ?");
      Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
    }
  }

  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                              E->getExprLoc());
}

llvm::Value *CodeGenFunction::EmitWithOriginalRHSBitfieldAssignment(
    const BinaryOperator *E, Value **Previous, QualType *SrcType) {
  // In case we have the integer or bitfield sanitizer checks enabled
  // we want to get the expression before scalar conversion.
  if (auto *ICE = dyn_cast<ImplicitCastExpr>(E->getRHS())) {
    CastKind Kind = ICE->getCastKind();
    if (Kind == CK_IntegralCast || Kind == CK_LValueToRValue) {
      *SrcType = ICE->getSubExpr()->getType();
      *Previous = EmitScalarExpr(ICE->getSubExpr());
      // Pass default ScalarConversionOpts to avoid emitting
      // integer sanitizer checks, as E refers to a bitfield.
      return EmitScalarConversion(*Previous, *SrcType, ICE->getType(),
                                  ICE->getExprLoc());
    }
  }
  return EmitScalarExpr(E->getRHS());
}

Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  bool Ignore = TestAndClearIgnoreResultAssign();

  Value *RHS;
  LValue LHS;

  switch (E->getLHS()->getType().getObjCLifetime()) {
  case Qualifiers::OCL_Strong:
    std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
    break;

  case Qualifiers::OCL_Autoreleasing:
    std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
    break;

  case Qualifiers::OCL_ExplicitNone:
    std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
    break;

  case Qualifiers::OCL_Weak:
    RHS = Visit(E->getRHS());
    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
    RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
    break;

  case Qualifiers::OCL_None:
    // __block variables need to have the rhs evaluated first, plus
    // this should improve codegen just a little.
    Value *Previous = nullptr;
    QualType SrcType = E->getRHS()->getType();
    // Check if LHS is a bitfield; if RHS contains an implicit cast expression
    // we want to extract that value and potentially (if the bitfield sanitizer
    // is enabled) use it to check for an implicit conversion.
    if (E->getLHS()->refersToBitField())
      RHS = CGF.EmitWithOriginalRHSBitfieldAssignment(E, &Previous, &SrcType);
    else
      RHS = Visit(E->getRHS());

    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // Store the value into the LHS. Bit-fields are handled specially
    // because the result is altered by the store, i.e., [C99 6.5.16p1]
    // 'An assignment expression has the value of the left operand after
    // the assignment...'.
    if (LHS.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
      // If the expression contained an implicit conversion, make sure
      // to use the value before the scalar conversion.
      Value *Src = Previous ?
          Previous : RHS;
      QualType DstType = E->getLHS()->getType();
      CGF.EmitBitfieldConversionCheck(Src, SrcType, RHS, DstType,
                                      LHS.getBitFieldInfo(), E->getExprLoc());
    } else {
      CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
      CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
    }
  }

  // If the result is clearly ignored, return now.
  if (Ignore)
    return nullptr;

  // The result of an assignment in C is the assigned r-value.
  if (!CGF.getLangOpts().CPlusPlus)
    return RHS;

  // If the lvalue is non-volatile, return the computed value of the
  // assignment.
  if (!LHS.isVolatileQualified())
    return RHS;

  // Otherwise, reload the value.
  return EmitLoadOfLValue(LHS, E->getExprLoc());
}

Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
  // Perform vector logical and on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {
    CGF.incrementProfileCounter(E);

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *And = Builder.CreateAnd(LHS, RHS);
    return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 0 && RHS, see if we can elide RHS; if so, just return 0.
  // If we have 1 && X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (LHSCondVal) { // If we have 1 && X, just emit X.
      CGF.incrementProfileCounter(E);

      // If the top of the logical operator nest, reset the MCDC temp to 0.
      if (CGF.MCDCLogOpStack.empty())
        CGF.maybeResetMCDCCondBitmap(E);

      CGF.MCDCLogOpStack.push_back(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter needed to track branch condition
      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
      // "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
          CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
        CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
        llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end");
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
        Builder.CreateCondBr(RHSCond, RHSBlockCnt, FBlock);
        CGF.EmitBlock(RHSBlockCnt);
        CGF.incrementProfileCounter(E->getRHS());
        CGF.EmitBranch(FBlock);
        CGF.EmitBlock(FBlock);
      } else
        CGF.markStmtMaybeUsed(E->getRHS());

      CGF.MCDCLogOpStack.pop_back();
      // If the top of the logical operator nest, update the MCDC bitmap.
      if (CGF.MCDCLogOpStack.empty())
        CGF.maybeUpdateMCDCTestVectorBitmap(E);

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
    }

    // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
    if (!CGF.ContainsLabel(E->getRHS())) {
      CGF.markStmtMaybeUsed(E->getRHS());
      return llvm::Constant::getNullValue(ResTy);
    }
  }

  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeResetMCDCCondBitmap(E);

  CGF.MCDCLogOpStack.push_back(E);

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is false, go to the failure (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
                           CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be false. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  CGF.incrementProfileCounter(E);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
  eval.end(CGF);

  // Reacquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  if (InstrumentRegions &&
      CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
    CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
    Builder.CreateCondBr(RHSCond, RHSBlockCnt, ContBlock);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.incrementProfileCounter(E->getRHS());
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
  }

  // Emit an unconditional branch from this block to ContBlock.
  {
    // There is no need to emit a line number for an unconditional branch.
    auto NL = ApplyDebugLocation::CreateEmpty(CGF);
    CGF.EmitBlock(ContBlock);
  }
  // Insert an entry into the phi node for the edge with the value of RHSCond.
  PN->addIncoming(RHSCond, RHSBlock);

  CGF.MCDCLogOpStack.pop_back();
  // If the top of the logical operator nest, update the MCDC bitmap.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeUpdateMCDCTestVectorBitmap(E);

  // Artificial location to preserve the scope information.
  {
    auto NL = ApplyDebugLocation::CreateArtificial(CGF);
    PN->setDebugLoc(Builder.getCurrentDebugLocation());
  }

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
}

Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
  // Perform vector logical or on comparisons with zero vectors.
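  // E.g. (illustrative): for vector operands, `v1 || v2` compares each lane
  // against zero and combines the lane masks:
  //   %cmp1 = icmp ne <4 x i32> %v1, zeroinitializer
  //   %cmp2 = icmp ne <4 x i32> %v2, zeroinitializer
  //   %or   = or <4 x i1> %cmp1, %cmp2
  //   %sext = sext <4 x i1> %or to <4 x i32>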
  if (E->getType()->isVectorType()) {
    CGF.incrementProfileCounter(E);

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *Or = Builder.CreateOr(LHS, RHS);
    return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 1 || RHS, see if we can elide RHS; if so, just return 1.
  // If we have 0 || X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (!LHSCondVal) { // If we have 0 || X, just emit X.
      CGF.incrementProfileCounter(E);

      // If the top of the logical operator nest, reset the MCDC temp to 0.
      if (CGF.MCDCLogOpStack.empty())
        CGF.maybeResetMCDCCondBitmap(E);

      CGF.MCDCLogOpStack.push_back(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter needed to track branch condition
      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
      // "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
          CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
        CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
        llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
        Builder.CreateCondBr(RHSCond, FBlock, RHSBlockCnt);
        CGF.EmitBlock(RHSBlockCnt);
        CGF.incrementProfileCounter(E->getRHS());
        CGF.EmitBranch(FBlock);
        CGF.EmitBlock(FBlock);
      } else
        CGF.markStmtMaybeUsed(E->getRHS());

      CGF.MCDCLogOpStack.pop_back();
      // If the top of the logical operator nest, update the MCDC bitmap.
      if (CGF.MCDCLogOpStack.empty())
        CGF.maybeUpdateMCDCTestVectorBitmap(E);

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
    }

    // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
    if (!CGF.ContainsLabel(E->getRHS())) {
      CGF.markStmtMaybeUsed(E->getRHS());
      return llvm::ConstantInt::get(ResTy, 1);
    }
  }

  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeResetMCDCCondBitmap(E);

  CGF.MCDCLogOpStack.push_back(E);

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is true, go to the success (cont) block.
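  // (Illustrative CFG: for `a || b`, a true LHS branches straight to lor.end,
  // contributing a constant `true` to the PHI below; a false LHS falls
  // through to lor.rhs, whose boolean result feeds the same PHI.)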
  CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
                           CGF.getCurrentProfileCount() -
                               CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be true. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);

  eval.begin(CGF);

  // Emit the RHS condition as a bool value.
  CGF.EmitBlock(RHSBlock);
  CGF.incrementProfileCounter(E);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

  eval.end(CGF);

  // Reacquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  if (InstrumentRegions &&
      CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
    CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
    Builder.CreateCondBr(RHSCond, ContBlock, RHSBlockCnt);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.incrementProfileCounter(E->getRHS());
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
  }

  // Emit an unconditional branch from this block to ContBlock. Insert an entry
  // into the phi node for the edge with the value of RHSCond.
  CGF.EmitBlock(ContBlock);
  PN->addIncoming(RHSCond, RHSBlock);

  CGF.MCDCLogOpStack.pop_back();
  // If the top of the logical operator nest, update the MCDC bitmap.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeUpdateMCDCTestVectorBitmap(E);

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
}

Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  CGF.EnsureInsertPoint();
  return Visit(E->getRHS());
}

//===----------------------------------------------------------------------===//
// Other Operators
//===----------------------------------------------------------------------===//

/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
/// expression is cheap enough and side-effect-free enough to evaluate
/// unconditionally instead of conditionally. This is used to convert control
/// flow into selects in some cases.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
                                                   CodeGenFunction &CGF) {
  // Anything that is an integer or floating point constant is fine.
  return E->IgnoreParens()->isEvaluatable(CGF.getContext());

  // Even non-volatile automatic variables can't be evaluated unconditionally.
  // Referencing a thread_local may cause non-trivial initialization work to
  // occur. If we're inside a lambda and one of the variables is from the scope
  // outside the lambda, that function may have returned already. Reading its
  // locals is a bad idea. Also, these reads may introduce races that didn't
  // exist in the source-level program.
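  // E.g. (illustrative): in `cond ? 4 : 5` both arms are evaluatable
  // constants, so the caller can emit a single select instead of a branch.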
}


Value *ScalarExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  TestAndClearIgnoreResultAssign();

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  Expr *condExpr = E->getCond();
  Expr *lhsExpr = E->getTrueExpr();
  Expr *rhsExpr = E->getFalseExpr();

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm.
  bool CondExprBool;
  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
    Expr *live = lhsExpr, *dead = rhsExpr;
    if (!CondExprBool) std::swap(live, dead);

    // If the dead side doesn't have labels we need, just emit the live part.
    if (!CGF.ContainsLabel(dead)) {
      if (CondExprBool) {
        if (llvm::EnableSingleByteCoverage) {
          CGF.incrementProfileCounter(lhsExpr);
          CGF.incrementProfileCounter(rhsExpr);
        }
        CGF.incrementProfileCounter(E);
      }
      Value *Result = Visit(live);
      CGF.markStmtMaybeUsed(dead);

      // If the live part is a throw expression, it acts like it has a void
      // type, so evaluating it returns a null Value*. However, a conditional
      // with non-void type must return a non-null Value*.
      if (!Result && !E->getType()->isVoidType())
        Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));

      return Result;
    }
  }

  // OpenCL: If the condition is a vector, we can treat this condition like
  // the select function.
  if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) ||
      condExpr->getType()->isExtVectorType()) {
    CGF.incrementProfileCounter(E);

    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    llvm::Type *condType = ConvertType(condExpr->getType());
    auto *vecTy = cast<llvm::FixedVectorType>(condType);

    unsigned numElem = vecTy->getNumElements();
    llvm::Type *elemType = vecTy->getElementType();

    llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
    llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
    llvm::Value *tmp = Builder.CreateSExt(
        TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
    llvm::Value *tmp2 = Builder.CreateNot(tmp);

    // Cast float to int to perform ANDs if necessary.
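    // (Bitwise select, illustrative: result = (RHS & ~mask) | (LHS & mask),
    // where a mask lane is all-ones when the condition lane's sign bit is
    // set, matching OpenCL select() semantics on vectors.)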
    llvm::Value *RHSTmp = RHS;
    llvm::Value *LHSTmp = LHS;
    bool wasCast = false;
    llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
    if (rhsVTy->getElementType()->isFloatingPointTy()) {
      RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
      LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
      wasCast = true;
    }

    llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
    llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
    llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
    if (wasCast)
      tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());

    return tmp5;
  }

  if (condExpr->getType()->isVectorType() ||
      condExpr->getType()->isSveVLSBuiltinType()) {
    CGF.incrementProfileCounter(E);

    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    llvm::Type *CondType = ConvertType(condExpr->getType());
    auto *VecTy = cast<llvm::VectorType>(CondType);
    llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);

    CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
    return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
  }

  // If this is a really simple expression (like x ? 4 : 5), emit this as a
  // select instead of as control flow. We can only do this if it is cheap and
  // safe to evaluate the LHS and RHS unconditionally.
  if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
      isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
    llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
    llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);

    if (llvm::EnableSingleByteCoverage) {
      CGF.incrementProfileCounter(lhsExpr);
      CGF.incrementProfileCounter(rhsExpr);
      CGF.incrementProfileCounter(E);
    } else
      CGF.incrementProfileCounter(E, StepV);

    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);
    if (!LHS) {
      // If the conditional has void type, make sure we return a null Value*.
      assert(!RHS && "LHS and RHS types must match");
      return nullptr;
    }
    return Builder.CreateSelect(CondV, LHS, RHS, "cond");
  }

  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeResetMCDCCondBitmap(condExpr);

  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
                           CGF.getProfileCount(lhsExpr));

  CGF.EmitBlock(LHSBlock);

  // If the top of the logical operator nest, update the MCDC bitmap for the
  // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
  // may also contain a boolean expression.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);

  if (llvm::EnableSingleByteCoverage)
    CGF.incrementProfileCounter(lhsExpr);
  else
    CGF.incrementProfileCounter(E);

  eval.begin(CGF);
  Value *LHS = Visit(lhsExpr);
  eval.end(CGF);

  LHSBlock = Builder.GetInsertBlock();
  Builder.CreateBr(ContBlock);

  CGF.EmitBlock(RHSBlock);

  // If the top of the logical operator nest, update the MCDC bitmap for the
  // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
  // may also contain a boolean expression.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);

  if (llvm::EnableSingleByteCoverage)
    CGF.incrementProfileCounter(rhsExpr);

  eval.begin(CGF);
  Value *RHS = Visit(rhsExpr);
  eval.end(CGF);

  RHSBlock = Builder.GetInsertBlock();
  CGF.EmitBlock(ContBlock);

  // If the LHS or RHS is a throw expression, it will be legitimately null.
  if (!LHS)
    return RHS;
  if (!RHS)
    return LHS;

  // Create a PHI node for the real part.
  llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
  PN->addIncoming(LHS, LHSBlock);
  PN->addIncoming(RHS, RHSBlock);

  // When single byte coverage mode is enabled, add a counter to the
  // continuation block.
  if (llvm::EnableSingleByteCoverage)
    CGF.incrementProfileCounter(E);

  return PN;
}

Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
  return Visit(E->getChosenSubExpr());
}

Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  Address ArgValue = Address::invalid();
  RValue ArgPtr = CGF.EmitVAArg(VE, ArgValue);

  return ArgPtr.getScalarVal();
}

Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
  return CGF.EmitBlockLiteral(block);
}

// Convert a vec3 to vec4, or vice versa.
static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
                                 Value *Src, unsigned NumElementsDst) {
  static constexpr int Mask[] = {0, 1, 2, -1};
  return Builder.CreateShuffleVector(Src, llvm::ArrayRef(Mask, NumElementsDst));
}

// Create cast instructions for converting LLVM value \p Src to LLVM type \p
// DstTy. \p Src has the same size as \p DstTy. Both are single value types
// but could be scalar or vectors of different lengths, and either can be
// pointer.
// There are 4 cases:
// 1. non-pointer -> non-pointer  : needs 1 bitcast
// 2. pointer -> pointer          : needs 1 bitcast or addrspacecast
// 3. pointer -> non-pointer
//   a) pointer -> intptr_t       : needs 1 ptrtoint
//   b) pointer -> non-intptr_t   : needs 1 ptrtoint then 1 bitcast
// 4. non-pointer -> pointer
//   a) intptr_t -> pointer       : needs 1 inttoptr
//   b) non-intptr_t -> pointer   : needs 1 bitcast then 1 inttoptr
// Note: for cases 3b and 4b two casts are required since LLVM casts do not
// allow casting directly between pointer types and non-integer non-pointer
// types.
static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
                                           const llvm::DataLayout &DL,
                                           Value *Src, llvm::Type *DstTy,
                                           StringRef Name = "") {
  auto SrcTy = Src->getType();

  // Case 1.
  if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
    return Builder.CreateBitCast(Src, DstTy, Name);

  // Case 2.
  if (SrcTy->isPointerTy() && DstTy->isPointerTy())
    return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);

  // Case 3.
  if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
    // Case 3b.
    if (!DstTy->isIntegerTy())
      Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
    // Cases 3a and 3b.
    return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
  }

  // Case 4b.
  if (!SrcTy->isIntegerTy())
    Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
  // Cases 4a and 4b.
  return Builder.CreateIntToPtr(Src, DstTy, Name);
}

Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
  Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
  llvm::Type *DstTy = ConvertType(E->getType());

  llvm::Type *SrcTy = Src->getType();
  unsigned NumElementsSrc =
      isa<llvm::VectorType>(SrcTy)
          ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
          : 0;
  unsigned NumElementsDst =
      isa<llvm::VectorType>(DstTy)
          ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
          : 0;

  // Use bit vector expansion for ext_vector_type boolean vectors.
  if (E->getType()->isExtVectorBoolType())
    return CGF.emitBoolVecConversion(Src, NumElementsDst, "astype");

  // Going from vec3 to non-vec3 is a special case and requires a shuffle
  // vector to get a vec4, then a bitcast if the target type is different.
  if (NumElementsSrc == 3 && NumElementsDst != 3) {
    Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
    Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                       DstTy);

    Src->setName("astype");
    return Src;
  }

  // Going from non-vec3 to vec3 is a special case and requires a bitcast
  // to vec4 if the original type is not vec4, then a shuffle vector to
  // get a vec3.
  if (NumElementsSrc != 3 && NumElementsDst == 3) {
    auto *Vec4Ty = llvm::FixedVectorType::get(
        cast<llvm::VectorType>(DstTy)->getElementType(), 4);
    Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                       Vec4Ty);

    Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
    Src->setName("astype");
    return Src;
  }

  return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
                                      Src, DstTy, "astype");
}

Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
  return CGF.EmitAtomicExpr(E).getScalarVal();
}

//===----------------------------------------------------------------------===//
// Entry Point into this File
//===----------------------------------------------------------------------===//

/// Emit the computation of the specified expression of scalar type, ignoring
/// the result.
Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
  assert(E && hasScalarEvaluationKind(E->getType()) &&
         "Invalid scalar expression to emit");

  return ScalarExprEmitter(*this, IgnoreResultAssign)
      .Visit(const_cast<Expr *>(E));
}

/// Emit a conversion from the specified type to the specified destination
/// type, both of which are LLVM scalar types.
Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
                                             QualType DstTy,
                                             SourceLocation Loc) {
  assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
         "Invalid scalar expression to emit");
  return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
}

/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
                                                      QualType SrcTy,
                                                      QualType DstTy,
                                                      SourceLocation Loc) {
  assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
         "Invalid complex -> scalar conversion");
  return ScalarExprEmitter(*this)
      .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
}


Value *
CodeGenFunction::EmitPromotedScalarExpr(const Expr *E,
                                        QualType PromotionType) {
  if (!PromotionType.isNull())
    return ScalarExprEmitter(*this).EmitPromoted(E, PromotionType);
  else
    return ScalarExprEmitter(*this).Visit(const_cast<Expr *>(E));
}


llvm::Value *CodeGenFunction::
EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                        bool isInc, bool isPre) {
  return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
}

LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
  // object->isa or (*object).isa
  // Generate code as for: *(Class*)object

  Expr *BaseExpr = E->getBase();
  Address Addr = Address::invalid();
  if (BaseExpr->isPRValue()) {
    llvm::Type *BaseTy =
        ConvertTypeForMem(BaseExpr->getType()->getPointeeType());
    Addr = Address(EmitScalarExpr(BaseExpr), BaseTy, getPointerAlign());
  } else {
    Addr = EmitLValue(BaseExpr).getAddress();
  }

  // Cast the address to Class*.
  Addr = Addr.withElementType(ConvertType(E->getType()));
  return MakeAddrLValue(Addr, E->getType());
}


LValue CodeGenFunction::EmitCompoundAssignmentLValue(
    const CompoundAssignOperator *E) {
  ScalarExprEmitter Scalar(*this);
  Value *Result = nullptr;
  switch (E->getOpcode()) {
#define COMPOUND_OP(Op)                                                        \
  case BO_##Op##Assign:                                                       \
    return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op,   \
                                           Result)
  COMPOUND_OP(Mul);
  COMPOUND_OP(Div);
  COMPOUND_OP(Rem);
  COMPOUND_OP(Add);
  COMPOUND_OP(Sub);
  COMPOUND_OP(Shl);
  COMPOUND_OP(Shr);
  COMPOUND_OP(And);
  COMPOUND_OP(Xor);
  COMPOUND_OP(Or);
#undef COMPOUND_OP

  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Mul:
  case BO_Div:
  case BO_Rem:
  case BO_Add:
  case BO_Sub:
  case BO_Shl:
  case BO_Shr:
  case BO_LT:
  case BO_GT:
  case BO_LE:
  case BO_GE:
  case BO_EQ:
  case BO_NE:
  case BO_Cmp:
  case BO_And:
  case BO_Xor:
  case BO_Or:
  case BO_LAnd:
  case BO_LOr:
  case BO_Assign:
  case BO_Comma:
    llvm_unreachable("Not valid compound assignment operators");
  }

  llvm_unreachable("Unhandled compound assignment operator");
}

struct GEPOffsetAndOverflow {
  // The total (signed) byte offset for the GEP.
  llvm::Value *TotalOffset;
  // The offset overflow flag - true if the total offset overflows.
  llvm::Value *OffsetOverflows;
};

/// Evaluate given GEPVal, which is either an inbounds GEP, or a constant,
/// and compute the total offset it applies from its base pointer BasePtr.
/// Returns offset in bytes and a boolean flag whether an overflow happened
/// during evaluation.
static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
                                                 llvm::LLVMContext &VMContext,
                                                 CodeGenModule &CGM,
                                                 CGBuilderTy &Builder) {
  const auto &DL = CGM.getDataLayout();

  // The total (signed) byte offset for the GEP.
  llvm::Value *TotalOffset = nullptr;

  // Was the GEP already reduced to a constant?
  if (isa<llvm::Constant>(GEPVal)) {
    // Compute the offset by casting both pointers to integers and subtracting:
    // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
    Value *BasePtr_int =
        Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
    Value *GEPVal_int =
        Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
    TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
    return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
  }

  auto *GEP = cast<llvm::GEPOperator>(GEPVal);
  assert(GEP->getPointerOperand() == BasePtr &&
         "BasePtr must be the base of the GEP.");
  assert(GEP->isInBounds() && "Expected inbounds GEP");

  auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());

  // Grab references to the signed add/mul overflow intrinsics for intptr_t.
  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
  auto *SAddIntrinsic =
      CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
  auto *SMulIntrinsic =
      CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);

  // The offset overflow flag - true if the total offset overflows.
  llvm::Value *OffsetOverflows = Builder.getFalse();

  /// Return the result of the given binary operation.
  auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
                  llvm::Value *RHS) -> llvm::Value * {
    assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");

    // If the operands are constants, return a constant result.
    if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
      if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
        llvm::APInt N;
        bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
                                                  /*Signed=*/true, N);
        if (HasOverflow)
          OffsetOverflows = Builder.getTrue();
        return llvm::ConstantInt::get(VMContext, N);
      }
    }

    // Otherwise, compute the result with checked arithmetic.
    auto *ResultAndOverflow = Builder.CreateCall(
        (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
    OffsetOverflows = Builder.CreateOr(
        Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
    return Builder.CreateExtractValue(ResultAndOverflow, 0);
  };

  // Determine the total byte offset by looking at each GEP operand.
  for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    llvm::Value *LocalOffset;
    auto *Index = GTI.getOperand();
    // Compute the local offset contributed by this indexing step:
    if (auto *STy = GTI.getStructTypeOrNull()) {
      // For struct indexing, the local offset is the byte position of the
      // specified field.
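      // E.g. (illustrative): for `struct S { int a; double b; }`, indexing
      // field `b` contributes a constant offset of 8 bytes under a typical
      // 64-bit data layout.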
      unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
      LocalOffset = llvm::ConstantInt::get(
          IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
    } else {
      // Otherwise this is array-like indexing. The local offset is the index
      // multiplied by the element size.
      auto *ElementSize =
          llvm::ConstantInt::get(IntPtrTy, GTI.getSequentialElementStride(DL));
      auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
      LocalOffset = eval(BO_Mul, ElementSize, IndexS);
    }

    // If this is the first offset, set it as the total offset. Otherwise, add
    // the local offset into the running total.
    if (!TotalOffset || TotalOffset == Zero)
      TotalOffset = LocalOffset;
    else
      TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
  }

  return {TotalOffset, OffsetOverflows};
}

Value *
CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr,
                                        ArrayRef<Value *> IdxList,
                                        bool SignedIndices, bool IsSubtraction,
                                        SourceLocation Loc, const Twine &Name) {
  llvm::Type *PtrTy = Ptr->getType();

  llvm::GEPNoWrapFlags NWFlags = llvm::GEPNoWrapFlags::inBounds();
  if (!SignedIndices && !IsSubtraction)
    NWFlags |= llvm::GEPNoWrapFlags::noUnsignedWrap();

  Value *GEPVal = Builder.CreateGEP(ElemTy, Ptr, IdxList, Name, NWFlags);

  // If the pointer overflow sanitizer isn't enabled, do nothing.
  if (!SanOpts.has(SanitizerKind::PointerOverflow))
    return GEPVal;

  // Perform the nullptr-and-offset check unless the nullptr is defined.
  bool PerformNullCheck = !NullPointerIsDefined(
      Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
  // Check for overflows unless the GEP got constant-folded, and only in the
  // default address space.
  bool PerformOverflowCheck =
      !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;

  if (!(PerformNullCheck || PerformOverflowCheck))
    return GEPVal;

  const auto &DL = CGM.getDataLayout();

  SanitizerScope SanScope(this);
  llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);

  GEPOffsetAndOverflow EvaluatedGEP =
      EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);

  assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
          EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
         "If the offset got constant-folded, we don't expect that there was an "
         "overflow.");

  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);

  // Common case: if the total offset is zero, don't emit a check.
  if (EvaluatedGEP.TotalOffset == Zero)
    return GEPVal;

  // Now that we've computed the total offset, add it to the base pointer (with
  // wrapping semantics).
  auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
  auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);

  llvm::SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>,
                    2>
      Checks;

  if (PerformNullCheck) {
    // If the base pointer evaluates to a null pointer value,
    // the only valid pointer this inbounds GEP can produce is also
    // a null pointer, so the offset must also evaluate to zero.
    // Likewise, if we have a non-zero base pointer, we cannot get a null
    // pointer as a result, so the offset cannot be -intptr_t(BasePtr).
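    // For example, `(char *)nullptr + 1` violates this, as does any GEP that
    // would move a non-null pointer exactly to address zero.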
    // In other words, both pointers are either null, or both are non-null,
    // or the behaviour is undefined.
    auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
    auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
    auto *Valid = Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr);
    Checks.emplace_back(Valid, SanitizerKind::SO_PointerOverflow);
  }

  if (PerformOverflowCheck) {
    // The GEP is valid if:
    // 1) The total offset doesn't overflow, and
    // 2) The sign of the difference between the computed address and the base
    //    pointer matches the sign of the total offset.
    llvm::Value *ValidGEP;
    auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
    if (SignedIndices) {
      // The GEP is computed as `unsigned base + signed offset`, therefore:
      // * If the offset was positive, the computed pointer cannot be
      //   [unsigned] less than the base pointer, unless it overflowed.
      // * If the offset was negative, the computed pointer cannot be
      //   [unsigned] greater than the base pointer, unless it overflowed.
      auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
      auto *PosOrZeroOffset =
          Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
      llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
      ValidGEP =
          Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
    } else if (!IsSubtraction) {
      // The GEP is computed as `unsigned base + unsigned offset`, therefore
      // the computed pointer cannot be [unsigned] less than the base pointer,
      // unless there was an overflow.
      // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
      ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
    } else {
      // The GEP is computed as `unsigned base - unsigned offset`, therefore
      // the computed pointer cannot be [unsigned] greater than the base
      // pointer, unless there was an overflow.
      // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
      ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
    }
    ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
    Checks.emplace_back(ValidGEP, SanitizerKind::SO_PointerOverflow);
  }

  assert(!Checks.empty() && "Should have produced some checks.");

  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
  // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
  llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
  EmitCheck(Checks, SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);

  return GEPVal;
}

Address CodeGenFunction::EmitCheckedInBoundsGEP(
    Address Addr, ArrayRef<Value *> IdxList, llvm::Type *elementType,
    bool SignedIndices, bool IsSubtraction, SourceLocation Loc, CharUnits Align,
    const Twine &Name) {
  if (!SanOpts.has(SanitizerKind::PointerOverflow)) {
    llvm::GEPNoWrapFlags NWFlags = llvm::GEPNoWrapFlags::inBounds();
    if (!SignedIndices && !IsSubtraction)
      NWFlags |= llvm::GEPNoWrapFlags::noUnsignedWrap();

    return Builder.CreateGEP(Addr, IdxList, elementType, Align, Name, NWFlags);
  }

  return RawAddress(
      EmitCheckedInBoundsGEP(Addr.getElementType(), Addr.emitRawPointer(*this),
                             IdxList, SignedIndices, IsSubtraction, Loc, Name),
      elementType, Align);
}
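
// For illustration only: with -fsanitize=pointer-overflow, a signed-index GEP
// such as `p + i` for an `int *p` is checked roughly as sketched below on a
// typical 64-bit target. The value names are made up for readability and are
// not the exact IR Clang prints:
//
//   %mul    = call { i64, i1 } @llvm.smul.with.overflow.i64(i64 4, i64 %i.ext)
//   %off    = extractvalue { i64, i1 } %mul, 0
//   %ovf    = extractvalue { i64, i1 } %mul, 1
//   %base   = ptrtoint ptr %p to i64
//   %sum    = add i64 %base, %off
//   %posok  = icmp uge i64 %sum, %base   ; valid if the offset is >= 0
//   %negok  = icmp ult i64 %sum, %base   ; valid if the offset is < 0
//   %pos    = icmp sge i64 %off, 0
//   %valid  = select i1 %pos, i1 %posok, i1 %negok
//   %noovf  = xor i1 %ovf, true
//   %ok     = and i1 %valid, %noovf
//
// combined with the null-pointer check above; on failure, control branches to
// the __ubsan_handle_pointer_overflow diagnostic handler.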