//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/ADT/Optional.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/FixedPointBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/IR/Module.h"
#include <cstdarg>

using namespace clang;
using namespace CodeGen;
using llvm::Value;

//===----------------------------------------------------------------------===//
//                         Scalar Expression Emitter
//===----------------------------------------------------------------------===//

namespace {

/// Determine whether the given binary operation may overflow.
/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
/// the returned overflow check is precise. The returned value is 'true' for
/// all other opcodes, to be conservative.
bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
                            BinaryOperator::Opcode Opcode, bool Signed,
                            llvm::APInt &Result) {
  // Assume overflow is possible, unless we can prove otherwise.
  bool Overflow = true;
  const auto &LHSAP = LHS->getValue();
  const auto &RHSAP = RHS->getValue();
  if (Opcode == BO_Add) {
    if (Signed)
      Result = LHSAP.sadd_ov(RHSAP, Overflow);
    else
      Result = LHSAP.uadd_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Sub) {
    if (Signed)
      Result = LHSAP.ssub_ov(RHSAP, Overflow);
    else
      Result = LHSAP.usub_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Mul) {
    if (Signed)
      Result = LHSAP.smul_ov(RHSAP, Overflow);
    else
      Result = LHSAP.umul_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Div || Opcode == BO_Rem) {
    if (Signed && !RHS->isZero())
      Result = LHSAP.sdiv_ov(RHSAP, Overflow);
    else
      return false;
  }
  return Overflow;
}

struct BinOpInfo {
  Value *LHS;
  Value *RHS;
  QualType Ty;                   // Computation Type.
  BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
  FPOptions FPFeatures;
  const Expr *E; // Entire expr, for error unsupported. May not be binop.

  /// Check if the binop can result in integer overflow.
  bool mayHaveIntegerOverflow() const {
    // Without constant input, we can't rule out overflow.
    auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
    auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
    if (!LHSCI || !RHSCI)
      return true;

    llvm::APInt Result;
    return ::mayHaveIntegerOverflow(
        LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
  }

  /// Check if the binop computes a division or a remainder.
  bool isDivremOp() const {
    return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
           Opcode == BO_RemAssign;
  }

  /// Check if the binop can result in an integer division by zero.
  bool mayHaveIntegerDivisionByZero() const {
    if (isDivremOp())
      if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
        return CI->isZero();
    return true;
  }

  /// Check if the binop can result in a float division by zero.
  bool mayHaveFloatDivisionByZero() const {
    if (isDivremOp())
      if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
        return CFP->isZero();
    return true;
  }

  /// Check if at least one operand is a fixed point type. In such cases, this
  /// operation did not follow usual arithmetic conversion and both operands
  /// might not be of the same type.
  bool isFixedPointOp() const {
    // We cannot simply check the result type since comparison operations
    // return an int.
    if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
      QualType LHSType = BinOp->getLHS()->getType();
      QualType RHSType = BinOp->getRHS()->getType();
      return LHSType->isFixedPointType() || RHSType->isFixedPointType();
    }
    if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
      return UnOp->getSubExpr()->getType()->isFixedPointType();
    return false;
  }
};

static bool MustVisitNullValue(const Expr *E) {
  // If a null pointer expression's type is the C++0x nullptr_t, then
  // it's not necessarily a simple constant and it must be evaluated
  // for its potential side effects.
  return E->getType()->isNullPtrType();
}

/// If \p E is a widened promoted integer, get its base (unpromoted) type.
static llvm::Optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
                                                        const Expr *E) {
  const Expr *Base = E->IgnoreImpCasts();
  if (E == Base)
    return llvm::None;

  QualType BaseTy = Base->getType();
  if (!BaseTy->isPromotableIntegerType() ||
      Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
    return llvm::None;

  return BaseTy;
}

/// Check if \p E is a widened promoted integer.
static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
  return getUnwidenedIntegerType(Ctx, E).hasValue();
}

/// Check if we can skip the overflow check for \p Op.
static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
  assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
         "Expected a unary or binary operator");

  // If the binop has constant inputs and we can prove there is no overflow,
  // we can elide the overflow check.
  if (!Op.mayHaveIntegerOverflow())
    return true;

  // If a unary op has a widened operand, the op cannot overflow.
  if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
    return !UO->canOverflow();

  // We usually don't need overflow checks for binops with widened operands.
  // Multiplication with promoted unsigned operands is a special case.
  const auto *BO = cast<BinaryOperator>(Op.E);
  auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
  if (!OptionalLHSTy)
    return false;

  auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
  if (!OptionalRHSTy)
    return false;

  QualType LHSTy = *OptionalLHSTy;
  QualType RHSTy = *OptionalRHSTy;

  // This is the simple case: binops without unsigned multiplication, and with
  // widened operands. No overflow check is needed here.
  if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
      !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
    return true;

  // For unsigned multiplication the overflow check can be elided if either one
  // of the unpromoted types is less than half the size of the promoted type.
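  // For example (assuming 16-bit short and 32-bit int): the product of two
  // unsigned shorts promoted to int can be as large as 0xFFFF * 0xFFFF, which
  // does not fit in a signed 32-bit int. But if one operand was originally an
  // unsigned char, the product is at most 0xFF * 0xFFFF < 2^24, which always
  // fits, so the check can be skipped.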
  unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
  return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
         (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
}

class ScalarExprEmitter
  : public StmtVisitor<ScalarExprEmitter, Value*> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  bool IgnoreResultAssign;
  llvm::LLVMContext &VMContext;
public:

  ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
    : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
      VMContext(cgf.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  bool TestAndClearIgnoreResultAssign() {
    bool I = IgnoreResultAssign;
    IgnoreResultAssign = false;
    return I;
  }

  llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
    return CGF.EmitCheckedLValue(E, TCK);
  }

  void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
                      const BinOpInfo &Info);

  Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
    return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
  }

  void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
    const AlignValueAttr *AVAttr = nullptr;
    if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
      const ValueDecl *VD = DRE->getDecl();

      if (VD->getType()->isReferenceType()) {
        if (const auto *TTy =
                dyn_cast<TypedefType>(VD->getType().getNonReferenceType()))
          AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
      } else {
        // Assumptions for function parameters are emitted at the start of the
        // function, so there is no need to repeat that here,
        // unless the alignment-assumption sanitizer is enabled,
        // then we prefer the assumption over alignment attribute
        // on IR function param.
        if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
          return;

        AVAttr = VD->getAttr<AlignValueAttr>();
      }
    }

    if (!AVAttr)
      if (const auto *TTy = dyn_cast<TypedefType>(E->getType()))
        AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();

    if (!AVAttr)
      return;

    Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
    llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
    CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
  }

  /// EmitLoadOfLValue - Given an expression with complex type that represents a
  /// value l-value, this method emits the address of the l-value, then loads
  /// and returns the result.
  Value *EmitLoadOfLValue(const Expr *E) {
    Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
                                E->getExprLoc());

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  /// EmitConversionToBool - Convert the specified expression value to a
  /// boolean (i1) truth value. This is equivalent to "Val != 0".
  Value *EmitConversionToBool(Value *Src, QualType DstTy);

  /// Emit a check that a conversion from a floating-point type does not
  /// overflow.
  void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
                                Value *Src, QualType SrcType, QualType DstType,
                                llvm::Type *DstTy, SourceLocation Loc);

  /// Known implicit conversion check kinds.
  /// Keep in sync with the enum of the same name in ubsan_handlers.h
  enum ImplicitConversionCheckKind : unsigned char {
    ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
    ICCK_UnsignedIntegerTruncation = 1,
    ICCK_SignedIntegerTruncation = 2,
    ICCK_IntegerSignChange = 3,
    ICCK_SignedIntegerTruncationOrSignChange = 4,
  };

  /// Emit a check that an [implicit] truncation of an integer does not
  /// discard any bits. It is not UB, so we use the value after truncation.
  void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a check that an [implicit] conversion of an integer does not change
  /// the sign of the value. It is not UB, so we use the value after conversion.
  /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
  void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a conversion from the specified type to the specified destination
  /// type, both of which are LLVM scalar types.
  struct ScalarConversionOpts {
    bool TreatBooleanAsSigned;
    bool EmitImplicitIntegerTruncationChecks;
    bool EmitImplicitIntegerSignChangeChecks;

    ScalarConversionOpts()
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(false),
          EmitImplicitIntegerSignChangeChecks(false) {}

    ScalarConversionOpts(clang::SanitizerSet SanOpts)
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(
              SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
          EmitImplicitIntegerSignChangeChecks(
              SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
  };
  Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
                        llvm::Type *SrcTy, llvm::Type *DstTy,
                        ScalarConversionOpts Opts);
  Value *
  EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
                       SourceLocation Loc,
                       ScalarConversionOpts Opts = ScalarConversionOpts());

  /// Convert between either a fixed point and other fixed point or fixed point
  /// and an integer.
  Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
                                  SourceLocation Loc);

  /// Emit a conversion from the specified complex type to the specified
  /// destination type, where the destination type is an LLVM scalar type.
  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                                       QualType SrcTy, QualType DstTy,
                                       SourceLocation Loc);

  /// EmitNullValue - Emit a value that corresponds to null for the given type.
  Value *EmitNullValue(QualType Ty);

  /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
  Value *EmitFloatToBoolConversion(Value *V) {
    // Compare against 0.0 for fp scalars.
    llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
    return Builder.CreateFCmpUNE(V, Zero, "tobool");
  }

  /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
  Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
    Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()),
                                         QT);

    return Builder.CreateICmpNE(V, Zero, "tobool");
  }

  Value *EmitIntToBoolConversion(Value *V) {
    // Because of the type rules of C, we often end up computing a
    // logical value, then zero extending it to int, then wanting it
    // as a logical value again. Optimize this common case.
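    // For example, "a && b" yields an i1 that is zero-extended to i32 (the C
    // result type of && is int); if that value is then used where a boolean
    // is needed, we can strip the zext and reuse the original i1.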
    if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
      if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
        Value *Result = ZI->getOperand(0);
        // If there aren't any more uses, zap the instruction to save space.
        // Note that there can be more uses, for example if this
        // is the result of an assignment.
        if (ZI->use_empty())
          ZI->eraseFromParent();
        return Result;
      }
    }

    return Builder.CreateIsNotNull(V, "tobool");
  }

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  Value *Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
  }

  Value *VisitStmt(Stmt *S) {
    S->dump(llvm::errs(), CGF.getContext());
    llvm_unreachable("Stmt can't have complex result type!");
  }
  Value *VisitExpr(Expr *S);

  Value *VisitConstantExpr(ConstantExpr *E) {
    if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      if (E->isGLValue())
        return CGF.Builder.CreateLoad(Address(
            Result, CGF.getContext().getTypeAlignInChars(E->getType())));
      return Result;
    }
    return Visit(E->getSubExpr());
  }
  Value *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }
  Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }
  Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    return Visit(GE->getResultExpr());
  }
  Value *VisitCoawaitExpr(CoawaitExpr *S) {
    return CGF.EmitCoawaitExpr(*S).getScalarVal();
  }
  Value *VisitCoyieldExpr(CoyieldExpr *S) {
    return CGF.EmitCoyieldExpr(*S).getScalarVal();
  }
  Value *VisitUnaryCoawait(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Leaves.
  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
    return llvm::ConstantFP::get(VMContext, E->getValue());
  }
  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
  Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
    llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
    return Builder.CreateBitCast(V, ConvertType(E->getType()));
  }

  Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
  }

  Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    return CGF.EmitPseudoObjectRValue(E).getScalarVal();
  }

  Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
    if (E->isGLValue())
      return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
                              E->getExprLoc());

    // Otherwise, assume the mapping is the scalar directly.
    return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
  }

  // l-values.
  Value *VisitDeclRefExpr(DeclRefExpr *E) {
    if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
      return CGF.emitScalarConstant(Constant, E);
    return EmitLoadOfLValue(E);
  }

  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
    return CGF.EmitObjCSelectorExpr(E);
  }
  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
    return CGF.EmitObjCProtocolExpr(E);
  }
  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
    if (E->getMethodDecl() &&
        E->getMethodDecl()->getReturnType()->isReferenceType())
      return EmitLoadOfLValue(E);
    return CGF.EmitObjCMessageExpr(E).getScalarVal();
  }

  Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
    LValue LV = CGF.EmitObjCIsaExpr(E);
    Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
    return V;
  }

  Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
    VersionTuple Version = E->getVersion();

    // If we're checking for a platform older than our minimum deployment
    // target, we can fold the check away.
    if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
      return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);

    return CGF.EmitBuiltinAvailable(Version);
  }

  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
  Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
  Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
  Value *VisitMemberExpr(MemberExpr *E);
  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
    // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
    // literals aren't l-values in C++. We do so simply because that's the
    // cleanest way to handle compound literals in C++.
    // See the discussion here: https://reviews.llvm.org/D64464
    return EmitLoadOfLValue(E);
  }

  Value *VisitInitListExpr(InitListExpr *E);

  Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
    assert(CGF.getArrayInitIndex() &&
           "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
    return CGF.getArrayInitIndex();
  }

  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
    CGF.CGM.EmitExplicitCastExprType(E, &CGF);
    return VisitCastExpr(E);
  }
  Value *VisitCastExpr(CastExpr *E);

  Value *VisitCallExpr(const CallExpr *E) {
    if (E->getCallReturnType(CGF.getContext())->isReferenceType())
      return EmitLoadOfLValue(E);

    Value *V = CGF.EmitCallExpr(E).getScalarVal();

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  Value *VisitStmtExpr(const StmtExpr *E);

  // Unary Operators.
  Value *VisitUnaryPostDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, false);
  }
  Value *VisitUnaryPostInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, false);
  }
  Value *VisitUnaryPreDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, true);
  }
  Value *VisitUnaryPreInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, true);
  }

  llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
                                                  llvm::Value *InVal,
                                                  bool IsInc);

  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                       bool isInc, bool isPre);


  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
    if (isa<MemberPointerType>(E->getType())) // never sugared
      return CGF.CGM.getMemberPointerConstant(E);

    return EmitLValue(E->getSubExpr()).getPointer(CGF);
  }
  Value *VisitUnaryDeref(const UnaryOperator *E) {
    if (E->getType()->isVoidType())
      return Visit(E->getSubExpr()); // the actual value should be unused
    return EmitLoadOfLValue(E);
  }
  Value *VisitUnaryPlus(const UnaryOperator *E) {
    // This differs from gcc, though, most likely due to a bug in gcc.
    TestAndClearIgnoreResultAssign();
    return Visit(E->getSubExpr());
  }
  Value *VisitUnaryMinus    (const UnaryOperator *E);
  Value *VisitUnaryNot      (const UnaryOperator *E);
  Value *VisitUnaryLNot     (const UnaryOperator *E);
  Value *VisitUnaryReal     (const UnaryOperator *E);
  Value *VisitUnaryImag     (const UnaryOperator *E);
  Value *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // C++
  Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
    auto &Ctx = CGF.getContext();
    APValue Evaluated =
        SLE->EvaluateInContext(Ctx, CGF.CurSourceLocExprScope.getDefaultExpr());
    return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
                                             SLE->getType());
  }

  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    return Visit(DAE->getExpr());
  }
  Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    return Visit(DIE->getExpr());
  }
  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
    return CGF.LoadCXXThis();
  }

  Value *VisitExprWithCleanups(ExprWithCleanups *E);
  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
    return CGF.EmitCXXNewExpr(E);
  }
  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
    CGF.EmitCXXDeleteExpr(E);
    return nullptr;
  }

  Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }

  Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitRequiresExpr(const RequiresExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
  }

  Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
  }

  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    CGF.EmitScalarExpr(E->getBase());
    return nullptr;
  }

  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
    return EmitNullValue(E->getType());
  }

  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
    CGF.EmitCXXThrowExpr(E);
    return nullptr;
  }

  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
    return Builder.getInt1(E->getValue());
  }

  // Binary Operators.
  Value *EmitMul(const BinOpInfo &Ops) {
    if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        LLVM_FALLTHROUGH;
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), Ops))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        return EmitOverflowCheckedBinOp(Ops);
      }
    }

    if (Ops.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
      // We need to check the types of the operands of the operator to get the
      // correct matrix dimensions.
      auto *BO = cast<BinaryOperator>(Ops.E);
      auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getLHS()->getType().getCanonicalType());
      auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getRHS()->getType().getCanonicalType());
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      if (LHSMatTy && RHSMatTy)
        return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
                                       LHSMatTy->getNumColumns(),
                                       RHSMatTy->getNumColumns());
      return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
    }

    if (Ops.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), Ops))
      return EmitOverflowCheckedBinOp(Ops);

    if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
      // Preserve the old values
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
    }
    if (Ops.isFixedPointOp())
      return EmitFixedPointBinOp(Ops);
    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
  }
  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);

  // Check for undefined division and modulus behaviors.
  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
                                                  llvm::Value *Zero, bool isDiv);
  // Common helper for getting how wide LHS of shift is.
  static Value *GetWidthMinusOneValue(Value *LHS, Value *RHS);

  // Used for shifting constraints for OpenCL, do mask for powers of 2, URem for
  // non powers of two.
  Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);

  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
  Value *EmitAnd(const BinOpInfo &Ops) {
    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
  }
  Value *EmitXor(const BinOpInfo &Ops) {
    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
  }
  Value *EmitOr (const BinOpInfo &Ops) {
    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
  }

  // Helper functions for fixed point binary operations.
  Value *EmitFixedPointBinOp(const BinOpInfo &Ops);

  BinOpInfo EmitBinOps(const BinaryOperator *E);
  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
                                  Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));

  // Binary operators and binary compound assignment operators.
#define HANDLEBINOP(OP) \
  Value *VisitBin ## OP(const BinaryOperator *E) { \
    return Emit ## OP(EmitBinOps(E)); \
  } \
  Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) { \
    return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP); \
  }
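  // For example, HANDLEBINOP(Mul) expands to VisitBinMul (which emits the
  // multiplication itself) and VisitBinMulAssign (which handles "*=").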
  HANDLEBINOP(Mul)
  HANDLEBINOP(Div)
  HANDLEBINOP(Rem)
  HANDLEBINOP(Add)
  HANDLEBINOP(Sub)
  HANDLEBINOP(Shl)
  HANDLEBINOP(Shr)
  HANDLEBINOP(And)
  HANDLEBINOP(Xor)
  HANDLEBINOP(Or)
#undef HANDLEBINOP

  // Comparisons.
  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
                     llvm::CmpInst::Predicate SICmpOpc,
                     llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
#define VISITCOMP(CODE, UI, SI, FP, SIG) \
    Value *VisitBin##CODE(const BinaryOperator *E) { \
      return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
                         llvm::FCmpInst::FP, SIG); }
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
#undef VISITCOMP

  Value *VisitBinAssign     (const BinaryOperator *E);

  Value *VisitBinLAnd       (const BinaryOperator *E);
  Value *VisitBinLOr        (const BinaryOperator *E);
  Value *VisitBinComma      (const BinaryOperator *E);

  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }

  Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    return Visit(E->getSemanticForm());
  }

  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
    return CGF.EmitObjCBoxedExpr(E);
  }
  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
    return CGF.EmitObjCArrayLiteral(E);
  }
  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
    return CGF.EmitObjCDictionaryLiteral(E);
  }
  Value *VisitAsTypeExpr(AsTypeExpr *CE);
  Value *VisitAtomicExpr(AtomicExpr *AE);
};
} // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitConversionToBool - Convert the specified expression value to a
/// boolean (i1) truth value. This is equivalent to "Val != 0".
Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
  assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");

  if (SrcType->isRealFloatingType())
    return EmitFloatToBoolConversion(Src);

  if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);

  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
         "Unknown scalar type to convert");

  if (isa<llvm::IntegerType>(Src->getType()))
    return EmitIntToBoolConversion(Src);

  assert(isa<llvm::PointerType>(Src->getType()));
  return EmitPointerToBoolConversion(Src, SrcType);
}

void ScalarExprEmitter::EmitFloatConversionCheck(
    Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
    QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
  assert(SrcType->isFloatingType() && "not a conversion from floating point");
  if (!isa<llvm::IntegerType>(DstTy))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);
  using llvm::APFloat;
  using llvm::APSInt;

  llvm::Value *Check = nullptr;
  const llvm::fltSemantics &SrcSema =
      CGF.getContext().getFloatTypeSemantics(OrigSrcType);

  // Floating-point to integer. This has undefined behavior if the source is
  // +-Inf, NaN, or doesn't fit into the destination type (after truncation
  // to an integer).
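  // For example (assuming a 32-bit signed destination and a double source),
  // the exclusive bounds computed below are -2147483649.0 and 2147483648.0:
  // any source value strictly between them truncates to a representable int,
  // and the ordered comparisons also reject NaN.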
  unsigned Width = CGF.getContext().getIntWidth(DstType);
  bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();

  APSInt Min = APSInt::getMinValue(Width, Unsigned);
  APFloat MinSrc(SrcSema, APFloat::uninitialized);
  if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for lower bound. Just check for
    // -Inf/NaN.
    MinSrc = APFloat::getInf(SrcSema, true);
  else
    // Find the largest value which is too small to represent (before
    // truncation toward zero).
    MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);

  APSInt Max = APSInt::getMaxValue(Width, Unsigned);
  APFloat MaxSrc(SrcSema, APFloat::uninitialized);
  if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for upper bound. Just check for
    // +Inf/NaN.
    MaxSrc = APFloat::getInf(SrcSema, false);
  else
    // Find the smallest value which is too large to represent (before
    // truncation toward zero).
    MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);

  // If we're converting from __half, convert the range to float to match
  // the type of src.
  if (OrigSrcType->isHalfType()) {
    const llvm::fltSemantics &Sema =
        CGF.getContext().getFloatTypeSemantics(SrcType);
    bool IsInexact;
    MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
    MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
  }

  llvm::Value *GE =
      Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
  llvm::Value *LE =
      Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
  Check = Builder.CreateAnd(GE, LE);

  llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
                                  CGF.EmitCheckTypeDescriptor(OrigSrcType),
                                  CGF.EmitCheckTypeDescriptor(DstType)};
  CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
                SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the truncation Src -> Dst was lossy.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();
  (void)DstTy; // Only used in assert()

  // This should be truncation of integral types.
  assert(Src != Dst);
  assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();

  // If both (src and dst) types are unsigned, then it's an unsigned truncation.
  // Else, it is a signed truncation.
  ScalarExprEmitter::ImplicitConversionCheckKind Kind;
  SanitizerMask Mask;
  if (!SrcSigned && !DstSigned) {
    Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
    Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
  } else {
    Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
    Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
  }

  llvm::Value *Check = nullptr;
  // 1. Extend the truncated value back to the same width as the Src.
  Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
  // 2. Equality-compare with the original source value.
  Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
  // If the comparison result is 'i1 false', then the truncation was lossy.
  return std::make_pair(Kind, std::make_pair(Check, Mask));
}

static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
    QualType SrcType, QualType DstType) {
  return SrcType->isIntegerType() && DstType->isIntegerType();
}

void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
    return;

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  unsigned SrcBits = Src->getType()->getScalarSizeInBits();
  unsigned DstBits = Dst->getType()->getScalarSizeInBits();
  // This must be truncation. Else we do not care.
  if (SrcBits <= DstBits)
    return;

  assert(!DstType->isBooleanType() && "we should not get here with booleans.");

  // If the integer sign change sanitizer is enabled,
  // and we are truncating from larger unsigned type to smaller signed type,
  // let that next sanitizer deal with it.
  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
      (!SrcSigned && DstSigned))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check =
          EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
  // If the comparison result is 'i1 false', then the truncation was lossy.

  // Do we care about this type of truncation?
  if (!CGF.SanOpts.has(Check.second.second))
    return;

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first)};
  CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  (void)SrcSigned; // Only used in assert()
  (void)DstSigned; // Only used in assert()
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();
  (void)SrcBits; // Only used in assert()
  (void)DstBits; // Only used in assert()

  assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
         "either the widths should be different, or the signednesses.");

  // NOTE: zero value is considered to be non-negative.
  auto EmitIsNegativeTest = [&Builder](Value *V, QualType VType,
                                       const char *Name) -> Value * {
    // Is this value a signed type?
    bool VSigned = VType->isSignedIntegerOrEnumerationType();
    llvm::Type *VTy = V->getType();
    if (!VSigned) {
      // If the value is unsigned, then it is never negative.
      // FIXME: can we encounter non-scalar VTy here?
      return llvm::ConstantInt::getFalse(VTy->getContext());
    }
    // Get the zero of the same type with which we will be comparing.
    llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
    // %V.isnegative = icmp slt %V, 0
    // I.e. is %V *strictly* less than zero, does it have negative value?
    return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
                              llvm::Twine(Name) + "." + V->getName() +
                                  ".negativitycheck");
  };

  // 1. Was the old Value negative?
  llvm::Value *SrcIsNegative = EmitIsNegativeTest(Src, SrcType, "src");
  // 2. Is the new Value negative?
  llvm::Value *DstIsNegative = EmitIsNegativeTest(Dst, DstType, "dst");
  // 3. Now, was the 'negativity status' preserved during the conversion?
  //    NOTE: conversion from negative to zero is considered to change the sign.
  //    (We want to get 'false' when the conversion changed the sign.)
  //    So we should just equality-compare the negativity statuses.
  llvm::Value *Check = nullptr;
  Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
  // If the comparison result is 'false', then the conversion changed the sign.
  return std::make_pair(
      ScalarExprEmitter::ICCK_IntegerSignChange,
      std::make_pair(Check, SanitizerKind::ImplicitIntegerSignChange));
}

void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange))
    return;

  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();

  // Now, we do not need to emit the check in *all* of the cases.
  // We can avoid emitting it in some obvious cases where it would always have
  // been dropped by the opt passes (instcombine) anyway.
  // If it's a cast between effectively the same type, no check.
  // NOTE: this is *not* equivalent to checking the canonical types.
  if (SrcSigned == DstSigned && SrcBits == DstBits)
    return;
  // At least one of the values needs to have signed type.
  // If both are unsigned, then obviously, neither of them can be negative.
  if (!SrcSigned && !DstSigned)
    return;
  // If the conversion is to a *larger* *signed* type, then no check is needed.
  // Because either sign-extension happens (so the sign will remain),
  // or zero-extension will happen (the sign bit will be zero).
  if ((DstBits > SrcBits) && DstSigned)
    return;
  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && SrcSigned) {
    // If the signed integer truncation sanitizer is enabled,
    // and this is a truncation from signed type, then no check is needed.
    // Because here sign change check is interchangeable with truncation check.
    return;
  }
  // That's it. We can't rule out any more cases with the data we have.

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check;

  // Each of these checks needs to return 'false' when an issue was detected.
  ImplicitConversionCheckKind CheckKind;
  llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
  // So we can 'and' all the checks together, and still get 'false',
  // if at least one of the checks detected an issue.

  Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
  CheckKind = Check.first;
  Checks.emplace_back(Check.second);

  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && !SrcSigned && DstSigned) {
    // If the signed integer truncation sanitizer was enabled,
    // and we are truncating from larger unsigned type to smaller signed type,
    // let's handle the case we skipped in that check.
    Check =
        EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
    CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
    Checks.emplace_back(Check.second);
    // If the comparison result is 'i1 false', then the truncation was lossy.
  }

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind)};
  // EmitCheck() will 'and' all the checks together.
  CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}

Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
                                         QualType DstType, llvm::Type *SrcTy,
                                         llvm::Type *DstTy,
                                         ScalarConversionOpts Opts) {
  // The Element types determine the type of cast to perform.
  llvm::Type *SrcElementTy;
  llvm::Type *DstElementTy;
  QualType SrcElementType;
  QualType DstElementType;
  if (SrcType->isMatrixType() && DstType->isMatrixType()) {
    SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
    DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
    SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
    DstElementType = DstType->castAs<MatrixType>()->getElementType();
  } else {
    assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
           "cannot cast between matrix and non-matrix types");
    SrcElementTy = SrcTy;
    DstElementTy = DstTy;
    SrcElementType = SrcType;
    DstElementType = DstType;
  }

  if (isa<llvm::IntegerType>(SrcElementTy)) {
    bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
    if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
      InputSigned = true;
    }

    if (isa<llvm::IntegerType>(DstElementTy))
      return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    if (InputSigned)
      return Builder.CreateSIToFP(Src, DstTy, "conv");
    return Builder.CreateUIToFP(Src, DstTy, "conv");
  }

  if (isa<llvm::IntegerType>(DstElementTy)) {
    assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
    if (DstElementType->isSignedIntegerOrEnumerationType())
      return Builder.CreateFPToSI(Src, DstTy, "conv");
    return Builder.CreateFPToUI(Src, DstTy, "conv");
  }

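  // Both element types are floating point at this point. The LLVM
  // floating-point TypeIDs are declared roughly in order of increasing width
  // (half, bfloat, float, double, ...), so a destination with a smaller
  // TypeID than the source is treated as a narrowing conversion.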
1241 if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
1242 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1243 return Builder.CreateFPExt(Src, DstTy, "conv");
1244 }
1245
1246 /// Emit a conversion from the specified type to the specified destination type,
1247 /// both of which are LLVM scalar types.
EmitScalarConversion(Value * Src,QualType SrcType,QualType DstType,SourceLocation Loc,ScalarConversionOpts Opts)1248 Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
1249 QualType DstType,
1250 SourceLocation Loc,
1251 ScalarConversionOpts Opts) {
1252 // All conversions involving fixed point types should be handled by the
1253 // EmitFixedPoint family functions. This is done to prevent bloating up this
1254 // function more, and although fixed point numbers are represented by
1255 // integers, we do not want to follow any logic that assumes they should be
1256 // treated as integers.
1257 // TODO(leonardchan): When necessary, add another if statement checking for
1258 // conversions to fixed point types from other types.
1259 if (SrcType->isFixedPointType()) {
1260 if (DstType->isBooleanType())
1261 // It is important that we check this before checking if the dest type is
1262 // an integer because booleans are technically integer types.
1263 // We do not need to check the padding bit on unsigned types if unsigned
1264 // padding is enabled because overflow into this bit is undefined
1265 // behavior.
1266 return Builder.CreateIsNotNull(Src, "tobool");
1267 if (DstType->isFixedPointType() || DstType->isIntegerType() ||
1268 DstType->isRealFloatingType())
1269 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1270
1271 llvm_unreachable(
1272 "Unhandled scalar conversion from a fixed point type to another type.");
1273 } else if (DstType->isFixedPointType()) {
1274 if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
1275 // This also includes converting booleans and enums to fixed point types.
1276 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1277
1278 llvm_unreachable(
1279 "Unhandled scalar conversion to a fixed point type from another type.");
1280 }
1281
1282 QualType NoncanonicalSrcType = SrcType;
1283 QualType NoncanonicalDstType = DstType;
1284
1285 SrcType = CGF.getContext().getCanonicalType(SrcType);
1286 DstType = CGF.getContext().getCanonicalType(DstType);
1287 if (SrcType == DstType) return Src;
1288
1289 if (DstType->isVoidType()) return nullptr;
1290
1291 llvm::Value *OrigSrc = Src;
1292 QualType OrigSrcType = SrcType;
1293 llvm::Type *SrcTy = Src->getType();
1294
1295 // Handle conversions to bool first, they are special: comparisons against 0.
1296 if (DstType->isBooleanType())
1297 return EmitConversionToBool(Src, SrcType);
1298
1299 llvm::Type *DstTy = ConvertType(DstType);
1300
1301 // Cast from half through float if half isn't a native type.
1302 if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1303 // Cast to FP using the intrinsic if the half type itself isn't supported.
1304 if (DstTy->isFloatingPointTy()) {
1305 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1306 return Builder.CreateCall(
1307 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
1308 Src);
1309 } else {
1310 // Cast to other types through float, using either the intrinsic or FPExt,
1311 // depending on whether the half type itself is supported
1312 // (as opposed to operations on half, available with NativeHalfType).
1313 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1314 Src = Builder.CreateCall(
1315 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
1316 CGF.CGM.FloatTy),
1317 Src);
1318 } else {
1319 Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
1320 }
1321 SrcType = CGF.getContext().FloatTy;
1322 SrcTy = CGF.FloatTy;
1323 }
1324 }
1325
1326 // Ignore conversions like int -> uint.
1327 if (SrcTy == DstTy) {
1328 if (Opts.EmitImplicitIntegerSignChangeChecks)
1329 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
1330 NoncanonicalDstType, Loc);
1331
1332 return Src;
1333 }
1334
1335 // Handle pointer conversions next: pointers can only be converted to/from
1336 // other pointers and integers. Check for pointer types in terms of LLVM, as
1337 // some native types (like Obj-C id) may map to a pointer type.
1338 if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
1339 // The source value may be an integer, or a pointer.
1340 if (isa<llvm::PointerType>(SrcTy))
1341 return Builder.CreateBitCast(Src, DstTy, "conv");
1342
1343 assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
1344 // First, convert to the correct width so that we control the kind of
1345 // extension.
1346 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
1347 bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1348 llvm::Value* IntResult =
1349 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
1350 // Then, cast to pointer.
1351 return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
1352 }
1353
1354 if (isa<llvm::PointerType>(SrcTy)) {
1355     // Must be a ptr-to-int cast.
1356 assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
1357 return Builder.CreatePtrToInt(Src, DstTy, "conv");
1358 }
1359
1360 // A scalar can be splatted to an extended vector of the same element type
1361 if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1362 // Sema should add casts to make sure that the source expression's type is
1363 // the same as the vector's element type (sans qualifiers)
1364 assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
1365 SrcType.getTypePtr() &&
1366 "Splatted expr doesn't match with vector element type?");
1367
1368 // Splat the element across to all elements
1369 unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
1370 return Builder.CreateVectorSplat(NumElements, Src, "splat");
1371 }
1372
1373 if (SrcType->isMatrixType() && DstType->isMatrixType())
1374 return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1375
1376 if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
1377 // Allow bitcast from vector to integer/fp of the same size.
1378 unsigned SrcSize = SrcTy->getPrimitiveSizeInBits();
1379 unsigned DstSize = DstTy->getPrimitiveSizeInBits();
1380 if (SrcSize == DstSize)
1381 return Builder.CreateBitCast(Src, DstTy, "conv");
1382
1383 // Conversions between vectors of different sizes are not allowed except
1384 // when vectors of half are involved. Operations on storage-only half
1385 // vectors require promoting half vector operands to float vectors and
1386 // truncating the result, which is either an int or float vector, to a
1387 // short or half vector.
1388
1389 // Source and destination are both expected to be vectors.
1390 llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1391 llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1392 (void)DstElementTy;
1393
1394 assert(((SrcElementTy->isIntegerTy() &&
1395 DstElementTy->isIntegerTy()) ||
1396 (SrcElementTy->isFloatingPointTy() &&
1397 DstElementTy->isFloatingPointTy())) &&
1398 "unexpected conversion between a floating-point vector and an "
1399 "integer vector");
1400
1401 // Truncate an i32 vector to an i16 vector.
1402 if (SrcElementTy->isIntegerTy())
1403 return Builder.CreateIntCast(Src, DstTy, false, "conv");
1404
1405 // Truncate a float vector to a half vector.
1406 if (SrcSize > DstSize)
1407 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1408
1409 // Promote a half vector to a float vector.
1410 return Builder.CreateFPExt(Src, DstTy, "conv");
1411 }
1412
1413 // Finally, we have the arithmetic types: real int/float.
1414 Value *Res = nullptr;
1415 llvm::Type *ResTy = DstTy;
1416
1417 // An overflowing conversion has undefined behavior if either the source type
1418 // or the destination type is a floating-point type. However, we consider the
1419 // range of representable values for all floating-point types to be
1420 // [-inf,+inf], so no overflow can ever happen when the destination type is a
1421 // floating-point type.
1422 if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
1423 OrigSrcType->isFloatingType())
1424 EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1425 Loc);
1426
1427 // Cast to half through float if half isn't a native type.
1428 if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1429 // Make sure we cast in a single step if from another FP type.
1430 if (SrcTy->isFloatingPointTy()) {
1431 // Use the intrinsic if the half type itself isn't supported
1432 // (as opposed to operations on half, available with NativeHalfType).
1433 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1434 return Builder.CreateCall(
1435 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
1436 // If the half type is supported, just use an fptrunc.
1437 return Builder.CreateFPTrunc(Src, DstTy);
1438 }
1439 DstTy = CGF.FloatTy;
1440 }
1441
1442 Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1443
1444 if (DstTy != ResTy) {
1445 if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1446 assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
1447 Res = Builder.CreateCall(
1448 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
1449 Res);
1450 } else {
1451 Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
1452 }
1453 }
1454
1455 if (Opts.EmitImplicitIntegerTruncationChecks)
1456 EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
1457 NoncanonicalDstType, Loc);
1458
1459 if (Opts.EmitImplicitIntegerSignChangeChecks)
1460 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
1461 NoncanonicalDstType, Loc);
1462
1463 return Res;
1464 }
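
// Illustrative lowering (assumed, not part of the original source): for
//   int i; float f = i;
// this routine reaches EmitScalarCast and emits roughly
//   %conv = sitofp i32 %i to float
// whereas an unsigned source would use uitofp instead.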
1465
1466 Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
1467 QualType DstTy,
1468 SourceLocation Loc) {
1469 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
1470 llvm::Value *Result;
1471 if (SrcTy->isRealFloatingType())
1472 Result = FPBuilder.CreateFloatingToFixed(Src,
1473 CGF.getContext().getFixedPointSemantics(DstTy));
1474 else if (DstTy->isRealFloatingType())
1475 Result = FPBuilder.CreateFixedToFloating(Src,
1476 CGF.getContext().getFixedPointSemantics(SrcTy),
1477 ConvertType(DstTy));
1478 else {
1479 auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy);
1480 auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy);
1481
1482 if (DstTy->isIntegerType())
1483 Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema,
1484 DstFPSema.getWidth(),
1485 DstFPSema.isSigned());
1486 else if (SrcTy->isIntegerType())
1487 Result = FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(),
1488 DstFPSema);
1489 else
1490 Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema);
1491 }
1492 return Result;
1493 }
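
// Illustrative example (assumed): a widening fixed-point conversion such as
// 'short _Fract' -> '_Fract' goes through CreateFixedToFixed, which emits
// roughly
//   %1 = sext i8 %src to i16
//   %2 = shl i16 %1, 8
// i.e. the scaled-integer value is resized and shifted to the destination's
// number of fractional bits (with saturation for _Sat types).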
1494
1495 /// Emit a conversion from the specified complex type to the specified
1496 /// destination type, where the destination type is an LLVM scalar type.
1497 Value *ScalarExprEmitter::EmitComplexToScalarConversion(
1498 CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
1499 SourceLocation Loc) {
1500 // Get the source element type.
1501 SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
1502
1503   // Handle conversions to bool first; they are special: comparisons against 0.
1504 if (DstTy->isBooleanType()) {
1505 // Complex != 0 -> (Real != 0) | (Imag != 0)
1506 Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1507 Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
1508 return Builder.CreateOr(Src.first, Src.second, "tobool");
1509 }
1510
1511 // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
1512 // the imaginary part of the complex value is discarded and the value of the
1513 // real part is converted according to the conversion rules for the
1514   // corresponding real type."
1515 return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1516 }
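
// Illustrative example (assumed): for '_Complex double z; bool b = z;' the
// code above emits roughly
//   %re = fcmp une double %z.real, 0.0
//   %im = fcmp une double %z.imag, 0.0
//   %tobool = or i1 %re, %im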
1517
1518 Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1519 return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
1520 }
1521
1522 /// Emit a sanitization check for the given "binary" operation (which
1523 /// might actually be a unary increment which has been lowered to a binary
1524 /// operation). The check passes if all values in \p Checks (which are \c i1)
1525 /// are \c true.
1526 void ScalarExprEmitter::EmitBinOpCheck(
1527 ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
1528 assert(CGF.IsSanitizerScope);
1529 SanitizerHandler Check;
1530 SmallVector<llvm::Constant *, 4> StaticData;
1531 SmallVector<llvm::Value *, 2> DynamicData;
1532
1533 BinaryOperatorKind Opcode = Info.Opcode;
1534 if (BinaryOperator::isCompoundAssignmentOp(Opcode))
1535 Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode);
1536
1537 StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
1538 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
1539 if (UO && UO->getOpcode() == UO_Minus) {
1540 Check = SanitizerHandler::NegateOverflow;
1541 StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
1542 DynamicData.push_back(Info.RHS);
1543 } else {
1544 if (BinaryOperator::isShiftOp(Opcode)) {
1545 // Shift LHS negative or too large, or RHS out of bounds.
1546 Check = SanitizerHandler::ShiftOutOfBounds;
1547 const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
1548 StaticData.push_back(
1549 CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
1550 StaticData.push_back(
1551 CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
1552 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
1553       // Divide or modulo by zero, or signed overflow (e.g. INT_MAX / -1).
1554 Check = SanitizerHandler::DivremOverflow;
1555 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1556 } else {
1557 // Arithmetic overflow (+, -, *).
1558 switch (Opcode) {
1559 case BO_Add: Check = SanitizerHandler::AddOverflow; break;
1560 case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
1561 case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
1562 default: llvm_unreachable("unexpected opcode for bin op check");
1563 }
1564 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1565 }
1566 DynamicData.push_back(Info.LHS);
1567 DynamicData.push_back(Info.RHS);
1568 }
1569
1570 CGF.EmitCheck(Checks, Check, StaticData, DynamicData);
1571 }
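
// Illustrative example (assumed): with -fsanitize=signed-integer-overflow,
// 'int c = a + b;' is emitted via llvm.sadd.with.overflow.i32, and the
// overflow bit feeds a check that, on failure, calls the UBSan runtime
// handler __ubsan_handle_add_overflow with the source location, a type
// descriptor for 'int' (the StaticData), and both operand values (the
// DynamicData).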
1572
1573 //===----------------------------------------------------------------------===//
1574 // Visitor Methods
1575 //===----------------------------------------------------------------------===//
1576
1577 Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1578 CGF.ErrorUnsupported(E, "scalar expression");
1579 if (E->getType()->isVoidType())
1580 return nullptr;
1581 return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
1582 }
1583
1584 Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
1585 // Vector Mask Case
1586 if (E->getNumSubExprs() == 2) {
1587 Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
1588 Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
1589 Value *Mask;
1590
1591 auto *LTy = cast<llvm::FixedVectorType>(LHS->getType());
1592 unsigned LHSElts = LTy->getNumElements();
1593
1594 Mask = RHS;
1595
1596 auto *MTy = cast<llvm::FixedVectorType>(Mask->getType());
1597
1598 // Mask off the high bits of each shuffle index.
1599 Value *MaskBits =
1600 llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
1601 Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
1602
1603 // newv = undef
1604 // mask = mask & maskbits
1605 // for each elt
1606 // n = extract mask i
1607 // x = extract val n
1608 // newv = insert newv, x, i
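    //
    // Illustrative trace (assumed): with a <4 x float> LHS and mask <2, 0>,
    // the loop emits roughly
    //   %shuf_idx = extractelement <2 x i32> %mask, i64 0     ; = 2
    //   %shuf_elt = extractelement <4 x float> %lhs, i32 %shuf_idx
    //   %shuf_ins = insertelement <2 x float> undef, float %shuf_elt, i64 0
    // and similarly for element 1.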
1609 auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(),
1610 MTy->getNumElements());
1611 Value* NewV = llvm::UndefValue::get(RTy);
1612 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
1613 Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
1614 Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
1615
1616 Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
1617 NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
1618 }
1619 return NewV;
1620 }
1621
1622 Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
1623 Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
1624
1625 SmallVector<int, 32> Indices;
1626 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
1627 llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
1628 // Check for -1 and output it as undef in the IR.
1629 if (Idx.isSigned() && Idx.isAllOnesValue())
1630 Indices.push_back(-1);
1631 else
1632 Indices.push_back(Idx.getZExtValue());
1633 }
1634
1635 return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle");
1636 }
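
// Illustrative example (assumed): with two <4 x i32> operands,
// '__builtin_shufflevector(a, b, 0, 4)' lowers to a single instruction,
// roughly
//   %shuffle = shufflevector <4 x i32> %a, <4 x i32> %b, <2 x i32> <i32 0, i32 4>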
1637
1638 Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
1639 QualType SrcType = E->getSrcExpr()->getType(),
1640 DstType = E->getType();
1641
1642 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
1643
1644 SrcType = CGF.getContext().getCanonicalType(SrcType);
1645 DstType = CGF.getContext().getCanonicalType(DstType);
1646 if (SrcType == DstType) return Src;
1647
1648 assert(SrcType->isVectorType() &&
1649 "ConvertVector source type must be a vector");
1650 assert(DstType->isVectorType() &&
1651 "ConvertVector destination type must be a vector");
1652
1653 llvm::Type *SrcTy = Src->getType();
1654 llvm::Type *DstTy = ConvertType(DstType);
1655
1656 // Ignore conversions like int -> uint.
1657 if (SrcTy == DstTy)
1658 return Src;
1659
1660 QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
1661 DstEltType = DstType->castAs<VectorType>()->getElementType();
1662
1663 assert(SrcTy->isVectorTy() &&
1664 "ConvertVector source IR type must be a vector");
1665 assert(DstTy->isVectorTy() &&
1666 "ConvertVector destination IR type must be a vector");
1667
1668 llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(),
1669 *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType();
1670
1671 if (DstEltType->isBooleanType()) {
1672 assert((SrcEltTy->isFloatingPointTy() ||
1673 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");
1674
1675 llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
1676 if (SrcEltTy->isFloatingPointTy()) {
1677 return Builder.CreateFCmpUNE(Src, Zero, "tobool");
1678 } else {
1679 return Builder.CreateICmpNE(Src, Zero, "tobool");
1680 }
1681 }
1682
1683 // We have the arithmetic types: real int/float.
1684 Value *Res = nullptr;
1685
1686 if (isa<llvm::IntegerType>(SrcEltTy)) {
1687 bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
1688 if (isa<llvm::IntegerType>(DstEltTy))
1689 Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1690 else if (InputSigned)
1691 Res = Builder.CreateSIToFP(Src, DstTy, "conv");
1692 else
1693 Res = Builder.CreateUIToFP(Src, DstTy, "conv");
1694 } else if (isa<llvm::IntegerType>(DstEltTy)) {
1695 assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
1696 if (DstEltType->isSignedIntegerOrEnumerationType())
1697 Res = Builder.CreateFPToSI(Src, DstTy, "conv");
1698 else
1699 Res = Builder.CreateFPToUI(Src, DstTy, "conv");
1700 } else {
1701 assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
1702 "Unknown real conversion");
1703 if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
1704 Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
1705 else
1706 Res = Builder.CreateFPExt(Src, DstTy, "conv");
1707 }
1708
1709 return Res;
1710 }
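
// Illustrative example (assumed): '__builtin_convertvector(v, float4)' for a
// vector 'v' of four signed ints lowers to roughly
//   %conv = sitofp <4 x i32> %v to <4 x float>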
1711
1712 Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
1713 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
1714 CGF.EmitIgnoredExpr(E->getBase());
1715 return CGF.emitScalarConstant(Constant, E);
1716 } else {
1717 Expr::EvalResult Result;
1718 if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) {
1719 llvm::APSInt Value = Result.Val.getInt();
1720 CGF.EmitIgnoredExpr(E->getBase());
1721 return Builder.getInt(Value);
1722 }
1723 }
1724
1725 return EmitLoadOfLValue(E);
1726 }
1727
1728 Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
1729 TestAndClearIgnoreResultAssign();
1730
1731   // Emit subscript expressions in rvalue contexts. For most cases, this just
1732 // loads the lvalue formed by the subscript expr. However, we have to be
1733 // careful, because the base of a vector subscript is occasionally an rvalue,
1734 // so we can't get it as an lvalue.
1735 if (!E->getBase()->getType()->isVectorType())
1736 return EmitLoadOfLValue(E);
1737
1738   // Handle the vector case. The base must be a vector; the index must be an
1739   // integer value.
1740 Value *Base = Visit(E->getBase());
1741 Value *Idx = Visit(E->getIdx());
1742 QualType IdxTy = E->getIdx()->getType();
1743
1744 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
1745 CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
1746
1747 return Builder.CreateExtractElement(Base, Idx, "vecext");
1748 }
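
// Illustrative example (assumed): for an ext_vector_type 'float4 v', reading
// 'v[i]' as an rvalue lowers to roughly
//   %vecext = extractelement <4 x float> %v, i32 %i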
1749
1750 Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
1751 TestAndClearIgnoreResultAssign();
1752
1753   // Handle the matrix case: the base must be a matrix, and the row and column
1754   // indices must be integer values.
1755 Value *RowIdx = Visit(E->getRowIdx());
1756 Value *ColumnIdx = Visit(E->getColumnIdx());
1757 Value *Matrix = Visit(E->getBase());
1758
1759 // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
1760 llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
1761 return MB.CreateExtractElement(
1762 Matrix, RowIdx, ColumnIdx,
1763 E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
1764 }
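
// Illustrative example (assumed): for a 3x4 matrix_type of double, 'm[r][c]'
// is flattened column-major to the element index 'r + c * 3' and emitted as
// an extractelement from the underlying <12 x double> value.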
1765
1766 static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
1767 unsigned Off) {
1768 int MV = SVI->getMaskValue(Idx);
1769 if (MV == -1)
1770 return -1;
1771 return Off + MV;
1772 }
1773
1774 static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
1775 assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
1776 "Index operand too large for shufflevector mask!");
1777 return C->getZExtValue();
1778 }
1779
1780 Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
1781 bool Ignore = TestAndClearIgnoreResultAssign();
1782 (void)Ignore;
1783   assert(!Ignore && "init list ignored");
1784 unsigned NumInitElements = E->getNumInits();
1785
1786 if (E->hadArrayRangeDesignator())
1787 CGF.ErrorUnsupported(E, "GNU array range designator extension");
1788
1789 llvm::VectorType *VType =
1790 dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
1791
1792 if (!VType) {
1793 if (NumInitElements == 0) {
1794 // C++11 value-initialization for the scalar.
1795 return EmitNullValue(E->getType());
1796 }
1797 // We have a scalar in braces. Just use the first element.
1798 return Visit(E->getInit(0));
1799 }
1800
1801 unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();
1802
1803 // Loop over initializers collecting the Value for each, and remembering
1804   // whether the source was a swizzle (ExtVectorElementExpr). This will allow
1805 // us to fold the shuffle for the swizzle into the shuffle for the vector
1806 // initializer, since LLVM optimizers generally do not want to touch
1807 // shuffles.
1808 unsigned CurIdx = 0;
1809 bool VIsUndefShuffle = false;
1810 llvm::Value *V = llvm::UndefValue::get(VType);
1811 for (unsigned i = 0; i != NumInitElements; ++i) {
1812 Expr *IE = E->getInit(i);
1813 Value *Init = Visit(IE);
1814 SmallVector<int, 16> Args;
1815
1816 llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
1817
1818 // Handle scalar elements. If the scalar initializer is actually one
1819 // element of a different vector of the same width, use shuffle instead of
1820 // extract+insert.
1821 if (!VVT) {
1822 if (isa<ExtVectorElementExpr>(IE)) {
1823 llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
1824
1825 if (cast<llvm::FixedVectorType>(EI->getVectorOperandType())
1826 ->getNumElements() == ResElts) {
1827 llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
1828 Value *LHS = nullptr, *RHS = nullptr;
1829 if (CurIdx == 0) {
1830 // insert into undef -> shuffle (src, undef)
1831 // shufflemask must use an i32
1832 Args.push_back(getAsInt32(C, CGF.Int32Ty));
1833 Args.resize(ResElts, -1);
1834
1835 LHS = EI->getVectorOperand();
1836 RHS = V;
1837 VIsUndefShuffle = true;
1838 } else if (VIsUndefShuffle) {
1839 // insert into undefshuffle && size match -> shuffle (v, src)
1840 llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
1841 for (unsigned j = 0; j != CurIdx; ++j)
1842 Args.push_back(getMaskElt(SVV, j, 0));
1843 Args.push_back(ResElts + C->getZExtValue());
1844 Args.resize(ResElts, -1);
1845
1846 LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
1847 RHS = EI->getVectorOperand();
1848 VIsUndefShuffle = false;
1849 }
1850 if (!Args.empty()) {
1851 V = Builder.CreateShuffleVector(LHS, RHS, Args);
1852 ++CurIdx;
1853 continue;
1854 }
1855 }
1856 }
1857 V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
1858 "vecinit");
1859 VIsUndefShuffle = false;
1860 ++CurIdx;
1861 continue;
1862 }
1863
1864 unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements();
1865
1866 // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
1867 // input is the same width as the vector being constructed, generate an
1868 // optimized shuffle of the swizzle input into the result.
1869 unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
1870 if (isa<ExtVectorElementExpr>(IE)) {
1871 llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
1872 Value *SVOp = SVI->getOperand(0);
1873 auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType());
1874
1875 if (OpTy->getNumElements() == ResElts) {
1876 for (unsigned j = 0; j != CurIdx; ++j) {
1877 // If the current vector initializer is a shuffle with undef, merge
1878 // this shuffle directly into it.
1879 if (VIsUndefShuffle) {
1880 Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0));
1881 } else {
1882 Args.push_back(j);
1883 }
1884 }
1885 for (unsigned j = 0, je = InitElts; j != je; ++j)
1886 Args.push_back(getMaskElt(SVI, j, Offset));
1887 Args.resize(ResElts, -1);
1888
1889 if (VIsUndefShuffle)
1890 V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
1891
1892 Init = SVOp;
1893 }
1894 }
1895
1896 // Extend init to result vector length, and then shuffle its contribution
1897 // to the vector initializer into V.
1898 if (Args.empty()) {
1899 for (unsigned j = 0; j != InitElts; ++j)
1900 Args.push_back(j);
1901 Args.resize(ResElts, -1);
1902 Init = Builder.CreateShuffleVector(Init, Args, "vext");
1903
1904 Args.clear();
1905 for (unsigned j = 0; j != CurIdx; ++j)
1906 Args.push_back(j);
1907 for (unsigned j = 0; j != InitElts; ++j)
1908 Args.push_back(j + Offset);
1909 Args.resize(ResElts, -1);
1910 }
1911
1912 // If V is undef, make sure it ends up on the RHS of the shuffle to aid
1913 // merging subsequent shuffles into this one.
1914 if (CurIdx == 0)
1915 std::swap(V, Init);
1916 V = Builder.CreateShuffleVector(V, Init, Args, "vecinit");
1917 VIsUndefShuffle = isa<llvm::UndefValue>(Init);
1918 CurIdx += InitElts;
1919 }
1920
1921 // FIXME: evaluate codegen vs. shuffling against constant null vector.
1922 // Emit remaining default initializers.
1923 llvm::Type *EltTy = VType->getElementType();
1924
1926 for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
1927 Value *Idx = Builder.getInt32(CurIdx);
1928 llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
1929 V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
1930 }
1931 return V;
1932 }
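
// Illustrative example (assumed): '(float4){a, b, 0, 0}' is built by the
// loops above as a chain of insertelement instructions, roughly
//   %vecinit  = insertelement <4 x float> undef, float %a, i32 0
//   %vecinit1 = insertelement <4 x float> %vecinit, float %b, i32 1
// with elements 2 and 3 filled in by the default-initializer loop.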
1933
1934 bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
1935 const Expr *E = CE->getSubExpr();
1936
1937 if (CE->getCastKind() == CK_UncheckedDerivedToBase)
1938 return false;
1939
1940 if (isa<CXXThisExpr>(E->IgnoreParens())) {
1941 // We always assume that 'this' is never null.
1942 return false;
1943 }
1944
1945 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
1946 // And that glvalue casts are never null.
1947 if (ICE->getValueKind() != VK_RValue)
1948 return false;
1949 }
1950
1951 return true;
1952 }
1953
1954 // VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
1955 // have to handle a broader range of conversions than explicit casts, as they
1956 // handle things like function to ptr-to-function decay etc.
1957 Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
1958 Expr *E = CE->getSubExpr();
1959 QualType DestTy = CE->getType();
1960 CastKind Kind = CE->getCastKind();
1961
1962 // These cases are generally not written to ignore the result of
1963 // evaluating their sub-expressions, so we clear this now.
1964 bool Ignored = TestAndClearIgnoreResultAssign();
1965
1966 // Since almost all cast kinds apply to scalars, this switch doesn't have
1967 // a default case, so the compiler will warn on a missing case. The cases
1968 // are in the same order as in the CastKind enum.
1969 switch (Kind) {
1970 case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
1971 case CK_BuiltinFnToFnPtr:
1972 llvm_unreachable("builtin functions are handled elsewhere");
1973
1974 case CK_LValueBitCast:
1975 case CK_ObjCObjectLValueCast: {
1976 Address Addr = EmitLValue(E).getAddress(CGF);
1977 Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
1978 LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
1979 return EmitLoadOfLValue(LV, CE->getExprLoc());
1980 }
1981
1982 case CK_LValueToRValueBitCast: {
1983 LValue SourceLVal = CGF.EmitLValue(E);
1984 Address Addr = Builder.CreateElementBitCast(SourceLVal.getAddress(CGF),
1985 CGF.ConvertTypeForMem(DestTy));
1986 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
1987 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
1988 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
1989 }
1990
1991 case CK_CPointerToObjCPointerCast:
1992 case CK_BlockPointerToObjCPointerCast:
1993 case CK_AnyPointerToBlockPointerCast:
1994 case CK_BitCast: {
1995 Value *Src = Visit(const_cast<Expr*>(E));
1996 llvm::Type *SrcTy = Src->getType();
1997 llvm::Type *DstTy = ConvertType(DestTy);
1998 if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
1999 SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
2000 llvm_unreachable("wrong cast for pointers in different address spaces"
2001 "(must be an address space cast)!");
2002 }
2003
2004 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
2005 if (auto PT = DestTy->getAs<PointerType>())
2006 CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Src,
2007 /*MayBeNull=*/true,
2008 CodeGenFunction::CFITCK_UnrelatedCast,
2009 CE->getBeginLoc());
2010 }
2011
2012 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2013 const QualType SrcType = E->getType();
2014
2015 if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
2016         // Casting to a pointer that could carry dynamic information (provided
2017         // by invariant.group) requires a launder.
2018 Src = Builder.CreateLaunderInvariantGroup(Src);
2019 } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
2020           // Casting to a pointer that does not carry dynamic information
2021           // (provided by invariant.group) requires stripping it. Note that we
2022           // don't strip if the source could not be a dynamic type and the
2023           // destination could be, because in that case the dynamic information
2024           // is already laundered: launder(strip(src)) == launder(src), so there
2025           // is no need to add an extra strip before the launder.
2026 Src = Builder.CreateStripInvariantGroup(Src);
2027 }
2028 }
2029
2030 // Update heapallocsite metadata when there is an explicit pointer cast.
2031 if (auto *CI = dyn_cast<llvm::CallBase>(Src)) {
2032 if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE)) {
2033 QualType PointeeType = DestTy->getPointeeType();
2034 if (!PointeeType.isNull())
2035 CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType,
2036 CE->getExprLoc());
2037 }
2038 }
2039
2040 // If Src is a fixed vector and Dst is a scalable vector, and both have the
2041 // same element type, use the llvm.experimental.vector.insert intrinsic to
2042 // perform the bitcast.
2043 if (const auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
2044 if (const auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
2045 if (FixedSrc->getElementType() == ScalableDst->getElementType()) {
2046 llvm::Value *UndefVec = llvm::UndefValue::get(DstTy);
2047 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
2048 return Builder.CreateInsertVector(DstTy, UndefVec, Src, Zero,
2049 "castScalableSve");
2050 }
2051 }
2052 }
2053
2054 // If Src is a scalable vector and Dst is a fixed vector, and both have the
2055 // same element type, use the llvm.experimental.vector.extract intrinsic to
2056 // perform the bitcast.
2057 if (const auto *ScalableSrc = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
2058 if (const auto *FixedDst = dyn_cast<llvm::FixedVectorType>(DstTy)) {
2059 if (ScalableSrc->getElementType() == FixedDst->getElementType()) {
2060 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
2061 return Builder.CreateExtractVector(DstTy, Src, Zero, "castFixedSve");
2062 }
2063 }
2064 }
2065
2066 // Perform VLAT <-> VLST bitcast through memory.
2067 // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics
2068 // require the element types of the vectors to be the same, we
2069 // need to keep this around for casting between predicates, or more
2070 // generally for bitcasts between VLAT <-> VLST where the element
2071 // types of the vectors are not the same, until we figure out a better
2072 // way of doing these casts.
2073 if ((isa<llvm::FixedVectorType>(SrcTy) &&
2074 isa<llvm::ScalableVectorType>(DstTy)) ||
2075 (isa<llvm::ScalableVectorType>(SrcTy) &&
2076 isa<llvm::FixedVectorType>(DstTy))) {
2077 if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
2078 // Call expressions can't have a scalar return unless the return type
2079         // is a reference type, so an lvalue can't be emitted. Create a temp
2080         // alloca to store the call, bitcast the address, then load.
2081 QualType RetTy = CE->getCallReturnType(CGF.getContext());
2082 Address Addr =
2083 CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-call-rvalue");
2084 LValue LV = CGF.MakeAddrLValue(Addr, RetTy);
2085 CGF.EmitStoreOfScalar(Src, LV);
2086 Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy),
2087 "castFixedSve");
2088 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2089 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2090 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2091 }
2092
2093 Address Addr = EmitLValue(E).getAddress(CGF);
2094 Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
2095 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2096 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2097 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2098 }
2099
2100 return Builder.CreateBitCast(Src, DstTy);
2101 }
2102 case CK_AddressSpaceConversion: {
2103 Expr::EvalResult Result;
2104 if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
2105 Result.Val.isNullPointer()) {
2106       // If E has side effects, it is emitted even if its final result is a
2107 // null pointer. In that case, a DCE pass should be able to
2108 // eliminate the useless instructions emitted during translating E.
2109 if (Result.HasSideEffects)
2110 Visit(E);
2111 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(
2112 ConvertType(DestTy)), DestTy);
2113 }
2114     // Since the target may map different address spaces in AST to the same
2115     // address space, an address space conversion may end up as a bitcast.
2116 return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
2117 CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),
2118 DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy));
2119 }
2120 case CK_AtomicToNonAtomic:
2121 case CK_NonAtomicToAtomic:
2122 case CK_NoOp:
2123 case CK_UserDefinedConversion:
2124 return Visit(const_cast<Expr*>(E));
2125
2126 case CK_BaseToDerived: {
2127 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
2128 assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
2129
2130 Address Base = CGF.EmitPointerWithAlignment(E);
2131 Address Derived =
2132 CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
2133 CE->path_begin(), CE->path_end(),
2134 CGF.ShouldNullCheckClassCastValue(CE));
2135
2136 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2137 // performed and the object is not of the derived type.
2138 if (CGF.sanitizePerformTypeCheck())
2139 CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
2140 Derived.getPointer(), DestTy->getPointeeType());
2141
2142 if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
2143 CGF.EmitVTablePtrCheckForCast(
2144 DestTy->getPointeeType(), Derived.getPointer(),
2145 /*MayBeNull=*/true, CodeGenFunction::CFITCK_DerivedCast,
2146 CE->getBeginLoc());
2147
2148 return Derived.getPointer();
2149 }
2150 case CK_UncheckedDerivedToBase:
2151 case CK_DerivedToBase: {
2152 // The EmitPointerWithAlignment path does this fine; just discard
2153 // the alignment.
2154 return CGF.EmitPointerWithAlignment(CE).getPointer();
2155 }
2156
2157 case CK_Dynamic: {
2158 Address V = CGF.EmitPointerWithAlignment(E);
2159 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
2160 return CGF.EmitDynamicCast(V, DCE);
2161 }
2162
2163 case CK_ArrayToPointerDecay:
2164 return CGF.EmitArrayToPointerDecay(E).getPointer();
2165 case CK_FunctionToPointerDecay:
2166 return EmitLValue(E).getPointer(CGF);
2167
2168 case CK_NullToPointer:
2169 if (MustVisitNullValue(E))
2170 CGF.EmitIgnoredExpr(E);
2171
2172 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
2173 DestTy);
2174
2175 case CK_NullToMemberPointer: {
2176 if (MustVisitNullValue(E))
2177 CGF.EmitIgnoredExpr(E);
2178
2179 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
2180 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
2181 }
2182
2183 case CK_ReinterpretMemberPointer:
2184 case CK_BaseToDerivedMemberPointer:
2185 case CK_DerivedToBaseMemberPointer: {
2186 Value *Src = Visit(E);
2187
2188 // Note that the AST doesn't distinguish between checked and
2189 // unchecked member pointer conversions, so we always have to
2190 // implement checked conversions here. This is inefficient when
2191 // actual control flow may be required in order to perform the
2192 // check, which it is for data member pointers (but not member
2193 // function pointers on Itanium and ARM).
2194 return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
2195 }
2196
2197 case CK_ARCProduceObject:
2198 return CGF.EmitARCRetainScalarExpr(E);
2199 case CK_ARCConsumeObject:
2200 return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
2201 case CK_ARCReclaimReturnedObject:
2202 return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
2203 case CK_ARCExtendBlockObject:
2204 return CGF.EmitARCExtendBlockObject(E);
2205
2206 case CK_CopyAndAutoreleaseBlockObject:
2207 return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
2208
2209 case CK_FloatingRealToComplex:
2210 case CK_FloatingComplexCast:
2211 case CK_IntegralRealToComplex:
2212 case CK_IntegralComplexCast:
2213 case CK_IntegralComplexToFloatingComplex:
2214 case CK_FloatingComplexToIntegralComplex:
2215 case CK_ConstructorConversion:
2216 case CK_ToUnion:
2217 llvm_unreachable("scalar cast to non-scalar value");
2218
2219 case CK_LValueToRValue:
2220 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
2221 assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
2222 return Visit(const_cast<Expr*>(E));
2223
2224 case CK_IntegralToPointer: {
2225 Value *Src = Visit(const_cast<Expr*>(E));
2226
2227 // First, convert to the correct width so that we control the kind of
2228 // extension.
2229 auto DestLLVMTy = ConvertType(DestTy);
2230 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
2231 bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
2232 llvm::Value* IntResult =
2233 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
2234
2235 auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);
2236
2237 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2238 // Going from integer to pointer that could be dynamic requires reloading
2239 // dynamic information from invariant.group.
2240 if (DestTy.mayBeDynamicClass())
2241 IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
2242 }
2243 return IntToPtr;
2244 }
2245 case CK_PointerToIntegral: {
2246 assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
2247 auto *PtrExpr = Visit(E);
2248
2249 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2250 const QualType SrcType = E->getType();
2251
2252 // Casting to integer requires stripping dynamic information as it does
2253       // not carry it.
2254 if (SrcType.mayBeDynamicClass())
2255 PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
2256 }
2257
2258 return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
2259 }
2260 case CK_ToVoid: {
2261 CGF.EmitIgnoredExpr(E);
2262 return nullptr;
2263 }
2264 case CK_MatrixCast: {
2265 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2266 CE->getExprLoc());
2267 }
2268 case CK_VectorSplat: {
2269 llvm::Type *DstTy = ConvertType(DestTy);
2270 Value *Elt = Visit(const_cast<Expr*>(E));
2271 // Splat the element across to all elements
2272 unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
2273 return Builder.CreateVectorSplat(NumElements, Elt, "splat");
2274 }
2275
2276 case CK_FixedPointCast:
2277 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2278 CE->getExprLoc());
2279
2280 case CK_FixedPointToBoolean:
2281 assert(E->getType()->isFixedPointType() &&
2282 "Expected src type to be fixed point type");
2283 assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
2284 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2285 CE->getExprLoc());
2286
2287 case CK_FixedPointToIntegral:
2288 assert(E->getType()->isFixedPointType() &&
2289 "Expected src type to be fixed point type");
2290 assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
2291 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2292 CE->getExprLoc());
2293
2294 case CK_IntegralToFixedPoint:
2295 assert(E->getType()->isIntegerType() &&
2296 "Expected src type to be an integer");
2297 assert(DestTy->isFixedPointType() &&
2298 "Expected dest type to be fixed point type");
2299 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2300 CE->getExprLoc());
2301
2302 case CK_IntegralCast: {
2303 ScalarConversionOpts Opts;
2304 if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
2305 if (!ICE->isPartOfExplicitCast())
2306 Opts = ScalarConversionOpts(CGF.SanOpts);
2307 }
2308 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2309 CE->getExprLoc(), Opts);
2310 }
2311 case CK_IntegralToFloating:
2312 case CK_FloatingToIntegral:
2313 case CK_FloatingCast:
2314 case CK_FixedPointToFloating:
2315 case CK_FloatingToFixedPoint: {
2316 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2317 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2318 CE->getExprLoc());
2319 }
2320 case CK_BooleanToSignedIntegral: {
2321 ScalarConversionOpts Opts;
2322 Opts.TreatBooleanAsSigned = true;
2323 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2324 CE->getExprLoc(), Opts);
2325 }
2326 case CK_IntegralToBoolean:
2327 return EmitIntToBoolConversion(Visit(E));
2328 case CK_PointerToBoolean:
2329 return EmitPointerToBoolConversion(Visit(E), E->getType());
2330 case CK_FloatingToBoolean: {
2331 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2332 return EmitFloatToBoolConversion(Visit(E));
2333 }
2334 case CK_MemberPointerToBoolean: {
2335 llvm::Value *MemPtr = Visit(E);
2336 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
2337 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
2338 }
2339
2340 case CK_FloatingComplexToReal:
2341 case CK_IntegralComplexToReal:
2342 return CGF.EmitComplexExpr(E, false, true).first;
2343
2344 case CK_FloatingComplexToBoolean:
2345 case CK_IntegralComplexToBoolean: {
2346 CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
2347
2348 // TODO: kill this function off, inline appropriate case here
2349 return EmitComplexToScalarConversion(V, E->getType(), DestTy,
2350 CE->getExprLoc());
2351 }
2352
2353 case CK_ZeroToOCLOpaqueType: {
2354 assert((DestTy->isEventT() || DestTy->isQueueT() ||
2355 DestTy->isOCLIntelSubgroupAVCType()) &&
2356 "CK_ZeroToOCLEvent cast on non-event type");
2357 return llvm::Constant::getNullValue(ConvertType(DestTy));
2358 }
2359
2360 case CK_IntToOCLSampler:
2361 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
2362
2363 } // end of switch
2364
2365 llvm_unreachable("unknown scalar cast");
2366 }
2367
2368 Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
2369 CodeGenFunction::StmtExprEvaluation eval(CGF);
2370 Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
2371 !E->getType()->isVoidType());
2372 if (!RetAlloca.isValid())
2373 return nullptr;
2374 return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
2375 E->getExprLoc());
2376 }
2377
2378 Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
2379 CodeGenFunction::RunCleanupsScope Scope(CGF);
2380 Value *V = Visit(E->getSubExpr());
2381 // Defend against dominance problems caused by jumps out of expression
2382 // evaluation through the shared cleanup block.
2383 Scope.ForceCleanup({&V});
2384 return V;
2385 }
2386
2387 //===----------------------------------------------------------------------===//
2388 // Unary Operators
2389 //===----------------------------------------------------------------------===//
2390
2391 static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
2392 llvm::Value *InVal, bool IsInc,
2393 FPOptions FPFeatures) {
2394 BinOpInfo BinOp;
2395 BinOp.LHS = InVal;
2396 BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
2397 BinOp.Ty = E->getType();
2398 BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
2399 BinOp.FPFeatures = FPFeatures;
2400 BinOp.E = E;
2401 return BinOp;
2402 }
2403
2404 llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
2405 const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
2406 llvm::Value *Amount =
2407 llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
2408 StringRef Name = IsInc ? "inc" : "dec";
2409 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
2410 case LangOptions::SOB_Defined:
2411 return Builder.CreateAdd(InVal, Amount, Name);
2412 case LangOptions::SOB_Undefined:
2413 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
2414 return Builder.CreateNSWAdd(InVal, Amount, Name);
2415 LLVM_FALLTHROUGH;
2416 case LangOptions::SOB_Trapping:
2417 if (!E->canOverflow())
2418 return Builder.CreateNSWAdd(InVal, Amount, Name);
2419 return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
2420 E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
2421 }
2422 llvm_unreachable("Unknown SignedOverflowBehaviorTy");
2423 }
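
// Illustrative example (assumed): for 'int x; ++x;' the three behaviors are
//   -fwrapv:            %inc = add i32 %x, 1
//   default (UB):       %inc = add nsw i32 %x, 1
//   -ftrapv/sanitized:  llvm.sadd.with.overflow.i32 plus a branch to the
//                       overflow handler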
2424
2425 namespace {
2426 /// Handles check and update for lastprivate conditional variables.
2427 class OMPLastprivateConditionalUpdateRAII {
2428 private:
2429 CodeGenFunction &CGF;
2430 const UnaryOperator *E;
2431
2432 public:
2433   OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
2434 const UnaryOperator *E)
2435 : CGF(CGF), E(E) {}
2436   ~OMPLastprivateConditionalUpdateRAII() {
2437 if (CGF.getLangOpts().OpenMP)
2438 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(
2439 CGF, E->getSubExpr());
2440 }
2441 };
2442 } // namespace
2443
2444 llvm::Value *
2445 ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
2446 bool isInc, bool isPre) {
2447 OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
2448 QualType type = E->getSubExpr()->getType();
2449 llvm::PHINode *atomicPHI = nullptr;
2450 llvm::Value *value;
2451 llvm::Value *input;
2452
2453 int amount = (isInc ? 1 : -1);
2454 bool isSubtraction = !isInc;
2455
2456 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
2457 type = atomicTy->getValueType();
2458 if (isInc && type->isBooleanType()) {
2459 llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
2460 if (isPre) {
2461 Builder.CreateStore(True, LV.getAddress(CGF), LV.isVolatileQualified())
2462 ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
2463 return Builder.getTrue();
2464 }
2465       // For atomic bool increment, we just store true and return it for
2466       // preincrement; for postincrement we do an atomic swap with true.
2467 return Builder.CreateAtomicRMW(
2468 llvm::AtomicRMWInst::Xchg, LV.getPointer(CGF), True,
2469 llvm::AtomicOrdering::SequentiallyConsistent);
2470 }
2471 // Special case for atomic increment / decrement on integers, emit
2472 // atomicrmw instructions. We skip this if we want to be doing overflow
2473 // checking, and fall into the slow path with the atomic cmpxchg loop.
2474 if (!type->isBooleanType() && type->isIntegerType() &&
2475 !(type->isUnsignedIntegerType() &&
2476 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
2477 CGF.getLangOpts().getSignedOverflowBehavior() !=
2478 LangOptions::SOB_Trapping) {
2479 llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
2480 llvm::AtomicRMWInst::Sub;
2481 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
2482 llvm::Instruction::Sub;
2483 llvm::Value *amt = CGF.EmitToMemory(
2484 llvm::ConstantInt::get(ConvertType(type), 1, true), type);
2485 llvm::Value *old =
2486 Builder.CreateAtomicRMW(aop, LV.getPointer(CGF), amt,
2487 llvm::AtomicOrdering::SequentiallyConsistent);
2488 return isPre ? Builder.CreateBinOp(op, old, amt) : old;
2489 }
2490 value = EmitLoadOfLValue(LV, E->getExprLoc());
2491 input = value;
2492 // For every other atomic operation, we need to emit a load-op-cmpxchg loop
2493 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
2494 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
2495 value = CGF.EmitToMemory(value, type);
2496 Builder.CreateBr(opBB);
2497 Builder.SetInsertPoint(opBB);
2498 atomicPHI = Builder.CreatePHI(value->getType(), 2);
2499 atomicPHI->addIncoming(value, startBB);
2500 value = atomicPHI;
2501 } else {
2502 value = EmitLoadOfLValue(LV, E->getExprLoc());
2503 input = value;
2504 }
2505
2506 // Special case of integer increment that we have to check first: bool++.
2507 // Due to promotion rules, we get:
2508 // bool++ -> bool = bool + 1
2509 // -> bool = (int)bool + 1
2510 // -> bool = ((int)bool + 1 != 0)
2511   // An interesting aspect of this is that the result of an increment is
2512   // always true; a decrement does not have this property.
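  //
  // Illustrative example (assumed): for '_Bool b; b++;' no add is emitted at
  // all; the new value is simply 'true', stored back as roughly
  //   store i8 1, i8* %b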
2513 if (isInc && type->isBooleanType()) {
2514 value = Builder.getTrue();
2515
2516 // Most common case by far: integer increment.
2517 } else if (type->isIntegerType()) {
2518 QualType promotedType;
2519 bool canPerformLossyDemotionCheck = false;
2520 if (type->isPromotableIntegerType()) {
2521 promotedType = CGF.getContext().getPromotedIntegerType(type);
2522 assert(promotedType != type && "Shouldn't promote to the same type.");
2523 canPerformLossyDemotionCheck = true;
2524 canPerformLossyDemotionCheck &=
2525 CGF.getContext().getCanonicalType(type) !=
2526 CGF.getContext().getCanonicalType(promotedType);
2527 canPerformLossyDemotionCheck &=
2528 PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
2529 type, promotedType);
2530 assert((!canPerformLossyDemotionCheck ||
2531 type->isSignedIntegerOrEnumerationType() ||
2532 promotedType->isSignedIntegerOrEnumerationType() ||
2533 ConvertType(type)->getScalarSizeInBits() ==
2534 ConvertType(promotedType)->getScalarSizeInBits()) &&
2535 "The following check expects that if we do promotion to different "
2536 "underlying canonical type, at least one of the types (either "
2537 "base or promoted) will be signed, or the bitwidths will match.");
2538 }
2539 if (CGF.SanOpts.hasOneOf(
2540 SanitizerKind::ImplicitIntegerArithmeticValueChange) &&
2541 canPerformLossyDemotionCheck) {
2542       // While `x += 1` (for `x` with width less than int) is modeled as
2543       // promotion+arithmetic+demotion, where lossy demotion is easy to catch,
2544       // inc/dec with width less than int can't overflow because of promotion
2545       // rules, so the promotion+demotion steps are normally elided and lossy
2546       // "demotion" cannot be caught. Because we still want to catch these
2547       // cases when the sanitizer is enabled, we perform the promotion, then
2548       // perform the increment/decrement in the wider type, and finally perform
2549       // the demotion. This will catch lossy demotions.
2550
2551 value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
2552 Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
2553 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2554 // Do pass non-default ScalarConversionOpts so that sanitizer check is
2555 // emitted.
2556 value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
2557 ScalarConversionOpts(CGF.SanOpts));
2558
2559 // Note that signed integer inc/dec with width less than int can't
2560 // overflow because of promotion rules; we're just eliding a few steps
2561 // here.
2562 } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
2563 value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
2564 } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
2565 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
2566 value = EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
2567 E, value, isInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
2568 } else {
2569 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
2570 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2571 }
2572
2573 // Next most common: pointer increment.
2574 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
2575 QualType type = ptr->getPointeeType();
2576
2577 // VLA types don't have constant size.
2578 if (const VariableArrayType *vla
2579 = CGF.getContext().getAsVariableArrayType(type)) {
2580 llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
      if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
      if (CGF.getLangOpts().isSignedOverflowDefined())
        value = Builder.CreateGEP(value, numElts, "vla.inc");
      else
        value = CGF.EmitCheckedInBoundsGEP(
            value, numElts, /*SignedIndices=*/false, isSubtraction,
            E->getExprLoc(), "vla.inc");

    // Arithmetic on function pointers (!) is just +-1.
    } else if (type->isFunctionType()) {
      llvm::Value *amt = Builder.getInt32(amount);

      value = CGF.EmitCastToVoidPtr(value);
      if (CGF.getLangOpts().isSignedOverflowDefined())
        value = Builder.CreateGEP(value, amt, "incdec.funcptr");
      else
        value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
                                           isSubtraction, E->getExprLoc(),
                                           "incdec.funcptr");
      value = Builder.CreateBitCast(value, input->getType());

    // For everything else, we can just do a simple increment.
    } else {
      llvm::Value *amt = Builder.getInt32(amount);
      if (CGF.getLangOpts().isSignedOverflowDefined())
        value = Builder.CreateGEP(value, amt, "incdec.ptr");
      else
        value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
                                           isSubtraction, E->getExprLoc(),
                                           "incdec.ptr");
    }

  // Vector increment/decrement.
  } else if (type->isVectorType()) {
    if (type->hasIntegerRepresentation()) {
      llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);

      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
    } else {
      value = Builder.CreateFAdd(
          value,
          llvm::ConstantFP::get(value->getType(), amount),
          isInc ? "inc" : "dec");
    }

  // Floating point.
  } else if (type->isRealFloatingType()) {
    // Add the inc/dec to the real part.
    llvm::Value *amt;
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);

    if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
      // Another special case: half FP increment should be done via float.
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
        value = Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
                                 CGF.CGM.FloatTy),
            input, "incdec.conv");
      } else {
        value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv");
      }
    }

    if (value->getType()->isFloatTy())
      amt = llvm::ConstantFP::get(VMContext,
                                  llvm::APFloat(static_cast<float>(amount)));
    else if (value->getType()->isDoubleTy())
      amt = llvm::ConstantFP::get(VMContext,
                                  llvm::APFloat(static_cast<double>(amount)));
    else {
      // Remaining types are Half, LongDouble or __float128. Convert from float.
      llvm::APFloat F(static_cast<float>(amount));
      bool ignored;
      const llvm::fltSemantics *FS;
      // Don't use getFloatTypeSemantics because Half isn't
      // necessarily represented using the "half" LLVM type.
      if (value->getType()->isFP128Ty())
        FS = &CGF.getTarget().getFloat128Format();
      else if (value->getType()->isHalfTy())
        FS = &CGF.getTarget().getHalfFormat();
      else
        FS = &CGF.getTarget().getLongDoubleFormat();
      F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
      amt = llvm::ConstantFP::get(VMContext, F);
    }
    value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");

    if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
        value = Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
                                 CGF.CGM.FloatTy),
            value, "incdec.conv");
      } else {
        value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv");
      }
    }

  // Fixed-point types.
  } else if (type->isFixedPointType()) {
    // Fixed-point types are tricky. In some cases, it isn't possible to
    // represent a 1 or a -1 in the type at all. Piggyback off of
    // EmitFixedPointBinOp to avoid having to reimplement saturation.
    BinOpInfo Info;
    Info.E = E;
    Info.Ty = E->getType();
    Info.Opcode = isInc ? BO_Add : BO_Sub;
    Info.LHS = value;
    Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false);
    // If the type is signed, it's better to represent this as +(-1) or -(-1),
    // since -1 is guaranteed to be representable.
    if (type->isSignedFixedPointType()) {
      Info.Opcode = isInc ? BO_Sub : BO_Add;
      Info.RHS = Builder.CreateNeg(Info.RHS);
    }
    // Now, convert from our invented integer literal to the type of the unary
    // op. This will upscale and saturate if necessary. This value can become
    // undef in some cases.
    llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
    auto DstSema = CGF.getContext().getFixedPointSemantics(Info.Ty);
    Info.RHS = FPBuilder.CreateIntegerToFixed(Info.RHS, true, DstSema);
    value = EmitFixedPointBinOp(Info);
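    // Illustrative sketch of the trick above: for "_Sat _Fract f; ++f;" the
    // literal +1 is not representable in _Fract (its range is [-1, 1)), so
    // the increment is emitted as "f - (-1)"; EmitFixedPointBinOp then
    // performs the subtraction with saturation instead of wrapping.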

  // Objective-C pointer types.
  } else {
    const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
    value = CGF.EmitCastToVoidPtr(value);

    CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
    if (!isInc) size = -size;
    llvm::Value *sizeValue =
        llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());

    if (CGF.getLangOpts().isSignedOverflowDefined())
      value = Builder.CreateGEP(value, sizeValue, "incdec.objptr");
    else
      value = CGF.EmitCheckedInBoundsGEP(value, sizeValue,
                                         /*SignedIndices=*/false, isSubtraction,
                                         E->getExprLoc(), "incdec.objptr");
    value = Builder.CreateBitCast(value, input->getType());
  }

  if (atomicPHI) {
    llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
    llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
    auto Pair = CGF.EmitAtomicCompareExchange(
        LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
    llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
    llvm::Value *success = Pair.second;
    atomicPHI->addIncoming(old, curBlock);
    Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
    Builder.SetInsertPoint(contBB);
    return isPre ? value : input;
  }

  // Store the updated result through the lvalue.
  if (LV.isBitField())
    CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
  else
    CGF.EmitStoreThroughLValue(RValue::get(value), LV);

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? value : input;
}

Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
  TestAndClearIgnoreResultAssign();
  Value *Op = Visit(E->getSubExpr());

  // Generate a unary FNeg for FP ops.
  if (Op->getType()->isFPOrFPVectorTy())
    return Builder.CreateFNeg(Op, "fneg");

  // Emit unary minus with EmitSub so we handle overflow cases etc.
  BinOpInfo BinOp;
  BinOp.RHS = Op;
  BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
  BinOp.Ty = E->getType();
  BinOp.Opcode = BO_Sub;
  BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
  BinOp.E = E;
  return EmitSub(BinOp);
}

Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
  TestAndClearIgnoreResultAssign();
  Value *Op = Visit(E->getSubExpr());
  return Builder.CreateNot(Op, "neg");
}

Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
  // Perform vector logical not on comparison with zero vector.
  if (E->getType()->isVectorType() &&
      E->getType()->castAs<VectorType>()->getVectorKind() ==
          VectorType::GenericVector) {
    Value *Oper = Visit(E->getSubExpr());
    Value *Zero = llvm::Constant::getNullValue(Oper->getType());
    Value *Result;
    if (Oper->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
    } else
      Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
    return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
  }

  // Compare operand to zero.
  Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());

  // Invert value.
  // TODO: Could dynamically modify easy computations here. For example, if
  // the operand is an icmp ne, turn into icmp eq.
  BoolVal = Builder.CreateNot(BoolVal, "lnot");

  // ZExt result to the expr type.
  return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
}
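// For a GNU vector the path above is element-wise; e.g. (illustrative)
//   typedef int v4si __attribute__((vector_size(16)));
//   v4si r = !v;
// emits an "icmp eq" of v against the zero vector followed by a sign
// extension, so each lane of r ends up 0 or -1 rather than a single i1.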

Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
  // Try folding the offsetof to a constant.
  Expr::EvalResult EVResult;
  if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
    llvm::APSInt Value = EVResult.Val.getInt();
    return Builder.getInt(Value);
  }

  // Loop over the components of the offsetof to compute the value.
  unsigned n = E->getNumComponents();
  llvm::Type *ResultType = ConvertType(E->getType());
  llvm::Value *Result = llvm::Constant::getNullValue(ResultType);
  QualType CurrentType = E->getTypeSourceInfo()->getType();
  for (unsigned i = 0; i != n; ++i) {
    OffsetOfNode ON = E->getComponent(i);
    llvm::Value *Offset = nullptr;
    switch (ON.getKind()) {
    case OffsetOfNode::Array: {
      // Compute the index.
      Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
      llvm::Value *Idx = CGF.EmitScalarExpr(IdxExpr);
      bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
      Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");

      // Save the element type.
      CurrentType =
          CGF.getContext().getAsArrayType(CurrentType)->getElementType();

      // Compute the element size.
      llvm::Value *ElemSize = llvm::ConstantInt::get(ResultType,
          CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());

      // Multiply out to compute the result.
      Offset = Builder.CreateMul(Idx, ElemSize);
      break;
    }

    case OffsetOfNode::Field: {
      FieldDecl *MemberDecl = ON.getField();
      RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
      const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);

      // Compute the index of the field in its parent.
      unsigned i = 0;
      // FIXME: It would be nice if we didn't have to loop here!
      for (RecordDecl::field_iterator Field = RD->field_begin(),
                                      FieldEnd = RD->field_end();
           Field != FieldEnd; ++Field, ++i) {
        if (*Field == MemberDecl)
          break;
      }
      assert(i < RL.getFieldCount() && "offsetof field in wrong type");

      // Compute the offset to the field.
      int64_t OffsetInt = RL.getFieldOffset(i) /
                          CGF.getContext().getCharWidth();
      Offset = llvm::ConstantInt::get(ResultType, OffsetInt);

      // Save the element type.
      CurrentType = MemberDecl->getType();
      break;
    }

    case OffsetOfNode::Identifier:
      llvm_unreachable("dependent __builtin_offsetof");

    case OffsetOfNode::Base: {
      if (ON.getBase()->isVirtual()) {
        CGF.ErrorUnsupported(E, "virtual base in offsetof");
        continue;
      }

      RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
      const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);

      // Save the element type.
      CurrentType = ON.getBase()->getType();

      // Compute the offset to the base.
      const RecordType *BaseRT = CurrentType->getAs<RecordType>();
      CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
      CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
      Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
      break;
    }
    }
    Result = Builder.CreateAdd(Result, Offset);
  }
  return Result;
}
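// Sketch of the loop above: __builtin_offsetof(struct S, a[i].b) with a
// non-constant i (so the constant folder bails out) decomposes into an Array
// component contributing i * sizeof(element) and a Field component
// contributing the constant byte offset of b; the partial offsets are
// simply summed into Result.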

/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the
/// argument type of the sizeof expression as an integer.
Value *ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
    const UnaryExprOrTypeTraitExpr *E) {
  QualType TypeToSize = E->getTypeOfArgument();
  if (E->getKind() == UETT_SizeOf) {
    if (const VariableArrayType *VAT =
            CGF.getContext().getAsVariableArrayType(TypeToSize)) {
      if (E->isArgumentType()) {
        // sizeof(type) - make sure to emit the VLA size.
        CGF.EmitVariablyModifiedType(TypeToSize);
      } else {
        // C99 6.5.3.4p2: If the argument is an expression of VLA type,
        // it is evaluated.
        CGF.EmitIgnoredExpr(E->getArgumentExpr());
      }

      auto VlaSize = CGF.getVLASize(VAT);
      llvm::Value *size = VlaSize.NumElts;

      // Scale the number of non-VLA elements by the non-VLA element size.
      CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
      if (!eltSize.isOne())
        size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), size);

      return size;
    }
  } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
    auto Alignment =
        CGF.getContext()
            .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
                E->getTypeOfArgument()->getPointeeType()))
            .getQuantity();
    return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
  }

  // If this isn't sizeof(vla), the result must be constant; use the constant
  // folding logic so we don't have to duplicate it here.
  return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
}
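// Illustrative VLA case: for "int a[n]; sizeof(a)" (with 4-byte int),
// getVLASize() returns n as NumElts with element type int, so the code above
// multiplies by 4 with a no-unsigned-wrap mul and yields the runtime value
// 4*n instead of a constant.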

Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
  Expr *Op = E->getSubExpr();
  if (Op->getType()->isAnyComplexType()) {
    // If it's an l-value, load through the appropriate subobject l-value.
    // Note that we have to ask E because Op might be an l-value that
    // this won't work for, e.g. an Obj-C property.
    if (E->isGLValue())
      return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
                                  E->getExprLoc()).getScalarVal();

    // Otherwise, calculate and project.
    return CGF.EmitComplexExpr(Op, false, true).first;
  }

  return Visit(Op);
}

Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
  Expr *Op = E->getSubExpr();
  if (Op->getType()->isAnyComplexType()) {
    // If it's an l-value, load through the appropriate subobject l-value.
    // Note that we have to ask E because Op might be an l-value that
    // this won't work for, e.g. an Obj-C property.
    if (Op->isGLValue())
      return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
                                  E->getExprLoc()).getScalarVal();

    // Otherwise, calculate and project.
    return CGF.EmitComplexExpr(Op, true, false).second;
  }

  // __imag on a scalar returns zero. Emit the subexpr to ensure side
  // effects are evaluated, but not the actual value.
  if (Op->isGLValue())
    CGF.EmitLValue(Op);
  else
    CGF.EmitScalarExpr(Op, true);
  return llvm::Constant::getNullValue(ConvertType(E->getType()));
}

//===----------------------------------------------------------------------===//
// Binary Operators
//===----------------------------------------------------------------------===//

BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
  TestAndClearIgnoreResultAssign();
  BinOpInfo Result;
  Result.LHS = Visit(E->getLHS());
  Result.RHS = Visit(E->getRHS());
  Result.Ty = E->getType();
  Result.Opcode = E->getOpcode();
  Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
  Result.E = E;
  return Result;
}

LValue ScalarExprEmitter::EmitCompoundAssignLValue(
    const CompoundAssignOperator *E,
    Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
    Value *&Result) {
  QualType LHSTy = E->getLHS()->getType();
  BinOpInfo OpInfo;

  if (E->getComputationResultType()->isAnyComplexType())
    return CGF.EmitScalarCompoundAssignWithComplex(E, Result);

  // Emit the RHS first. __block variables need to have the rhs evaluated
  // first, plus this should improve codegen a little.
  OpInfo.RHS = Visit(E->getRHS());
  OpInfo.Ty = E->getComputationResultType();
  OpInfo.Opcode = E->getOpcode();
  OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
  OpInfo.E = E;
  // Load/convert the LHS.
  LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

  llvm::PHINode *atomicPHI = nullptr;
  if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
    QualType type = atomicTy->getValueType();
    if (!type->isBooleanType() && type->isIntegerType() &&
        !(type->isUnsignedIntegerType() &&
          CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
        CGF.getLangOpts().getSignedOverflowBehavior() !=
            LangOptions::SOB_Trapping) {
      llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
      llvm::Instruction::BinaryOps Op;
      switch (OpInfo.Opcode) {
      // We don't have atomicrmw operations for *, %, /, <<, >>.
      case BO_MulAssign: case BO_DivAssign:
      case BO_RemAssign:
      case BO_ShlAssign:
      case BO_ShrAssign:
        break;
      case BO_AddAssign:
        AtomicOp = llvm::AtomicRMWInst::Add;
        Op = llvm::Instruction::Add;
        break;
      case BO_SubAssign:
        AtomicOp = llvm::AtomicRMWInst::Sub;
        Op = llvm::Instruction::Sub;
        break;
      case BO_AndAssign:
        AtomicOp = llvm::AtomicRMWInst::And;
        Op = llvm::Instruction::And;
        break;
      case BO_XorAssign:
        AtomicOp = llvm::AtomicRMWInst::Xor;
        Op = llvm::Instruction::Xor;
        break;
      case BO_OrAssign:
        AtomicOp = llvm::AtomicRMWInst::Or;
        Op = llvm::Instruction::Or;
        break;
      default:
        llvm_unreachable("Invalid compound assignment type");
      }
      if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
        llvm::Value *Amt = CGF.EmitToMemory(
            EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
                                 E->getExprLoc()),
            LHSTy);
        Value *OldVal = Builder.CreateAtomicRMW(
            AtomicOp, LHSLV.getPointer(CGF), Amt,
            llvm::AtomicOrdering::SequentiallyConsistent);

        // Since the operation is atomic, the result type is guaranteed to be
        // the same as the input in LLVM terms.
        Result = Builder.CreateBinOp(Op, OldVal, Amt);
        return LHSLV;
      }
    }
    // FIXME: For floating point types, we should be saving and restoring the
    // floating point environment in the loop.
    llvm::BasicBlock *startBB = Builder.GetInsertBlock();
    llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
    OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
    OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
    Builder.CreateBr(opBB);
    Builder.SetInsertPoint(opBB);
    atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
    atomicPHI->addIncoming(OpInfo.LHS, startBB);
    OpInfo.LHS = atomicPHI;
  } else
    OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
  SourceLocation Loc = E->getExprLoc();
  OpInfo.LHS =
      EmitScalarConversion(OpInfo.LHS, LHSTy, E->getComputationLHSType(), Loc);

  // Expand the binary operator.
  Result = (this->*Func)(OpInfo);

  // Convert the result back to the LHS type,
  // potentially with an implicit conversion sanitizer check.
  Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy,
                                Loc, ScalarConversionOpts(CGF.SanOpts));

  if (atomicPHI) {
    llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
    llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
    auto Pair = CGF.EmitAtomicCompareExchange(
        LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
    llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
    llvm::Value *success = Pair.second;
    atomicPHI->addIncoming(old, curBlock);
    Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
    Builder.SetInsertPoint(contBB);
    return LHSLV;
  }

  // Store the result value into the LHS lvalue. Bit-fields are handled
  // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
  // 'An assignment expression has the value of the left operand after the
  // assignment...'.
  if (LHSLV.isBitField())
    CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result);
  else
    CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV);

  if (CGF.getLangOpts().OpenMP)
    CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF,
                                                                  E->getLHS());
  return LHSLV;
}
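// Lowering sketch for the atomic paths above (illustrative):
//   _Atomic int x; x += 1;  -->  atomicrmw add, seq_cst
//   _Atomic int x; x *= 2;  -->  load; loop { compute; cmpxchg } until success
// The atomicrmw fast path is only taken for integer +, -, &, ^, | (and only
// when no overflow sanitizer or -ftrapv applies); everything else falls back
// to the compare-exchange loop built around atomicPHI.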

Value *ScalarExprEmitter::EmitCompoundAssign(
    const CompoundAssignOperator *E,
    Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
  bool Ignore = TestAndClearIgnoreResultAssign();
  Value *RHS = nullptr;
  LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);

  // If the result is clearly ignored, return now.
  if (Ignore)
    return nullptr;

  // The result of an assignment in C is the assigned r-value.
  if (!CGF.getLangOpts().CPlusPlus)
    return RHS;

  // If the lvalue is non-volatile, return the computed value of the assignment.
  if (!LHS.isVolatileQualified())
    return RHS;

  // Otherwise, reload the value.
  return EmitLoadOfLValue(LHS, E->getExprLoc());
}

void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
    const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;

  if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
    Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
                                    SanitizerKind::IntegerDivideByZero));
  }

  const auto *BO = cast<BinaryOperator>(Ops.E);
  if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
      Ops.Ty->hasSignedIntegerRepresentation() &&
      !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
      Ops.mayHaveIntegerOverflow()) {
    llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());

    llvm::Value *IntMin =
        Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
    llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);

    llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
    llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
    llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
    Checks.push_back(
        std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow));
  }

  if (Checks.size() > 0)
    EmitBinOpCheck(Checks, Ops);
}
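// Concretely (illustrative): for "int a, b; a / b" with -fsanitize=integer,
// the function above emits "icmp ne b, 0" for the divide-by-zero check and
// "(a != INT_MIN) | (b != -1)" for the INT_MIN / -1 overflow case; both
// conditions feed EmitBinOpCheck rather than trapping directly.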

Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
  {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
         CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
        Ops.Ty->isIntegerType() &&
        (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
      llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
      EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
    } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
               Ops.Ty->isRealFloatingType() &&
               Ops.mayHaveFloatDivisionByZero()) {
      llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
      llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
      EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
                     Ops);
    }
  }

  if (Ops.Ty->isConstantMatrixType()) {
    llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
    // We need to check the types of the operands of the operator to get the
    // correct matrix dimensions.
    auto *BO = cast<BinaryOperator>(Ops.E);
    (void)BO;
    assert(
        isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) &&
        "first operand must be a matrix");
    assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&
           "second operand must be an arithmetic type");
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
    return MB.CreateScalarDiv(Ops.LHS, Ops.RHS,
                              Ops.Ty->hasUnsignedIntegerRepresentation());
  }

  if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
    llvm::Value *Val;
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
    Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
    if ((CGF.getLangOpts().OpenCL &&
         !CGF.CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
        (CGF.getLangOpts().HIP && CGF.getLangOpts().CUDAIsDevice &&
         !CGF.CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
      // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp.
      // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
      // build option allows an application to specify that single precision
      // floating-point divide (x/y and 1/x) and sqrt used in the program
      // source are correctly rounded.
      llvm::Type *ValTy = Val->getType();
      if (ValTy->isFloatTy() ||
          (isa<llvm::VectorType>(ValTy) &&
           cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy()))
        CGF.SetFPAccuracy(Val, 2.5);
    }
    return Val;
  } else if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(Ops);
  else if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
  else
    return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
}

Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
  // Rem in C can't be a floating point type: C99 6.5.5p2.
  if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
       CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
      Ops.Ty->isIntegerType() &&
      (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
    EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
  }

  if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
  else
    return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
}

Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
  unsigned IID;
  unsigned OpID = 0;
  SanitizerHandler OverflowKind;

  bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
  switch (Ops.Opcode) {
  case BO_Add:
  case BO_AddAssign:
    OpID = 1;
    IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
                     llvm::Intrinsic::uadd_with_overflow;
    OverflowKind = SanitizerHandler::AddOverflow;
    break;
  case BO_Sub:
  case BO_SubAssign:
    OpID = 2;
    IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
                     llvm::Intrinsic::usub_with_overflow;
    OverflowKind = SanitizerHandler::SubOverflow;
    break;
  case BO_Mul:
  case BO_MulAssign:
    OpID = 3;
    IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
                     llvm::Intrinsic::umul_with_overflow;
    OverflowKind = SanitizerHandler::MulOverflow;
    break;
  default:
    llvm_unreachable("Unsupported operation for overflow detection");
  }
  OpID <<= 1;
  if (isSigned)
    OpID |= 1;

  CodeGenFunction::SanitizerScope SanScope(&CGF);
  llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);

  llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);

  Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
  Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
  Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);

  // Handle overflow with llvm.trap if no custom handler has been specified.
  const std::string *handlerName =
      &CGF.getLangOpts().OverflowHandler;
  if (handlerName->empty()) {
    // If the signed-integer-overflow sanitizer is enabled, emit a call to its
    // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
    if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
      llvm::Value *NotOverflow = Builder.CreateNot(overflow);
      SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
                                    : SanitizerKind::UnsignedIntegerOverflow;
      EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops);
    } else
      CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);
    return result;
  }

  // Branch in case of overflow.
  llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
  llvm::BasicBlock *continueBB =
      CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
  llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);

  Builder.CreateCondBr(overflow, overflowBB, continueBB);

  // If an overflow handler is set, then we want to call it and then use its
  // result, if it returns.
  Builder.SetInsertPoint(overflowBB);

  // Get the overflow handler.
  llvm::Type *Int8Ty = CGF.Int8Ty;
  llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
  llvm::FunctionType *handlerTy =
      llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
  llvm::FunctionCallee handler =
      CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);

  // Sign extend the args to 64-bit, so that we can use the same handler for
  // all types of overflow.
  llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
  llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);

  // Call the handler with the two arguments, the operation, and the size of
  // the result.
  llvm::Value *handlerArgs[] = {
    lhs,
    rhs,
    Builder.getInt8(OpID),
    Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
  };
  llvm::Value *handlerResult =
      CGF.EmitNounwindRuntimeCall(handler, handlerArgs);

  // Truncate the result back to the desired size.
  handlerResult = Builder.CreateTrunc(handlerResult, opTy);
  Builder.CreateBr(continueBB);

  Builder.SetInsertPoint(continueBB);
  llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
  phi->addIncoming(result, initialBB);
  phi->addIncoming(handlerResult, overflowBB);

  return phi;
}
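// For example (illustrative, roughly): with -ftrapv, "int c = a + b" becomes
//   %pair = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
// followed by a trap guarded on the overflow bit, while
// -fsanitize=signed-integer-overflow routes the same bit into a diagnostic
// handler, and a user -ftrapv-handler takes the branch-and-PHI path above.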

/// Emit pointer + index arithmetic.
static Value *emitPointerArithmetic(CodeGenFunction &CGF,
                                    const BinOpInfo &op,
                                    bool isSubtraction) {
  // Must have binary (not unary) expr here. Unary pointer
  // increment/decrement doesn't use this path.
  const BinaryOperator *expr = cast<BinaryOperator>(op.E);

  Value *pointer = op.LHS;
  Expr *pointerOperand = expr->getLHS();
  Value *index = op.RHS;
  Expr *indexOperand = expr->getRHS();

  // In a subtraction, the LHS is always the pointer.
  if (!isSubtraction && !pointer->getType()->isPointerTy()) {
    std::swap(pointer, index);
    std::swap(pointerOperand, indexOperand);
  }

  bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();

  unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
  auto &DL = CGF.CGM.getDataLayout();
  auto PtrTy = cast<llvm::PointerType>(pointer->getType());

  // Some versions of glibc and gcc use idioms (particularly in their malloc
  // routines) that add a pointer-sized integer (known to be a pointer value)
  // to a null pointer in order to cast the value back to an integer or as
  // part of a pointer alignment algorithm. This is undefined behavior, but
  // we'd like to be able to compile programs that use it.
  //
  // Normally, we'd generate a GEP with a null-pointer base here in response
  // to that code, but it's also UB to dereference a pointer created that
  // way. Instead (as an acknowledged hack to tolerate the idiom) we will
  // generate a direct cast of the integer value to a pointer.
  //
  // The idiom (p = nullptr + N) is not recognized if any of the following are
  // true:
  //
  //   The operation is subtraction.
  //   The index is not pointer-sized.
  //   The pointer type is not byte-sized.
  //
  if (BinaryOperator::isNullPointerArithmeticExtension(CGF.getContext(),
                                                       op.Opcode,
                                                       expr->getLHS(),
                                                       expr->getRHS()))
    return CGF.Builder.CreateIntToPtr(index, pointer->getType());

  if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
    // Zero-extend or sign-extend the index value to the pointer's index
    // width, according to whether the index is signed or not.
    index = CGF.Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
                                      "idx.ext");
  }

  // If this is subtraction, negate the index.
  if (isSubtraction)
    index = CGF.Builder.CreateNeg(index, "idx.neg");

  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
    CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
                        /*Accessed*/ false);

  const PointerType *pointerType
    = pointerOperand->getType()->getAs<PointerType>();
  if (!pointerType) {
    QualType objectType = pointerOperand->getType()
                                        ->castAs<ObjCObjectPointerType>()
                                        ->getPointeeType();
    llvm::Value *objectSize
      = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));

    index = CGF.Builder.CreateMul(index, objectSize);

    Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
    result = CGF.Builder.CreateGEP(result, index, "add.ptr");
    return CGF.Builder.CreateBitCast(result, pointer->getType());
  }

  QualType elementType = pointerType->getPointeeType();
  if (const VariableArrayType *vla
        = CGF.getContext().getAsVariableArrayType(elementType)) {
    // The element count here is the total number of non-VLA elements.
    llvm::Value *numElements = CGF.getVLASize(vla).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply. We suppress this if overflow is not undefined behavior.
    if (CGF.getLangOpts().isSignedOverflowDefined()) {
      index = CGF.Builder.CreateMul(index, numElements, "vla.index");
      pointer = CGF.Builder.CreateGEP(pointer, index, "add.ptr");
    } else {
      index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
      pointer =
          CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
                                     op.E->getExprLoc(), "add.ptr");
    }
    return pointer;
  }

  // Explicitly handle GNU void* and function pointer arithmetic extensions. The
  // GNU void* casts amount to no-ops since our void* type is i8*, but this is
  // future proof.
  if (elementType->isVoidType() || elementType->isFunctionType()) {
    Value *result = CGF.EmitCastToVoidPtr(pointer);
    result = CGF.Builder.CreateGEP(result, index, "add.ptr");
    return CGF.Builder.CreateBitCast(result, pointer->getType());
  }

  if (CGF.getLangOpts().isSignedOverflowDefined())
    return CGF.Builder.CreateGEP(pointer, index, "add.ptr");

  return CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
                                    op.E->getExprLoc(), "add.ptr");
}
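// Net effect (illustrative, roughly): for "int *p; p + i" this emits
//   %idx = sext i32 %i to i64                         ; if i is narrower
//   %r   = getelementptr inbounds i32, i32* %p, i64 %idx
// where the inbounds form comes from EmitCheckedInBoundsGEP (which also adds
// the pointer-overflow sanitizer check when enabled), and -fwrapv downgrades
// it to a plain GEP.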

// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
// Addend. Use negMul and negAdd to negate the first operand of the Mul or
// the add operand respectively. This allows fmuladd to represent a*b-c, or
// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
// efficient operations.
static Value *buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
                           const CodeGenFunction &CGF, CGBuilderTy &Builder,
                           bool negMul, bool negAdd) {
  assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set.");

  Value *MulOp0 = MulOp->getOperand(0);
  Value *MulOp1 = MulOp->getOperand(1);
  if (negMul)
    MulOp0 = Builder.CreateFNeg(MulOp0, "neg");
  if (negAdd)
    Addend = Builder.CreateFNeg(Addend, "neg");

  Value *FMulAdd = nullptr;
  if (Builder.getIsFPConstrained()) {
    assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&
           "Only constrained operation should be created when Builder is in FP "
           "constrained mode");
    FMulAdd = Builder.CreateConstrainedFPCall(
        CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd,
                             Addend->getType()),
        {MulOp0, MulOp1, Addend});
  } else {
    FMulAdd = Builder.CreateCall(
        CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
        {MulOp0, MulOp1, Addend});
  }
  MulOp->eraseFromParent();

  return FMulAdd;
}

// Check whether it would be legal to emit an fmuladd intrinsic call to
// represent op and if so, build the fmuladd.
//
// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
// Does NOT check the type of the operation - it's assumed that this function
// will be called from contexts where it's known that the type is contractable.
static Value *tryEmitFMulAdd(const BinOpInfo &op,
                             const CodeGenFunction &CGF, CGBuilderTy &Builder,
                             bool isSub = false) {

  assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
          op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
         "Only fadd/fsub can be the root of an fmuladd.");

  // Check whether this op is marked as fusable.
  if (!op.FPFeatures.allowFPContractWithinStatement())
    return nullptr;

  // We have a potentially fusable op. Look for a mul on one of the operands.
  // Also, make sure that the mul result isn't used directly. In that case,
  // there's no point creating a muladd operation.
  if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) {
    if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
        LHSBinOp->use_empty())
      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
  }
  if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(op.RHS)) {
    if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
        RHSBinOp->use_empty())
      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
  }

  if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(op.LHS)) {
    if (LHSBinOp->getIntrinsicID() ==
            llvm::Intrinsic::experimental_constrained_fmul &&
        LHSBinOp->use_empty())
      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
  }
  if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(op.RHS)) {
    if (RHSBinOp->getIntrinsicID() ==
            llvm::Intrinsic::experimental_constrained_fmul &&
        RHSBinOp->use_empty())
      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
  }

  return nullptr;
}
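// For instance (illustrative): with -ffp-contract=on, "a * b + c" where the
// fmul result has no other users becomes a single
//   call float @llvm.fmuladd.f32(float %a, float %b, float %c)
// and the negated forms a*b-c and c-a*b are expressed by fneg'ing an operand
// before the call, as buildFMulAdd does above.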

Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
  if (op.LHS->getType()->isPointerTy() ||
      op.RHS->getType()->isPointerTy())
    return emitPointerArithmetic(CGF, op, CodeGenFunction::NotSubtraction);

  if (op.Ty->isSignedIntegerOrEnumerationType()) {
    switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
    case LangOptions::SOB_Defined:
      return Builder.CreateAdd(op.LHS, op.RHS, "add");
    case LangOptions::SOB_Undefined:
      if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
        return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
      LLVM_FALLTHROUGH;
    case LangOptions::SOB_Trapping:
      if (CanElideOverflowCheck(CGF.getContext(), op))
        return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
      return EmitOverflowCheckedBinOp(op);
    }
  }

  if (op.Ty->isConstantMatrixType()) {
    llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    return MB.CreateAdd(op.LHS, op.RHS);
  }

  if (op.Ty->isUnsignedIntegerType() &&
      CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
      !CanElideOverflowCheck(CGF.getContext(), op))
    return EmitOverflowCheckedBinOp(op);

  if (op.LHS->getType()->isFPOrFPVectorTy()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    // Try to form an fmuladd.
    if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
      return FMulAdd;

    return Builder.CreateFAdd(op.LHS, op.RHS, "add");
  }

  if (op.isFixedPointOp())
    return EmitFixedPointBinOp(op);

  return Builder.CreateAdd(op.LHS, op.RHS, "add");
}

/// The resulting value must be calculated with exact precision, so the operands
/// may not be the same type.
Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
  using llvm::APSInt;
  using llvm::ConstantInt;

  // This is either a binary operation where at least one of the operands is
  // a fixed-point type, or a unary operation where the operand is a fixed-point
  // type. The result type of a binary operation is determined by
  // Sema::handleFixedPointConversions().
  QualType ResultTy = op.Ty;
  QualType LHSTy, RHSTy;
  if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) {
    RHSTy = BinOp->getRHS()->getType();
    if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) {
      // For compound assignment, the effective type of the LHS at this point
      // is the computation LHS type, not the actual LHS type, and the final
      // result type is not the type of the expression but rather the
      // computation result type.
      LHSTy = CAO->getComputationLHSType();
      ResultTy = CAO->getComputationResultType();
    } else
      LHSTy = BinOp->getLHS()->getType();
  } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) {
    LHSTy = UnOp->getSubExpr()->getType();
    RHSTy = UnOp->getSubExpr()->getType();
  }
  ASTContext &Ctx = CGF.getContext();
  Value *LHS = op.LHS;
  Value *RHS = op.RHS;

  auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);
  auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);
  auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
  auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);

  // Perform the actual operation.
  Value *Result;
  llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
  switch (op.Opcode) {
  case BO_AddAssign:
  case BO_Add:
    Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_SubAssign:
  case BO_Sub:
    Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_MulAssign:
  case BO_Mul:
    Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_DivAssign:
  case BO_Div:
    Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_ShlAssign:
  case BO_Shl:
    Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS);
    break;
  case BO_ShrAssign:
  case BO_Shr:
    Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS);
    break;
  case BO_LT:
    return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_GT:
    return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_LE:
    return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_GE:
    return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_EQ:
    // For equality operations, we assume any padding bits on unsigned types are
    // zero'd out. They could be overwritten through non-saturating operations
    // that cause overflow, but this leads to undefined behavior.
    return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_NE:
    return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_Cmp:
  case BO_LAnd:
  case BO_LOr:
    llvm_unreachable("Found unimplemented fixed point binary operation");
  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Rem:
  case BO_Xor:
  case BO_And:
  case BO_Or:
  case BO_Assign:
  case BO_RemAssign:
  case BO_AndAssign:
  case BO_XorAssign:
  case BO_OrAssign:
  case BO_Comma:
    llvm_unreachable("Found unsupported binary operation for fixed point types.");
  }

  bool IsShift = BinaryOperator::isShiftOp(op.Opcode) ||
                 BinaryOperator::isShiftAssignOp(op.Opcode);
  // Convert to the result type.
  return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema
                                                      : CommonFixedSema,
                                      ResultFixedSema);
}
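// Worked sketch (illustrative): adding a short _Accum to an _Accum first
// derives common semantics wide enough to represent either operand exactly,
// performs the arithmetic at that width, and then CreateFixedToFixed converts
// back to the result type - except for shifts, whose result stays in the
// LHS's own semantics, which is why IsShift selects LHSFixedSema above.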

Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
  // The LHS is always a pointer if either side is.
  if (!op.LHS->getType()->isPointerTy()) {
    if (op.Ty->isSignedIntegerOrEnumerationType()) {
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        return Builder.CreateSub(op.LHS, op.RHS, "sub");
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
        LLVM_FALLTHROUGH;
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), op))
          return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
        return EmitOverflowCheckedBinOp(op);
      }
    }

    if (op.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
      return MB.CreateSub(op.LHS, op.RHS);
    }

    if (op.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), op))
      return EmitOverflowCheckedBinOp(op);

    if (op.LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
      // Try to form an fmuladd.
      if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
        return FMulAdd;
      return Builder.CreateFSub(op.LHS, op.RHS, "sub");
    }

    if (op.isFixedPointOp())
      return EmitFixedPointBinOp(op);

    return Builder.CreateSub(op.LHS, op.RHS, "sub");
  }

  // If the RHS is not a pointer, then we have normal pointer
  // arithmetic.
  if (!op.RHS->getType()->isPointerTy())
    return emitPointerArithmetic(CGF, op, CodeGenFunction::IsSubtraction);

  // Otherwise, this is a pointer subtraction.

  // Do the raw subtraction part.
  llvm::Value *LHS
    = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
  llvm::Value *RHS
    = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
  Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");

  // Okay, figure out the element size.
  const BinaryOperator *expr = cast<BinaryOperator>(op.E);
  QualType elementType = expr->getLHS()->getType()->getPointeeType();

  llvm::Value *divisor = nullptr;

  // For a variable-length array, this is going to be non-constant.
  if (const VariableArrayType *vla
        = CGF.getContext().getAsVariableArrayType(elementType)) {
    auto VlaSize = CGF.getVLASize(vla);
    elementType = VlaSize.Type;
    divisor = VlaSize.NumElts;

    // Scale the number of non-VLA elements by the non-VLA element size.
    CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
    if (!eltSize.isOne())
      divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);

  // For everything else, we can just compute it, safe in the
  // assumption that Sema won't let anything through that we can't
  // safely compute the size of.
  } else {
    CharUnits elementSize;
    // Handle GCC extension for pointer arithmetic on void* and
    // function pointer types.
    if (elementType->isVoidType() || elementType->isFunctionType())
      elementSize = CharUnits::One();
    else
      elementSize = CGF.getContext().getTypeSizeInChars(elementType);

    // Don't even emit the divide for element size of 1.
    if (elementSize.isOne())
      return diffInChars;

    divisor = CGF.CGM.getSize(elementSize);
  }

  // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
  // pointer difference in C is only defined in the case where both operands
  // are pointing to elements of an array.
  return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
}
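// Sketch of the pointer-difference path above (illustrative, assuming
// 4-byte int): for "int *p, *q; p - q" the emitted IR is roughly
//   %d = sub i64 %p.int, %q.int    ; ptrtoint'ed operands
//   %r = sdiv exact i64 %d, 4
// "exact" is sound because C only defines the subtraction when both pointers
// address elements of the same array, so the byte difference is a multiple
// of the element size.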

Value *ScalarExprEmitter::GetWidthMinusOneValue(Value *LHS, Value *RHS) {
  llvm::IntegerType *Ty;
  if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
    Ty = cast<llvm::IntegerType>(VT->getElementType());
  else
    Ty = cast<llvm::IntegerType>(LHS->getType());
  return llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth() - 1);
}

Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
                                              const Twine &Name) {
  llvm::IntegerType *Ty;
  if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
    Ty = cast<llvm::IntegerType>(VT->getElementType());
  else
    Ty = cast<llvm::IntegerType>(LHS->getType());

  if (llvm::isPowerOf2_64(Ty->getBitWidth()))
    return Builder.CreateAnd(RHS, GetWidthMinusOneValue(LHS, RHS), Name);

  return Builder.CreateURem(
      RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name);
}
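// E.g. (illustrative): for a 32-bit LHS this computes "RHS & 31", matching
// the OpenCL rule that shift amounts are taken modulo the promoted operand
// width; the URem form only applies to unusual non-power-of-two widths,
// where a mask would compute the wrong remainder.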

Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
  // TODO: This misses out on the sanitizer check below.
  if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(Ops);

  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");

  bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
                            Ops.Ty->hasSignedIntegerRepresentation() &&
                            !CGF.getLangOpts().isSignedOverflowDefined() &&
                            !CGF.getLangOpts().CPlusPlus20;
  bool SanitizeUnsignedBase =
      CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) &&
      Ops.Ty->hasUnsignedIntegerRepresentation();
  bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
  bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
  // OpenCL 6.3j: shift values are effectively % word size of LHS.
  if (CGF.getLangOpts().OpenCL)
    RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask");
  else if ((SanitizeBase || SanitizeExponent) &&
           isa<llvm::IntegerType>(Ops.LHS->getType())) {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks;
    llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, Ops.RHS);
    llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);

    if (SanitizeExponent) {
      Checks.push_back(
          std::make_pair(ValidExponent, SanitizerKind::ShiftExponent));
    }

    if (SanitizeBase) {
      // Check whether we are shifting any non-zero bits off the top of the
      // integer. We only emit this check if the exponent is valid - otherwise
      // the instructions below would have undefined behavior themselves.
      llvm::BasicBlock *Orig = Builder.GetInsertBlock();
      llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
      llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
      Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
      llvm::Value *PromotedWidthMinusOne =
          (RHS == Ops.RHS) ? WidthMinusOne
                           : GetWidthMinusOneValue(Ops.LHS, RHS);
      CGF.EmitBlock(CheckShiftBase);
      llvm::Value *BitsShiftedOff = Builder.CreateLShr(
          Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
                                     /*NUW*/ true, /*NSW*/ true),
          "shl.check");
      if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
        // In C99, we are not permitted to shift a 1 bit into the sign bit.
        // Under C++11's rules, shifting a 1 bit into the sign bit is
        // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
        // define signed left shifts, so we use the C99 and C++11 rules there).
        // Unsigned shifts can always shift into the top bit.
        llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
        BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
      }
      llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
      llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
      CGF.EmitBlock(Cont);
      llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
      BaseCheck->addIncoming(Builder.getTrue(), Orig);
      BaseCheck->addIncoming(ValidBase, CheckShiftBase);
      Checks.push_back(std::make_pair(
          BaseCheck, SanitizeSignedBase ? SanitizerKind::ShiftBase
                                        : SanitizerKind::UnsignedShiftBase));
    }

    assert(!Checks.empty());
    EmitBinOpCheck(Checks, Ops);
  }

  return Builder.CreateShl(Ops.LHS, RHS, "shl");
}
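// Illustrative check placement for "x << y" with -fsanitize=shift on i32:
// the exponent test above is "y ule 31", and the base test lshr's x by
// (31 - y) to see whether any set bits would be shifted out (with one extra
// lshr in C++ / unsigned mode, where shifting a 1 into the top bit is
// allowed); only if both hold does control reach the plain shl unchecked.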

Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
  // TODO: This misses out on the sanitizer check below.
  if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(Ops);

  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");

  // OpenCL 6.3j: shift values are effectively % word size of LHS.
  if (CGF.getLangOpts().OpenCL)
    RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask");
  else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
           isa<llvm::IntegerType>(Ops.LHS->getType())) {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    llvm::Value *Valid =
        Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS));
    EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops);
  }

  if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateLShr(Ops.LHS, RHS, "shr");
  return Builder.CreateAShr(Ops.LHS, RHS, "shr");
}

enum IntrinsicType { VCMPEQ, VCMPGT };
// Return the corresponding comparison intrinsic for the given vector type.
static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
                                        BuiltinType::Kind ElemKind) {
  switch (ElemKind) {
  default: llvm_unreachable("unexpected element type");
  case BuiltinType::Char_U:
  case BuiltinType::UChar:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
  case BuiltinType::Char_S:
  case BuiltinType::SChar:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
  case BuiltinType::UShort:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
  case BuiltinType::Short:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
  case BuiltinType::UInt:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
  case BuiltinType::Int:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
  case BuiltinType::ULong:
  case BuiltinType::ULongLong:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
  case BuiltinType::Long:
  case BuiltinType::LongLong:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
  case BuiltinType::Float:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
  case BuiltinType::Double:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
                            llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
  case BuiltinType::UInt128:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
  case BuiltinType::Int128:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
  }
}
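// These "_p" predicate intrinsics compare whole vectors and report the
// outcome through the PowerPC CR6 condition register as a scalar; e.g.
// (illustrative) an equality test on two int vectors maps to
// ppc_altivec_vcmpequw_p called with a CR6 selector. This is how EmitCompare
// below collapses an AltiVec vector comparison into a single 0/1 value.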
3995
EmitCompare(const BinaryOperator * E,llvm::CmpInst::Predicate UICmpOpc,llvm::CmpInst::Predicate SICmpOpc,llvm::CmpInst::Predicate FCmpOpc,bool IsSignaling)3996 Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
3997 llvm::CmpInst::Predicate UICmpOpc,
3998 llvm::CmpInst::Predicate SICmpOpc,
3999 llvm::CmpInst::Predicate FCmpOpc,
4000 bool IsSignaling) {
4001 TestAndClearIgnoreResultAssign();
4002 Value *Result;
4003 QualType LHSTy = E->getLHS()->getType();
4004 QualType RHSTy = E->getRHS()->getType();
4005 if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
4006 assert(E->getOpcode() == BO_EQ ||
4007 E->getOpcode() == BO_NE);
4008 Value *LHS = CGF.EmitScalarExpr(E->getLHS());
4009 Value *RHS = CGF.EmitScalarExpr(E->getRHS());
4010 Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
4011 CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
4012 } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
4013 BinOpInfo BOInfo = EmitBinOps(E);
4014 Value *LHS = BOInfo.LHS;
4015 Value *RHS = BOInfo.RHS;
4016
4017 // If AltiVec, the comparison results in a numeric type, so we use
4018 // intrinsics comparing vectors and giving 0 or 1 as a result
4019 if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
4020 // constants for mapping CR6 register bits to predicate result
4021 enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;
4022
4023 llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
4024
4025 // in several cases vector arguments order will be reversed
4026 Value *FirstVecArg = LHS,
4027 *SecondVecArg = RHS;
4028
4029 QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
4030 BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();
4031
      switch(E->getOpcode()) {
      default: llvm_unreachable("is not a comparison operation");
      case BO_EQ:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_NE:
        CR6 = CR6_EQ;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_LT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        std::swap(FirstVecArg, SecondVecArg);
        break;
      case BO_GT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        break;
      case BO_LE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
          std::swap(FirstVecArg, SecondVecArg);
        }
        else {
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
        }
        break;
      case BO_GE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
        }
        else {
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
        }
        break;
      }

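      // Illustrative sketch (assumed operand shapes, not emitted verbatim):
      // for `a < b` on vector signed int this becomes a predicate call with
      // swapped operands, testing the CR6 "all elements true" bit (CR6_LT=2):
      //   %r = call i32 @llvm.ppc.altivec.vcmpgtsw.p(i32 2, <4 x i32> %b,
      //                                              <4 x i32> %a)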
      Value *CR6Param = Builder.getInt32(CR6);
      llvm::Function *F = CGF.CGM.getIntrinsic(ID);
      Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});

      // The result type of the intrinsic may not be the same as E->getType().
      // If E->getType() is not BoolTy, EmitScalarConversion will do the
      // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
      // do nothing, and a ResultTy wider than i1 would cause a crash later,
      // so truncate to i1 here.
      llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
      if (ResultTy->getBitWidth() > 1 &&
          E->getType() == CGF.getContext().BoolTy)
        Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
      return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                                  E->getExprLoc());
    }

    if (BOInfo.isFixedPointOp()) {
      Result = EmitFixedPointBinOp(BOInfo);
    } else if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
      if (!IsSignaling)
        Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
      else
        Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp");
    } else if (LHSTy->hasSignedIntegerRepresentation()) {
      Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
    } else {
      // Unsigned integers and pointers.

      if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
          !isa<llvm::ConstantPointerNull>(LHS) &&
          !isa<llvm::ConstantPointerNull>(RHS)) {

        // Dynamic information must be stripped for comparisons, because it
        // could leak the dynamic information. Based on comparisons of
        // pointers to dynamic objects, the optimizer can replace one pointer
        // with another, which might be incorrect in the presence of invariant
        // groups. Comparison with null is safe because null does not carry
        // any dynamic information.
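        // For example (illustrative IR), each stripped operand becomes
        // roughly:
        //   %lhs.stripped = call i8* @llvm.strip.invariant.group.p0i8(i8* %lhs)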
        if (LHSTy.mayBeDynamicClass())
          LHS = Builder.CreateStripInvariantGroup(LHS);
        if (RHSTy.mayBeDynamicClass())
          RHS = Builder.CreateStripInvariantGroup(RHS);
      }

      Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
    }

    // If this is a vector comparison, sign extend the result to the
    // appropriate vector integer type and return it (don't convert to bool).
    if (LHSTy->isVectorType())
      return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");

  } else {
    // Complex Comparison: can only be an equality comparison.
    CodeGenFunction::ComplexPairTy LHS, RHS;
    QualType CETy;
    if (auto *CTy = LHSTy->getAs<ComplexType>()) {
      LHS = CGF.EmitComplexExpr(E->getLHS());
      CETy = CTy->getElementType();
    } else {
      LHS.first = Visit(E->getLHS());
      LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
      CETy = LHSTy;
    }
    if (auto *CTy = RHSTy->getAs<ComplexType>()) {
      RHS = CGF.EmitComplexExpr(E->getRHS());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy,
                                                     CTy->getElementType()) &&
             "The element types must always match.");
      (void)CTy;
    } else {
      RHS.first = Visit(E->getRHS());
      RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
             "The element types must always match.");
    }

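    // Componentwise sketch: (a+bi) == (c+di) iff a==c && b==d, and
    // (a+bi) != (c+di) iff a!=c || b!=d, which is what the and/or below emit.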
    Value *ResultR, *ResultI;
    if (CETy->isRealFloatingType()) {
      // As complex comparisons can only be equality comparisons, they
      // are never signaling comparisons.
      ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
    } else {
      // Complex comparisons can only be equality comparisons. As such, signed
      // and unsigned opcodes are the same.
      ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
    }

    if (E->getOpcode() == BO_EQ) {
      Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
    } else {
      assert(E->getOpcode() == BO_NE &&
             "Complex comparison other than == or != ?");
      Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
    }
  }

  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                              E->getExprLoc());
}

Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  bool Ignore = TestAndClearIgnoreResultAssign();

  Value *RHS;
  LValue LHS;

  switch (E->getLHS()->getType().getObjCLifetime()) {
  case Qualifiers::OCL_Strong:
    std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
    break;

  case Qualifiers::OCL_Autoreleasing:
    std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
    break;

  case Qualifiers::OCL_ExplicitNone:
    std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
    break;

  case Qualifiers::OCL_Weak:
    RHS = Visit(E->getRHS());
    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
    RHS = CGF.EmitARCStoreWeak(LHS.getAddress(CGF), RHS, Ignore);
    break;

  case Qualifiers::OCL_None:
    // __block variables need to have the rhs evaluated first, plus
    // this should improve codegen just a little.
    RHS = Visit(E->getRHS());
    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // Store the value into the LHS. Bit-fields are handled specially
    // because the result is altered by the store, i.e., [C99 6.5.16p1]
    // 'An assignment expression has the value of the left operand after
    // the assignment...'.
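    // For example (illustrative): given `struct { unsigned bf : 4; } s;`,
    // the expression `s.bf = 23` stores 23 modulo 16 and itself evaluates
    // to 7, so the store must pass the updated value back out through RHS.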
    if (LHS.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
    } else {
      CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
      CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
    }
  }

  // If the result is clearly ignored, return now.
  if (Ignore)
    return nullptr;

  // The result of an assignment in C is the assigned r-value.
  if (!CGF.getLangOpts().CPlusPlus)
    return RHS;

  // If the lvalue is non-volatile, return the computed value of the
  // assignment.
  if (!LHS.isVolatileQualified())
    return RHS;

  // Otherwise, reload the value.
  return EmitLoadOfLValue(LHS, E->getExprLoc());
}

Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
  // Perform vector logical and on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {
    CGF.incrementProfileCounter(E);

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *And = Builder.CreateAnd(LHS, RHS);
    return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 0 && RHS, see if we can elide RHS; if so, just return 0.
  // If we have 1 && X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (LHSCondVal) { // If we have 1 && X, just emit X.
      CGF.incrementProfileCounter(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter needed to track branch condition
      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
      // "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
          CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
        llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end");
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
        Builder.CreateCondBr(RHSCond, RHSBlockCnt, FBlock);
        CGF.EmitBlock(RHSBlockCnt);
        CGF.incrementProfileCounter(E->getRHS());
        CGF.EmitBranch(FBlock);
        CGF.EmitBlock(FBlock);
      }

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
    }

    // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
    if (!CGF.ContainsLabel(E->getRHS()))
      return llvm::Constant::getNullValue(ResTy);
  }

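  // Rough shape of the IR emitted below (block names as created here):
  //     br i1 %lhs.cond, label %land.rhs, label %land.end
  //   land.rhs:
  //     ...
  //     br label %land.end
  //   land.end:
  //     %res = phi i1 [ false, ... ], [ %rhs.cond, %land.rhs ]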
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is false, go to the failure (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
                           CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock now come from the (indeterminate number of)
  // edges out of this first condition. All of these values will be false.
  // Start setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  CGF.incrementProfileCounter(E);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
  eval.end(CGF);

  // Reacquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  if (InstrumentRegions &&
      CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
    Builder.CreateCondBr(RHSCond, RHSBlockCnt, ContBlock);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.incrementProfileCounter(E->getRHS());
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
  }

  // Emit an unconditional branch from this block to ContBlock.
  {
    // There is no need to emit a line number for an unconditional branch.
    auto NL = ApplyDebugLocation::CreateEmpty(CGF);
    CGF.EmitBlock(ContBlock);
  }
  // Insert an entry into the phi node for the edge with the value of RHSCond.
  PN->addIncoming(RHSCond, RHSBlock);

  // Use an artificial location to preserve the scope information.
  {
    auto NL = ApplyDebugLocation::CreateArtificial(CGF);
    PN->setDebugLoc(Builder.getCurrentDebugLocation());
  }

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
}

Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
  // Perform vector logical or on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {
    CGF.incrementProfileCounter(E);

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *Or = Builder.CreateOr(LHS, RHS);
    return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 1 || RHS, see if we can elide RHS; if so, just return 1.
  // If we have 0 || X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (!LHSCondVal) { // If we have 0 || X, just emit X.
      CGF.incrementProfileCounter(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter needed to track branch condition
      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
      // "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
          CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
        llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
        Builder.CreateCondBr(RHSCond, FBlock, RHSBlockCnt);
        CGF.EmitBlock(RHSBlockCnt);
        CGF.incrementProfileCounter(E->getRHS());
        CGF.EmitBranch(FBlock);
        CGF.EmitBlock(FBlock);
      }

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
    }

    // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
    if (!CGF.ContainsLabel(E->getRHS()))
      return llvm::ConstantInt::get(ResTy, 1);
  }

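  // This mirrors the '&&' lowering above with the roles flipped: edges that
  // bypass the RHS feed `true` into the lor.end phi, and the RHS edge feeds
  // its own computed value.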
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is true, go to the success (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
                           CGF.getCurrentProfileCount() -
                               CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock now come from the (indeterminate number of)
  // edges out of this first condition. All of these values will be true.
  // Start setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);

  eval.begin(CGF);

  // Emit the RHS condition as a bool value.
  CGF.EmitBlock(RHSBlock);
  CGF.incrementProfileCounter(E);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

  eval.end(CGF);

  // Reacquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  if (InstrumentRegions &&
      CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
    Builder.CreateCondBr(RHSCond, ContBlock, RHSBlockCnt);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.incrementProfileCounter(E->getRHS());
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
  }

  // Emit an unconditional branch from this block to ContBlock. Insert an entry
  // into the phi node for the edge with the value of RHSCond.
  CGF.EmitBlock(ContBlock);
  PN->addIncoming(RHSCond, RHSBlock);

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
}

Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  CGF.EnsureInsertPoint();
  return Visit(E->getRHS());
}

//===----------------------------------------------------------------------===//
// Other Operators
//===----------------------------------------------------------------------===//

/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
/// expression is cheap enough and side-effect-free enough to evaluate
/// unconditionally instead of conditionally. This is used to convert control
/// flow into selects in some cases.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
                                                   CodeGenFunction &CGF) {
  // Anything that is an integer or floating point constant is fine.
  return E->IgnoreParens()->isEvaluatable(CGF.getContext());

  // Even non-volatile automatic variables can't be evaluated unconditionally.
  // Referencing a thread_local may cause non-trivial initialization work to
  // occur. If we're inside a lambda and one of the variables is from the scope
  // outside the lambda, that function may have returned already. Reading its
  // locals is a bad idea. Also, these reads may introduce races that didn't
  // exist in the source-level program.
}


Value *ScalarExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  TestAndClearIgnoreResultAssign();

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  Expr *condExpr = E->getCond();
  Expr *lhsExpr = E->getTrueExpr();
  Expr *rhsExpr = E->getFalseExpr();

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm.
  bool CondExprBool;
  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
    Expr *live = lhsExpr, *dead = rhsExpr;
    if (!CondExprBool) std::swap(live, dead);

    // If the dead side doesn't have labels we need, just emit the Live part.
    if (!CGF.ContainsLabel(dead)) {
      if (CondExprBool)
        CGF.incrementProfileCounter(E);
      Value *Result = Visit(live);

      // If the live part is a throw expression, it acts like it has a void
      // type, so evaluating it returns a null Value*. However, a conditional
      // with non-void type must return a non-null Value*.
      if (!Result && !E->getType()->isVoidType())
        Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));

      return Result;
    }
  }

  // OpenCL: If the condition is a vector, we can treat this condition like
  // the select function.
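  // Illustrative per-lane sketch of the lowering below: the mask is the
  // sign-extended MSB test of each condition element, and the result is
  //   (lhs & mask) | (rhs & ~mask)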
  if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) ||
      condExpr->getType()->isExtVectorType()) {
    CGF.incrementProfileCounter(E);

    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    llvm::Type *condType = ConvertType(condExpr->getType());
    auto *vecTy = cast<llvm::FixedVectorType>(condType);

    unsigned numElem = vecTy->getNumElements();
    llvm::Type *elemType = vecTy->getElementType();

    llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
    llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
    llvm::Value *tmp = Builder.CreateSExt(
        TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
    llvm::Value *tmp2 = Builder.CreateNot(tmp);

    // Cast float to int to perform ANDs if necessary.
    llvm::Value *RHSTmp = RHS;
    llvm::Value *LHSTmp = LHS;
    bool wasCast = false;
    llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
    if (rhsVTy->getElementType()->isFloatingPointTy()) {
      RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
      LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
      wasCast = true;
    }

    llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
    llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
    llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
    if (wasCast)
      tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());

    return tmp5;
  }

  if (condExpr->getType()->isVectorType()) {
    CGF.incrementProfileCounter(E);

    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    llvm::Type *CondType = ConvertType(condExpr->getType());
    auto *VecTy = cast<llvm::VectorType>(CondType);
    llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);

    CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
    return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
  }

  // If this is a really simple expression (like x ? 4 : 5), emit this as a
  // select instead of as control flow. We can only do this if it is cheap and
  // safe to evaluate the LHS and RHS unconditionally.
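  // e.g. `x ? 4 : 5` lowers (roughly) to:
  //   %tobool = icmp ne i32 %x, 0
  //   %cond = select i1 %tobool, i32 4, i32 5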
  if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
      isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
    llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
    llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);

    CGF.incrementProfileCounter(E, StepV);

    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);
    if (!LHS) {
      // If the conditional has void type, make sure we return a null Value*.
      assert(!RHS && "LHS and RHS types must match");
      return nullptr;
    }
    return Builder.CreateSelect(CondV, LHS, RHS, "cond");
  }

  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
                           CGF.getProfileCount(lhsExpr));

  CGF.EmitBlock(LHSBlock);
  CGF.incrementProfileCounter(E);
  eval.begin(CGF);
  Value *LHS = Visit(lhsExpr);
  eval.end(CGF);

  LHSBlock = Builder.GetInsertBlock();
  Builder.CreateBr(ContBlock);

  CGF.EmitBlock(RHSBlock);
  eval.begin(CGF);
  Value *RHS = Visit(rhsExpr);
  eval.end(CGF);

  RHSBlock = Builder.GetInsertBlock();
  CGF.EmitBlock(ContBlock);

  // If the LHS or RHS is a throw expression, it will be legitimately null.
  if (!LHS)
    return RHS;
  if (!RHS)
    return LHS;

  // Create a PHI node for the result, merging the two arms.
  llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
  PN->addIncoming(LHS, LHSBlock);
  PN->addIncoming(RHS, RHSBlock);
  return PN;
}

Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
  return Visit(E->getChosenSubExpr());
}

Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  QualType Ty = VE->getType();

  if (Ty->isVariablyModifiedType())
    CGF.EmitVariablyModifiedType(Ty);

  Address ArgValue = Address::invalid();
  Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);

  llvm::Type *ArgTy = ConvertType(VE->getType());

  // If EmitVAArg fails, emit an error.
  if (!ArgPtr.isValid()) {
    CGF.ErrorUnsupported(VE, "va_arg expression");
    return llvm::UndefValue::get(ArgTy);
  }

  // FIXME: Volatility.
  llvm::Value *Val = Builder.CreateLoad(ArgPtr);

  // If EmitVAArg promoted the type, we must truncate it.
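  // (e.g. on many ABIs a 'short' vararg arrives in a promoted 'int'-sized
  // slot, and some targets hand back pointer arguments as integer slots.)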
  if (ArgTy != Val->getType()) {
    if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy())
      Val = Builder.CreateIntToPtr(Val, ArgTy);
    else
      Val = Builder.CreateTrunc(Val, ArgTy);
  }

  return Val;
}

Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
  return CGF.EmitBlockLiteral(block);
}

// Convert a vec3 to vec4, or vice versa.
static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
                                 Value *Src, unsigned NumElementsDst) {
  static constexpr int Mask[] = {0, 1, 2, -1};
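  // Taking the first NumElementsDst entries of the mask yields {0,1,2} for a
  // vec4 -> vec3 shrink and {0,1,2,-1} (one undef lane) for a vec3 -> vec4
  // widen.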
  return Builder.CreateShuffleVector(Src,
                                     llvm::makeArrayRef(Mask, NumElementsDst));
}

// Create cast instructions for converting LLVM value \p Src to LLVM type \p
// DstTy. \p Src has the same size as \p DstTy. Both are single value types
// but could be scalars or vectors of different lengths, and either can be a
// pointer.
// There are 4 cases:
// 1. non-pointer -> non-pointer  : needs 1 bitcast
// 2. pointer -> pointer          : needs 1 bitcast or addrspacecast
// 3. pointer -> non-pointer
//   a) pointer -> intptr_t       : needs 1 ptrtoint
//   b) pointer -> non-intptr_t   : needs 1 ptrtoint then 1 bitcast
// 4. non-pointer -> pointer
//   a) intptr_t -> pointer       : needs 1 inttoptr
//   b) non-intptr_t -> pointer   : needs 1 bitcast then 1 inttoptr
// Note: for cases 3b and 4b two casts are required since LLVM casts do not
// allow casting directly between pointer types and non-integer non-pointer
// types.
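// For example (illustrative, assuming a 64-bit target): <2 x i32> -> i8* is
// case 4b and emits `bitcast <2 x i32> %v to i64` then `inttoptr i64 %v.int
// to i8*`.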
static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
                                           const llvm::DataLayout &DL,
                                           Value *Src, llvm::Type *DstTy,
                                           StringRef Name = "") {
  auto SrcTy = Src->getType();

  // Case 1.
  if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
    return Builder.CreateBitCast(Src, DstTy, Name);

  // Case 2.
  if (SrcTy->isPointerTy() && DstTy->isPointerTy())
    return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);

  // Case 3.
  if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
    // Case 3b.
    if (!DstTy->isIntegerTy())
      Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
    // Cases 3a and 3b.
    return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
  }

  // Case 4b.
  if (!SrcTy->isIntegerTy())
    Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
  // Cases 4a and 4b.
  return Builder.CreateIntToPtr(Src, DstTy, Name);
}

Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
  Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
  llvm::Type *DstTy = ConvertType(E->getType());

  llvm::Type *SrcTy = Src->getType();
  unsigned NumElementsSrc =
      isa<llvm::VectorType>(SrcTy)
          ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
          : 0;
  unsigned NumElementsDst =
      isa<llvm::VectorType>(DstTy)
          ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
          : 0;

  // Going from vec3 to non-vec3 is a special case and requires a shuffle
  // vector to get a vec4, then a bitcast if the target type is different.
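  // e.g. (illustrative) OpenCL `as_int4(float3 v)` shuffles <3 x float> to
  // <4 x float> with an undef fourth lane, then bitcasts to <4 x i32>.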
  if (NumElementsSrc == 3 && NumElementsDst != 3) {
    Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);

    if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
      Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                         DstTy);
    }

    Src->setName("astype");
    return Src;
  }

  // Going from non-vec3 to vec3 is a special case and requires a bitcast
  // to vec4 if the original type is not vec4, then a shuffle vector to
  // get a vec3.
  if (NumElementsSrc != 3 && NumElementsDst == 3) {
    if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
      auto *Vec4Ty = llvm::FixedVectorType::get(
          cast<llvm::VectorType>(DstTy)->getElementType(), 4);
      Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                         Vec4Ty);
    }

    Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
    Src->setName("astype");
    return Src;
  }

  return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
                                      Src, DstTy, "astype");
}

Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
  return CGF.EmitAtomicExpr(E).getScalarVal();
}

//===----------------------------------------------------------------------===//
// Entry Point into this File
//===----------------------------------------------------------------------===//

/// Emit the computation of the specified expression of scalar type, ignoring
/// the result.
Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
  assert(E && hasScalarEvaluationKind(E->getType()) &&
         "Invalid scalar expression to emit");

  return ScalarExprEmitter(*this, IgnoreResultAssign)
      .Visit(const_cast<Expr *>(E));
}

/// Emit a conversion from the specified type to the specified destination
/// type, both of which are LLVM scalar types.
Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
                                             QualType DstTy,
                                             SourceLocation Loc) {
  assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
         "Invalid scalar expression to emit");
  return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
}

/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
                                                      QualType SrcTy,
                                                      QualType DstTy,
                                                      SourceLocation Loc) {
  assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
         "Invalid complex -> scalar conversion");
  return ScalarExprEmitter(*this)
      .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
}


llvm::Value *CodeGenFunction::
EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                        bool isInc, bool isPre) {
  return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
}

LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
  // object->isa or (*object).isa
  // Generate code as for: *(Class*)object

  Expr *BaseExpr = E->getBase();
  Address Addr = Address::invalid();
  if (BaseExpr->isRValue()) {
    Addr = Address(EmitScalarExpr(BaseExpr), getPointerAlign());
  } else {
    Addr = EmitLValue(BaseExpr).getAddress(*this);
  }

  // Cast the address to Class*.
  Addr = Builder.CreateElementBitCast(Addr, ConvertType(E->getType()));
  return MakeAddrLValue(Addr, E->getType());
}


LValue CodeGenFunction::EmitCompoundAssignmentLValue(
    const CompoundAssignOperator *E) {
  ScalarExprEmitter Scalar(*this);
  Value *Result = nullptr;
  switch (E->getOpcode()) {
#define COMPOUND_OP(Op)                                                        \
  case BO_##Op##Assign:                                                        \
    return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op,    \
                                           Result)
  COMPOUND_OP(Mul);
  COMPOUND_OP(Div);
  COMPOUND_OP(Rem);
  COMPOUND_OP(Add);
  COMPOUND_OP(Sub);
  COMPOUND_OP(Shl);
  COMPOUND_OP(Shr);
  COMPOUND_OP(And);
  COMPOUND_OP(Xor);
  COMPOUND_OP(Or);
#undef COMPOUND_OP

  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Mul:
  case BO_Div:
  case BO_Rem:
  case BO_Add:
  case BO_Sub:
  case BO_Shl:
  case BO_Shr:
  case BO_LT:
  case BO_GT:
  case BO_LE:
  case BO_GE:
  case BO_EQ:
  case BO_NE:
  case BO_Cmp:
  case BO_And:
  case BO_Xor:
  case BO_Or:
  case BO_LAnd:
  case BO_LOr:
  case BO_Assign:
  case BO_Comma:
    llvm_unreachable("Not valid compound assignment operators");
  }

  llvm_unreachable("Unhandled compound assignment operator");
}

struct GEPOffsetAndOverflow {
  // The total (signed) byte offset for the GEP.
  llvm::Value *TotalOffset;
  // The offset overflow flag - true if the total offset overflows.
  llvm::Value *OffsetOverflows;
};

/// Evaluate the given GEPVal, which is either an inbounds GEP, or a constant,
/// and compute the total offset it applies from its base pointer BasePtr.
/// Returns the offset in bytes and a boolean flag for whether an overflow
/// happened during evaluation.
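/// For example (illustrative): for `getelementptr inbounds %struct.S, %p,
/// i64 %i, i32 1` the total offset is `%i * sizeof(S) + offset-of-field-1`,
/// accumulated with signed-overflow checks at the intptr_t width.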
static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
                                                 llvm::LLVMContext &VMContext,
                                                 CodeGenModule &CGM,
                                                 CGBuilderTy &Builder) {
  const auto &DL = CGM.getDataLayout();

  // The total (signed) byte offset for the GEP.
  llvm::Value *TotalOffset = nullptr;

  // Was the GEP already reduced to a constant?
  if (isa<llvm::Constant>(GEPVal)) {
    // Compute the offset by casting both pointers to integers and subtracting:
    // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
    Value *BasePtr_int =
        Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
    Value *GEPVal_int =
        Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
    TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
    return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
  }

  auto *GEP = cast<llvm::GEPOperator>(GEPVal);
  assert(GEP->getPointerOperand() == BasePtr &&
         "BasePtr must be the base of the GEP.");
  assert(GEP->isInBounds() && "Expected inbounds GEP");

  auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());

  // Grab references to the signed add/mul overflow intrinsics for intptr_t.
  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
  auto *SAddIntrinsic =
      CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
  auto *SMulIntrinsic =
      CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);

  // The offset overflow flag - true if the total offset overflows.
  llvm::Value *OffsetOverflows = Builder.getFalse();

  /// Return the result of the given binary operation.
  auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
                  llvm::Value *RHS) -> llvm::Value * {
    assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");

    // If the operands are constants, return a constant result.
    if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
      if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
        llvm::APInt N;
        bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
                                                  /*Signed=*/true, N);
        if (HasOverflow)
          OffsetOverflows = Builder.getTrue();
        return llvm::ConstantInt::get(VMContext, N);
      }
    }

    // Otherwise, compute the result with checked arithmetic.
    auto *ResultAndOverflow = Builder.CreateCall(
        (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
    OffsetOverflows = Builder.CreateOr(
        Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
    return Builder.CreateExtractValue(ResultAndOverflow, 0);
  };

  // Determine the total byte offset by looking at each GEP operand.
  for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    llvm::Value *LocalOffset;
    auto *Index = GTI.getOperand();
    // Compute the local offset contributed by this indexing step:
    if (auto *STy = GTI.getStructTypeOrNull()) {
      // For struct indexing, the local offset is the byte position of the
      // specified field.
      unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
      LocalOffset = llvm::ConstantInt::get(
          IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
    } else {
      // Otherwise this is array-like indexing. The local offset is the index
      // multiplied by the element size.
      auto *ElementSize = llvm::ConstantInt::get(
          IntPtrTy, DL.getTypeAllocSize(GTI.getIndexedType()));
      auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
      LocalOffset = eval(BO_Mul, ElementSize, IndexS);
    }

    // If this is the first offset, set it as the total offset. Otherwise, add
    // the local offset into the running total.
    if (!TotalOffset || TotalOffset == Zero)
      TotalOffset = LocalOffset;
    else
      TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
  }

  return {TotalOffset, OffsetOverflows};
}

Value *
CodeGenFunction::EmitCheckedInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList,
                                        bool SignedIndices, bool IsSubtraction,
                                        SourceLocation Loc, const Twine &Name) {
  Value *GEPVal = Builder.CreateInBoundsGEP(Ptr, IdxList, Name);

  // If the pointer overflow sanitizer isn't enabled, do nothing.
  if (!SanOpts.has(SanitizerKind::PointerOverflow))
    return GEPVal;

  llvm::Type *PtrTy = Ptr->getType();

  // Perform the nullptr-and-offset check unless the nullptr is defined.
  bool PerformNullCheck = !NullPointerIsDefined(
      Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
  // Check for overflows unless the GEP got constant-folded, and only in the
  // default address space.
  bool PerformOverflowCheck =
      !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;

  if (!(PerformNullCheck || PerformOverflowCheck))
    return GEPVal;

  const auto &DL = CGM.getDataLayout();

  SanitizerScope SanScope(this);
  llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);

  GEPOffsetAndOverflow EvaluatedGEP =
      EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);

  assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
          EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
         "If the offset got constant-folded, we don't expect that there was an "
         "overflow.");

  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);

  // Common case: if the total offset is zero, and we are using C++ semantics,
  // where nullptr+0 is defined, don't emit a check.
  if (EvaluatedGEP.TotalOffset == Zero && CGM.getLangOpts().CPlusPlus)
    return GEPVal;

  // Now that we've computed the total offset, add it to the base pointer (with
  // wrapping semantics).
  auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
  auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);

  llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;

  if (PerformNullCheck) {
    // In C++, if the base pointer evaluates to a null pointer value,
    // the only valid pointer this inbounds GEP can produce is also
    // a null pointer, so the offset must also evaluate to zero.
    // Likewise, if we have a non-null base pointer, we cannot get a null
    // pointer as a result, so the offset cannot be -intptr_t(BasePtr).
    // In other words, both pointers are either null, or both are non-null,
    // or the behaviour is undefined.
    //
    // C, however, is more strict in this regard, and gives more
    // optimization opportunities: in C, additionally, nullptr+0 is undefined.
    // So both the input to the 'gep inbounds' AND the output must not be null.
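    // i.e. the check reduces to:
    //   C++: valid iff isnotnull(Base) == isnotnull(Result)
    //   C:   valid iff isnotnull(Base) && isnotnull(Result)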
    auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
    auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
    auto *Valid =
        CGM.getLangOpts().CPlusPlus
            ? Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr)
            : Builder.CreateAnd(BaseIsNotNullptr, ResultIsNotNullptr);
    Checks.emplace_back(Valid, SanitizerKind::PointerOverflow);
  }

  if (PerformOverflowCheck) {
    // The GEP is valid if:
    // 1) The total offset doesn't overflow, and
    // 2) The sign of the difference between the computed address and the base
    //    pointer matches the sign of the total offset.
    llvm::Value *ValidGEP;
    auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
    if (SignedIndices) {
      // The GEP is computed as `unsigned base + signed offset`, therefore:
      // * If the offset was positive, then the computed pointer can not be
      //   [unsigned] less than the base pointer, unless it overflowed.
      // * If the offset was negative, then the computed pointer can not be
      //   [unsigned] greater than the base pointer, unless it overflowed.
      auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
      auto *PosOrZeroOffset =
          Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
      llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
      ValidGEP =
          Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
    } else if (!IsSubtraction) {
      // The GEP is computed as `unsigned base + unsigned offset`, therefore
      // the computed pointer can not be [unsigned] less than the base pointer,
      // unless there was an overflow.
      // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
      ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
    } else {
      // The GEP is computed as `unsigned base - unsigned offset`, therefore
      // the computed pointer can not be [unsigned] greater than the base
      // pointer, unless there was an overflow.
      // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
      ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
    }
    ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
    Checks.emplace_back(ValidGEP, SanitizerKind::PointerOverflow);
  }

  assert(!Checks.empty() && "Should have produced some checks.");

  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
  // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
  llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
  EmitCheck(Checks, SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);

  return GEPVal;
}
