xref: /llvm-project/clang/lib/CodeGen/CGExpr.cpp (revision 1295aa2e814d1747d69520e34e2c5fb2888e666d)
1 //===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This contains code to emit Expr nodes as LLVM code.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "ABIInfoImpl.h"
14 #include "CGCUDARuntime.h"
15 #include "CGCXXABI.h"
16 #include "CGCall.h"
17 #include "CGCleanup.h"
18 #include "CGDebugInfo.h"
19 #include "CGObjCRuntime.h"
20 #include "CGOpenMPRuntime.h"
21 #include "CGRecordLayout.h"
22 #include "CodeGenFunction.h"
23 #include "CodeGenModule.h"
24 #include "ConstantEmitter.h"
25 #include "TargetInfo.h"
26 #include "clang/AST/ASTContext.h"
27 #include "clang/AST/ASTLambda.h"
28 #include "clang/AST/Attr.h"
29 #include "clang/AST/DeclObjC.h"
30 #include "clang/AST/NSAPI.h"
31 #include "clang/AST/StmtVisitor.h"
32 #include "clang/Basic/Builtins.h"
33 #include "clang/Basic/CodeGenOptions.h"
34 #include "clang/Basic/SourceManager.h"
35 #include "llvm/ADT/STLExtras.h"
36 #include "llvm/ADT/ScopeExit.h"
37 #include "llvm/ADT/StringExtras.h"
38 #include "llvm/IR/DataLayout.h"
39 #include "llvm/IR/Intrinsics.h"
40 #include "llvm/IR/LLVMContext.h"
41 #include "llvm/IR/MDBuilder.h"
42 #include "llvm/IR/MatrixBuilder.h"
43 #include "llvm/Support/ConvertUTF.h"
44 #include "llvm/Support/Endian.h"
45 #include "llvm/Support/MathExtras.h"
46 #include "llvm/Support/Path.h"
47 #include "llvm/Support/xxhash.h"
48 #include "llvm/Transforms/Utils/SanitizerStats.h"
49 
50 #include <numeric>
51 #include <optional>
52 #include <string>
53 
54 using namespace clang;
55 using namespace CodeGen;
56 
57 namespace clang {
58 // TODO: Introduce frontend options to enable this per sanitizer, similar to
59 // `-fsanitize-trap`.
60 llvm::cl::opt<bool> ClSanitizeGuardChecks(
61     "ubsan-guard-checks", llvm::cl::Optional,
62     llvm::cl::desc("Guard UBSAN checks with `llvm.allow.ubsan.check()`."));
63 } // namespace clang
64 
65 //===--------------------------------------------------------------------===//
66 //                        Defines for metadata
67 //===--------------------------------------------------------------------===//
68 
69 // These values must be kept in sync with those in the UBSan runtime library.
70 enum VariableTypeDescriptorKind : uint16_t {
71   /// An integer type.
72   TK_Integer = 0x0000,
73   /// A floating-point type.
74   TK_Float = 0x0001,
75   /// A _BitInt(N) type.
76   TK_BitInt = 0x0002,
77   /// Any other type. The value representation is unspecified.
78   TK_Unknown = 0xffff
79 };
80 
81 //===--------------------------------------------------------------------===//
82 //                        Miscellaneous Helper Methods
83 //===--------------------------------------------------------------------===//
84 
85 /// CreateTempAllocaWithoutCast - This creates an alloca and inserts it into
86 /// the entry block.
87 RawAddress
88 CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits Align,
89                                              const Twine &Name,
90                                              llvm::Value *ArraySize) {
91   auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
92   Alloca->setAlignment(Align.getAsAlign());
93   return RawAddress(Alloca, Ty, Align, KnownNonNull);
94 }
95 
96 /// CreateTempAlloca - This creates an alloca and inserts it into the entry
97 /// block. The alloca is cast to the default address space if necessary.
98 RawAddress CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
99                                              const Twine &Name,
100                                              llvm::Value *ArraySize,
101                                              RawAddress *AllocaAddr) {
102   auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
103   if (AllocaAddr)
104     *AllocaAddr = Alloca;
105   llvm::Value *V = Alloca.getPointer();
106   // An alloca always returns a pointer in the alloca address space, which
107   // may differ from the address space expected by the language. For example,
108   // in C++ automatic variables are in the default address space. Therefore,
109   // cast the alloca to the default address space when necessary.
110   if (getASTAllocaAddressSpace() != LangAS::Default) {
111     auto DestAddrSpace = getContext().getTargetAddressSpace(LangAS::Default);
112     llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
113     // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
114     // otherwise alloca is inserted at the current insertion point of the
115     // builder.
116     if (!ArraySize)
117       Builder.SetInsertPoint(getPostAllocaInsertPoint());
118     V = getTargetHooks().performAddrSpaceCast(
119         *this, V, getASTAllocaAddressSpace(), LangAS::Default,
120         Builder.getPtrTy(DestAddrSpace), /*non-null*/ true);
121   }
122 
123   return RawAddress(V, Ty, Align, KnownNonNull);
124 }
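// As an illustrative sketch (not emitted verbatim), on a target whose alloca
// address space differs from the default, e.g. AMDGPU, the cast above yields
// IR along the lines of:
//   %tmp        = alloca i32, align 4, addrspace(5)
//   %tmp.ascast = addrspacecast ptr addrspace(5) %tmp to ptr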
125 
126 /// CreateTempAlloca - This creates an alloca and inserts it into the entry
127 /// block if \p ArraySize is nullptr, otherwise inserts it at the current
128 /// insertion point of the builder.
129 llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
130                                                     const Twine &Name,
131                                                     llvm::Value *ArraySize) {
132   llvm::AllocaInst *Alloca;
133   if (ArraySize)
134     Alloca = Builder.CreateAlloca(Ty, ArraySize, Name);
135   else
136     Alloca =
137         new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
138                              ArraySize, Name, AllocaInsertPt->getIterator());
139   if (Allocas) {
140     Allocas->Add(Alloca);
141   }
142   return Alloca;
143 }
144 
145 /// CreateDefaultAlignTempAlloca - This creates an alloca with the
146 /// default alignment of the corresponding LLVM type, which is *not*
147 /// guaranteed to be related in any way to the expected alignment of
148 /// an AST type that might have been lowered to Ty.
149 RawAddress CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
150                                                          const Twine &Name) {
151   CharUnits Align =
152       CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlign(Ty));
153   return CreateTempAlloca(Ty, Align, Name);
154 }
155 
156 RawAddress CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
157   CharUnits Align = getContext().getTypeAlignInChars(Ty);
158   return CreateTempAlloca(ConvertType(Ty), Align, Name);
159 }
160 
161 RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name,
162                                           RawAddress *Alloca) {
163   // FIXME: Should we prefer the preferred type alignment here?
164   return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
165 }
166 
167 RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
168                                           const Twine &Name,
169                                           RawAddress *Alloca) {
170   RawAddress Result = CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
171                                        /*ArraySize=*/nullptr, Alloca);
172 
173   if (Ty->isConstantMatrixType()) {
174     auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType());
175     auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
176                                                 ArrayTy->getNumElements());
177 
178     Result = Address(Result.getPointer(), VectorTy, Result.getAlignment(),
179                      KnownNonNull);
180   }
181   return Result;
182 }
183 
184 RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
185                                                      CharUnits Align,
186                                                      const Twine &Name) {
187   return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
188 }
189 
190 RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
191                                                      const Twine &Name) {
192   return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
193                                   Name);
194 }
195 
196 /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
197 /// expression and compare the result against zero, returning an Int1Ty value.
198 llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
199   PGO.setCurrentStmt(E);
200   if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
201     llvm::Value *MemPtr = EmitScalarExpr(E);
202     return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
203   }
204 
205   QualType BoolTy = getContext().BoolTy;
206   SourceLocation Loc = E->getExprLoc();
207   CGFPOptionsRAII FPOptsRAII(*this, E);
208   if (!E->getType()->isAnyComplexType())
209     return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);
210 
211   return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy,
212                                        Loc);
213 }
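// For example, 'if (p)' with 'int *p' lowers to an 'icmp ne' against the null
// pointer, while a '_Complex double' operand compares both the real and
// imaginary parts against zero (the result is true if either is nonzero).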
214 
215 /// EmitIgnoredExpr - Emit code to compute the specified expression,
216 /// ignoring the result.
217 void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
218   if (E->isPRValue())
219     return (void)EmitAnyExpr(E, AggValueSlot::ignored(), true);
220 
221   // If this is a conditional operator that yields a bitfield, special-case
222   // its emission. The normal 'EmitLValue' path is particularly difficult to
223   // codegen for here, since creating a single "LValue" for two differently
224   // sized operands is not really doable.
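  // An illustrative case: '(void)(cond ? s.bf3 : s.bf7);' where bf3 and bf7
  // are bitfields of different widths takes this path.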
225   if (const auto *CondOp = dyn_cast<AbstractConditionalOperator>(
226           E->IgnoreParenNoopCasts(getContext()))) {
227     if (CondOp->getObjectKind() == OK_BitField)
228       return EmitIgnoredConditionalOperator(CondOp);
229   }
230 
231   // Just emit it as an l-value and drop the result.
232   EmitLValue(E);
233 }
234 
235 /// EmitAnyExpr - Emit code to compute the specified expression which
236 /// can have any type.  The result is returned as an RValue struct.
237 /// If this is an aggregate expression, AggSlot indicates where the
238 /// result should be returned.
239 RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
240                                     AggValueSlot aggSlot,
241                                     bool ignoreResult) {
242   switch (getEvaluationKind(E->getType())) {
243   case TEK_Scalar:
244     return RValue::get(EmitScalarExpr(E, ignoreResult));
245   case TEK_Complex:
246     return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
247   case TEK_Aggregate:
248     if (!ignoreResult && aggSlot.isIgnored())
249       aggSlot = CreateAggTemp(E->getType(), "agg-temp");
250     EmitAggExpr(E, aggSlot);
251     return aggSlot.asRValue();
252   }
253   llvm_unreachable("bad evaluation kind");
254 }
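// Roughly, 'int', pointers, and other single-value types are TEK_Scalar,
// '_Complex' types are TEK_Complex, and most struct, class, and array types
// are TEK_Aggregate.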
255 
256 /// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
257 /// always be accessible even if no aggregate location is provided.
258 RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
259   AggValueSlot AggSlot = AggValueSlot::ignored();
260 
261   if (hasAggregateEvaluationKind(E->getType()))
262     AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
263   return EmitAnyExpr(E, AggSlot);
264 }
265 
266 /// EmitAnyExprToMem - Evaluate an expression into a given memory
267 /// location.
268 void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
269                                        Address Location,
270                                        Qualifiers Quals,
271                                        bool IsInit) {
272   // FIXME: This function should take an LValue as an argument.
273   switch (getEvaluationKind(E->getType())) {
274   case TEK_Complex:
275     EmitComplexExprIntoLValue(E, MakeAddrLValue(Location, E->getType()),
276                               /*isInit*/ false);
277     return;
278 
279   case TEK_Aggregate: {
280     EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
281                                          AggValueSlot::IsDestructed_t(IsInit),
282                                          AggValueSlot::DoesNotNeedGCBarriers,
283                                          AggValueSlot::IsAliased_t(!IsInit),
284                                          AggValueSlot::MayOverlap));
285     return;
286   }
287 
288   case TEK_Scalar: {
289     RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
290     LValue LV = MakeAddrLValue(Location, E->getType());
291     EmitStoreThroughLValue(RV, LV);
292     return;
293   }
294   }
295   llvm_unreachable("bad evaluation kind");
296 }
297 
298 void CodeGenFunction::EmitInitializationToLValue(
299     const Expr *E, LValue LV, AggValueSlot::IsZeroed_t IsZeroed) {
300   QualType Type = LV.getType();
301   switch (getEvaluationKind(Type)) {
302   case TEK_Complex:
303     EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
304     return;
305   case TEK_Aggregate:
306     EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
307                                            AggValueSlot::DoesNotNeedGCBarriers,
308                                            AggValueSlot::IsNotAliased,
309                                            AggValueSlot::MayOverlap, IsZeroed));
310     return;
311   case TEK_Scalar:
312     if (LV.isSimple())
313       EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
314     else
315       EmitStoreThroughLValue(RValue::get(EmitScalarExpr(E)), LV);
316     return;
317   }
318   llvm_unreachable("bad evaluation kind");
319 }
320 
321 static void
322 pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
323                      const Expr *E, Address ReferenceTemporary) {
324   // Objective-C++ ARC:
325   //   If we are binding a reference to a temporary that has ownership, we
326   //   need to perform retain/release operations on the temporary.
327   //
328   // FIXME: This should be looking at E, not M.
329   if (auto Lifetime = M->getType().getObjCLifetime()) {
330     switch (Lifetime) {
331     case Qualifiers::OCL_None:
332     case Qualifiers::OCL_ExplicitNone:
333       // Carry on to normal cleanup handling.
334       break;
335 
336     case Qualifiers::OCL_Autoreleasing:
337       // Nothing to do; cleaned up by an autorelease pool.
338       return;
339 
340     case Qualifiers::OCL_Strong:
341     case Qualifiers::OCL_Weak:
342       switch (StorageDuration Duration = M->getStorageDuration()) {
343       case SD_Static:
344         // Note: we intentionally do not register a cleanup to release
345         // the object on program termination.
346         return;
347 
348       case SD_Thread:
349         // FIXME: We should probably register a cleanup in this case.
350         return;
351 
352       case SD_Automatic:
353       case SD_FullExpression:
354         CodeGenFunction::Destroyer *Destroy;
355         CleanupKind CleanupKind;
356         if (Lifetime == Qualifiers::OCL_Strong) {
357           const ValueDecl *VD = M->getExtendingDecl();
358           bool Precise = isa_and_nonnull<VarDecl>(VD) &&
359                          VD->hasAttr<ObjCPreciseLifetimeAttr>();
360           CleanupKind = CGF.getARCCleanupKind();
361           Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise
362                             : &CodeGenFunction::destroyARCStrongImprecise;
363         } else {
364           // __weak objects always get EH cleanups; otherwise, exceptions
365           // could cause really nasty crashes instead of mere leaks.
366           CleanupKind = NormalAndEHCleanup;
367           Destroy = &CodeGenFunction::destroyARCWeak;
368         }
369         if (Duration == SD_FullExpression)
370           CGF.pushDestroy(CleanupKind, ReferenceTemporary,
371                           M->getType(), *Destroy,
372                           CleanupKind & EHCleanup);
373         else
374           CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
375                                           M->getType(),
376                                           *Destroy, CleanupKind & EHCleanup);
377         return;
378 
379       case SD_Dynamic:
380         llvm_unreachable("temporary cannot have dynamic storage duration");
381       }
382       llvm_unreachable("unknown storage duration");
383     }
384   }
385 
386   CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
387   if (const RecordType *RT =
388           E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
389     // Get the destructor for the reference temporary.
390     auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
391     if (!ClassDecl->hasTrivialDestructor())
392       ReferenceTemporaryDtor = ClassDecl->getDestructor();
393   }
394 
395   if (!ReferenceTemporaryDtor)
396     return;
397 
398   // Call the destructor for the temporary.
399   switch (M->getStorageDuration()) {
400   case SD_Static:
401   case SD_Thread: {
402     llvm::FunctionCallee CleanupFn;
403     llvm::Constant *CleanupArg;
404     if (E->getType()->isArrayType()) {
405       CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper(
406           ReferenceTemporary, E->getType(),
407           CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions,
408           dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
409       CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
410     } else {
411       CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
412           GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
413       CleanupArg = cast<llvm::Constant>(ReferenceTemporary.emitRawPointer(CGF));
414     }
415     CGF.CGM.getCXXABI().registerGlobalDtor(
416         CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
417     break;
418   }
419 
420   case SD_FullExpression:
421     CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(),
422                     CodeGenFunction::destroyCXXObject,
423                     CGF.getLangOpts().Exceptions);
424     break;
425 
426   case SD_Automatic:
427     CGF.pushLifetimeExtendedDestroy(NormalAndEHCleanup,
428                                     ReferenceTemporary, E->getType(),
429                                     CodeGenFunction::destroyCXXObject,
430                                     CGF.getLangOpts().Exceptions);
431     break;
432 
433   case SD_Dynamic:
434     llvm_unreachable("temporary cannot have dynamic storage duration");
435   }
436 }
437 
438 static RawAddress createReferenceTemporary(CodeGenFunction &CGF,
439                                            const MaterializeTemporaryExpr *M,
440                                            const Expr *Inner,
441                                            RawAddress *Alloca = nullptr) {
442   auto &TCG = CGF.getTargetHooks();
443   switch (M->getStorageDuration()) {
444   case SD_FullExpression:
445   case SD_Automatic: {
446     // If we have a constant temporary array or record, try to promote it into a
447     // constant global under the same rules under which a normal constant would
448     // have been promoted. This is easier on the optimizer and generally emits fewer
449     // instructions.
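    // Illustratively, with constant merging enabled, binding
    //   const int (&r)[3] = {1, 2, 3};
    // can emit a private constant global ".ref.tmp" instead of an alloca.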
450     QualType Ty = Inner->getType();
451     if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
452         (Ty->isArrayType() || Ty->isRecordType()) &&
453         Ty.isConstantStorage(CGF.getContext(), true, false))
454       if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
455         auto AS = CGF.CGM.GetGlobalConstantAddressSpace();
456         auto *GV = new llvm::GlobalVariable(
457             CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
458             llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
459             llvm::GlobalValue::NotThreadLocal,
460             CGF.getContext().getTargetAddressSpace(AS));
461         CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
462         GV->setAlignment(alignment.getAsAlign());
463         llvm::Constant *C = GV;
464         if (AS != LangAS::Default)
465           C = TCG.performAddrSpaceCast(
466               CGF.CGM, GV, AS, LangAS::Default,
467               llvm::PointerType::get(
468                   CGF.getLLVMContext(),
469                   CGF.getContext().getTargetAddressSpace(LangAS::Default)));
470         // FIXME: Should we put the new global into a COMDAT?
471         return RawAddress(C, GV->getValueType(), alignment);
472       }
473     return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
474   }
475   case SD_Thread:
476   case SD_Static:
477     return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);
478 
479   case SD_Dynamic:
480     llvm_unreachable("temporary can't have dynamic storage duration");
481   }
482   llvm_unreachable("unknown storage duration");
483 }
484 
485 /// Helper method to check if the underlying ABI is AAPCS
486 static bool isAAPCS(const TargetInfo &TargetInfo) {
487   return TargetInfo.getABI().starts_with("aapcs");
488 }
489 
490 LValue CodeGenFunction::
491 EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
492   const Expr *E = M->getSubExpr();
493 
494   assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
495           !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
496          "Reference should never be pseudo-strong!");
497 
498   // FIXME: Ideally this would use EmitAnyExprToMem; however, we cannot do so,
499   // as that would cause the ARC lifetime adjustment to be lost.
500   auto ownership = M->getType().getObjCLifetime();
501   if (ownership != Qualifiers::OCL_None &&
502       ownership != Qualifiers::OCL_ExplicitNone) {
503     RawAddress Object = createReferenceTemporary(*this, M, E);
504     if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
505       llvm::Type *Ty = ConvertTypeForMem(E->getType());
506       Object = Object.withElementType(Ty);
507 
508       // createReferenceTemporary will promote the temporary to a global with a
509       // constant initializer if it can.  It can only do this to a value of
510       // ARC-manageable type if the value is global and therefore "immune" to
511       // ref-counting operations.  Therefore we have no need to emit either a
512       // dynamic initialization or a cleanup and we can just return the address
513       // of the temporary.
514       if (Var->hasInitializer())
515         return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
516 
517       Var->setInitializer(CGM.EmitNullConstant(E->getType()));
518     }
519     LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
520                                        AlignmentSource::Decl);
521 
522     switch (getEvaluationKind(E->getType())) {
523     default: llvm_unreachable("expected scalar or aggregate expression");
524     case TEK_Scalar:
525       EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
526       break;
527     case TEK_Aggregate: {
528       EmitAggExpr(E, AggValueSlot::forAddr(Object,
529                                            E->getType().getQualifiers(),
530                                            AggValueSlot::IsDestructed,
531                                            AggValueSlot::DoesNotNeedGCBarriers,
532                                            AggValueSlot::IsNotAliased,
533                                            AggValueSlot::DoesNotOverlap));
534       break;
535     }
536     }
537 
538     pushTemporaryCleanup(*this, M, E, Object);
539     return RefTempDst;
540   }
541 
542   SmallVector<const Expr *, 2> CommaLHSs;
543   SmallVector<SubobjectAdjustment, 2> Adjustments;
544   E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
545 
546   for (const auto &Ignored : CommaLHSs)
547     EmitIgnoredExpr(Ignored);
548 
549   if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
550     if (opaque->getType()->isRecordType()) {
551       assert(Adjustments.empty());
552       return EmitOpaqueValueLValue(opaque);
553     }
554   }
555 
556   // Create and initialize the reference temporary.
557   RawAddress Alloca = Address::invalid();
558   RawAddress Object = createReferenceTemporary(*this, M, E, &Alloca);
559   if (auto *Var = dyn_cast<llvm::GlobalVariable>(
560           Object.getPointer()->stripPointerCasts())) {
561     llvm::Type *TemporaryType = ConvertTypeForMem(E->getType());
562     Object = Object.withElementType(TemporaryType);
563     // If the temporary is a global and has a constant initializer or is a
564     // constant temporary that we promoted to a global, we may have already
565     // initialized it.
566     if (!Var->hasInitializer()) {
567       Var->setInitializer(CGM.EmitNullConstant(E->getType()));
568       EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
569     }
570   } else {
571     switch (M->getStorageDuration()) {
572     case SD_Automatic:
573       if (auto *Size = EmitLifetimeStart(
574               CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
575               Alloca.getPointer())) {
576         pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
577                                                   Alloca, Size);
578       }
579       break;
580 
581     case SD_FullExpression: {
582       if (!ShouldEmitLifetimeMarkers)
583         break;
584 
585       // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
586       // marker. Instead, start the lifetime of a conditional temporary earlier
587       // so that it's unconditional. Don't do this with sanitizers, which need
588       // more precise lifetime marks. However, when inside an "await.suspend"
589       // block, we should always avoid the conditional cleanup, because it
590       // creates a boolean marker that lives across await_suspend, during which
591       // the coroutine frame may be destroyed.
592       ConditionalEvaluation *OldConditional = nullptr;
593       CGBuilderTy::InsertPoint OldIP;
594       if (isInConditionalBranch() && !E->getType().isDestructedType() &&
595           ((!SanOpts.has(SanitizerKind::HWAddress) &&
596             !SanOpts.has(SanitizerKind::Memory) &&
597             !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) ||
598            inSuspendBlock())) {
599         OldConditional = OutermostConditional;
600         OutermostConditional = nullptr;
601 
602         OldIP = Builder.saveIP();
603         llvm::BasicBlock *Block = OldConditional->getStartingBlock();
604         Builder.restoreIP(CGBuilderTy::InsertPoint(
605             Block, llvm::BasicBlock::iterator(Block->back())));
606       }
607 
608       if (auto *Size = EmitLifetimeStart(
609               CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
610               Alloca.getPointer())) {
611         pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
612                                              Size);
613       }
614 
615       if (OldConditional) {
616         OutermostConditional = OldConditional;
617         Builder.restoreIP(OldIP);
618       }
619       break;
620     }
621 
622     default:
623       break;
624     }
625     EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
626   }
627   pushTemporaryCleanup(*this, M, E, Object);
628 
629   // Perform derived-to-base casts and/or field accesses, to get from the
630   // temporary object we created (and, potentially, for which we extended
631   // the lifetime) to the subobject we're binding the reference to.
632   for (SubobjectAdjustment &Adjustment : llvm::reverse(Adjustments)) {
633     switch (Adjustment.Kind) {
634     case SubobjectAdjustment::DerivedToBaseAdjustment:
635       Object =
636           GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
637                                 Adjustment.DerivedToBase.BasePath->path_begin(),
638                                 Adjustment.DerivedToBase.BasePath->path_end(),
639                                 /*NullCheckValue=*/ false, E->getExprLoc());
640       break;
641 
642     case SubobjectAdjustment::FieldAdjustment: {
643       LValue LV = MakeAddrLValue(Object, E->getType(), AlignmentSource::Decl);
644       LV = EmitLValueForField(LV, Adjustment.Field);
645       assert(LV.isSimple() &&
646              "materialized temporary field is not a simple lvalue");
647       Object = LV.getAddress();
648       break;
649     }
650 
651     case SubobjectAdjustment::MemberPointerAdjustment: {
652       llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
653       Object = EmitCXXMemberDataPointerAddress(E, Object, Ptr,
654                                                Adjustment.Ptr.MPT);
655       break;
656     }
657     }
658   }
659 
660   return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
661 }
662 
663 RValue
664 CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) {
665   // Emit the expression as an lvalue.
666   LValue LV = EmitLValue(E);
667   assert(LV.isSimple());
668   llvm::Value *Value = LV.getPointer(*this);
669 
670   if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) {
671     // C++11 [dcl.ref]p5 (as amended by core issue 453):
672     //   If a glvalue to which a reference is directly bound designates neither
673     //   an existing object or function of an appropriate type nor a region of
674     //   storage of suitable size and alignment to contain an object of the
675     //   reference's type, the behavior is undefined.
676     QualType Ty = E->getType();
677     EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
678   }
679 
680   return RValue::get(Value);
681 }
682 
683 
684 /// getAccessedFieldNo - Given an encoded value and a result number, return the
685 /// input field number being accessed.
686 unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
687                                              const llvm::Constant *Elts) {
688   return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
689       ->getZExtValue();
690 }
691 
692 static llvm::Value *emitHashMix(CGBuilderTy &Builder, llvm::Value *Acc,
693                                 llvm::Value *Ptr) {
694   llvm::Value *A0 =
695       Builder.CreateMul(Ptr, Builder.getInt64(0xbf58476d1ce4e5b9u));
696   llvm::Value *A1 =
697       Builder.CreateXor(A0, Builder.CreateLShr(A0, Builder.getInt64(31)));
698   return Builder.CreateXor(Acc, A1);
699 }
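// The multiply-by-odd-constant followed by a xor-shift above resembles the
// splitmix64 finalizer; roughly, A1 = A0 ^ (A0 >> 31) with
// A0 = Ptr * 0xbf58476d1ce4e5b9, mixed into Acc by a final xor.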
700 
701 bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK) {
702   return TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
703          TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation;
704 }
705 
706 bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) {
707   CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
708   return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
709          (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
710           TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference ||
711           TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation);
712 }
713 
714 bool CodeGenFunction::sanitizePerformTypeCheck() const {
715   return SanOpts.has(SanitizerKind::Null) ||
716          SanOpts.has(SanitizerKind::Alignment) ||
717          SanOpts.has(SanitizerKind::ObjectSize) ||
718          SanOpts.has(SanitizerKind::Vptr);
719 }
720 
721 void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
722                                     llvm::Value *Ptr, QualType Ty,
723                                     CharUnits Alignment,
724                                     SanitizerSet SkippedChecks,
725                                     llvm::Value *ArraySize) {
726   if (!sanitizePerformTypeCheck())
727     return;
728 
729   // Don't check pointers outside the default address space. The null check
730   // isn't correct, the object-size check isn't supported by LLVM, and we can't
731   // communicate the addresses to the runtime handler for the vptr check.
732   if (Ptr->getType()->getPointerAddressSpace())
733     return;
734 
735   // Don't check pointers to volatile data. The behavior here is implementation-
736   // defined.
737   if (Ty.isVolatileQualified())
738     return;
739 
740   SanitizerScope SanScope(this);
741 
742   SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>, 3>
743       Checks;
744   llvm::BasicBlock *Done = nullptr;
745 
746   // Quickly determine whether we have a pointer to an alloca. It's possible
747   // to skip null checks, and some alignment checks, for these pointers. This
748   // can reduce compile time significantly.
749   auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts());
750 
751   llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());
752   llvm::Value *IsNonNull = nullptr;
753   bool IsGuaranteedNonNull =
754       SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;
755   bool AllowNullPointers = isNullPointerAllowed(TCK);
756   if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
757       !IsGuaranteedNonNull) {
758     // The glvalue must not be an empty glvalue.
759     IsNonNull = Builder.CreateIsNotNull(Ptr);
760 
761     // The IR builder can constant-fold the null check if the pointer points to
762     // a constant.
763     IsGuaranteedNonNull = IsNonNull == True;
764 
765     // Skip the null check if the pointer is known to be non-null.
766     if (!IsGuaranteedNonNull) {
767       if (AllowNullPointers) {
768         // When performing pointer casts, it's OK if the value is null.
769         // Skip the remaining checks in that case.
770         Done = createBasicBlock("null");
771         llvm::BasicBlock *Rest = createBasicBlock("not.null");
772         Builder.CreateCondBr(IsNonNull, Rest, Done);
773         EmitBlock(Rest);
774       } else {
775         Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::SO_Null));
776       }
777     }
778   }
779 
780   if (SanOpts.has(SanitizerKind::ObjectSize) &&
781       !SkippedChecks.has(SanitizerKind::ObjectSize) &&
782       !Ty->isIncompleteType()) {
783     uint64_t TySize = CGM.getMinimumObjectSize(Ty).getQuantity();
784     llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
785     if (ArraySize)
786       Size = Builder.CreateMul(Size, ArraySize);
787 
788     // Degenerate case: new X[0] does not need an objectsize check.
789     llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);
790     if (!ConstantSize || !ConstantSize->isNullValue()) {
791       // The glvalue must refer to a large enough storage region.
792       // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
793       //        to check this.
794       // FIXME: Get object address space
795       llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
796       llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
797       llvm::Value *Min = Builder.getFalse();
798       llvm::Value *NullIsUnknown = Builder.getFalse();
799       llvm::Value *Dynamic = Builder.getFalse();
800       llvm::Value *LargeEnough = Builder.CreateICmpUGE(
801           Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic}), Size);
802       Checks.push_back(
803           std::make_pair(LargeEnough, SanitizerKind::SO_ObjectSize));
804     }
805   }
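  // Conceptually, the object-size check above tests
  //   llvm.objectsize(Ptr, min=false, nullunknown=false, dynamic=false) >= Size
  // where Size is sizeof(Ty), scaled by ArraySize when present.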
806 
807   llvm::MaybeAlign AlignVal;
808   llvm::Value *PtrAsInt = nullptr;
809 
810   if (SanOpts.has(SanitizerKind::Alignment) &&
811       !SkippedChecks.has(SanitizerKind::Alignment)) {
812     AlignVal = Alignment.getAsMaybeAlign();
813     if (!Ty->isIncompleteType() && !AlignVal)
814       AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
815                                              /*ForPointeeType=*/true)
816                      .getAsMaybeAlign();
817 
818     // The glvalue must be suitably aligned.
819     if (AlignVal && *AlignVal > llvm::Align(1) &&
820         (!PtrToAlloca || PtrToAlloca->getAlign() < *AlignVal)) {
821       PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
822       llvm::Value *Align = Builder.CreateAnd(
823           PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal->value() - 1));
824       llvm::Value *Aligned =
825           Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
826       if (Aligned != True)
827         Checks.push_back(std::make_pair(Aligned, SanitizerKind::SO_Alignment));
828     }
829   }
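  // The alignment check above reduces to a mask-and-compare, roughly:
  //   %masked = and iN %ptr.int, (AlignVal - 1)
  //   %ok     = icmp eq iN %masked, 0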
830 
831   if (Checks.size() > 0) {
832     llvm::Constant *StaticData[] = {
833         EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(Ty),
834         llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2(*AlignVal) : 1),
835         llvm::ConstantInt::get(Int8Ty, TCK)};
836     EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData,
837               PtrAsInt ? PtrAsInt : Ptr);
838   }
839 
840   // If possible, check that the vptr indicates that there is a subobject of
841   // type Ty at offset zero within this object.
842   //
843   // C++11 [basic.life]p5,6:
844   //   [For storage which does not refer to an object within its lifetime]
845   //   The program has undefined behavior if:
846   //    -- the [pointer or glvalue] is used to access a non-static data member
847   //       or call a non-static member function
848   if (SanOpts.has(SanitizerKind::Vptr) &&
849       !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
850     // Ensure that the pointer is non-null before loading it. If there is no
851     // compile-time guarantee, reuse the run-time null check or emit a new one.
852     if (!IsGuaranteedNonNull) {
853       if (!IsNonNull)
854         IsNonNull = Builder.CreateIsNotNull(Ptr);
855       if (!Done)
856         Done = createBasicBlock("vptr.null");
857       llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");
858       Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);
859       EmitBlock(VptrNotNull);
860     }
861 
862     // Compute a deterministic hash of the mangled name of the type.
863     SmallString<64> MangledName;
864     llvm::raw_svector_ostream Out(MangledName);
865     CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
866                                                      Out);
867 
868     // Skip the check if the mangled type name is in the NoSanitizeList.
869     if (!CGM.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr,
870                                                            Out.str())) {
871       // Load the vptr, and mix it with TypeHash.
872       llvm::Value *TypeHash =
873           llvm::ConstantInt::get(Int64Ty, xxh3_64bits(Out.str()));
874 
875       llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
876       Address VPtrAddr(Ptr, IntPtrTy, getPointerAlign());
877       llvm::Value *VPtrVal = GetVTablePtr(VPtrAddr, VPtrTy,
878                                           Ty->getAsCXXRecordDecl(),
879                                           VTableAuthMode::UnsafeUbsanStrip);
880       VPtrVal = Builder.CreateBitOrPointerCast(VPtrVal, IntPtrTy);
881 
882       llvm::Value *Hash =
883           emitHashMix(Builder, TypeHash, Builder.CreateZExt(VPtrVal, Int64Ty));
884       Hash = Builder.CreateTrunc(Hash, IntPtrTy);
885 
886       // Look the hash up in our cache.
887       const int CacheSize = 128;
888       llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
889       llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
890                                                      "__ubsan_vptr_type_cache");
891       llvm::Value *Slot = Builder.CreateAnd(Hash,
892                                             llvm::ConstantInt::get(IntPtrTy,
893                                                                    CacheSize-1));
894       llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
895       llvm::Value *CacheVal = Builder.CreateAlignedLoad(
896           IntPtrTy, Builder.CreateInBoundsGEP(HashTable, Cache, Indices),
897           getPointerAlign());
898 
899       // If the hash isn't in the cache, call a runtime handler to perform the
900       // hard work of checking whether the vptr is for an object of the right
901       // type. This will either fill in the cache and return, or produce a
902       // diagnostic.
903       llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
904       llvm::Constant *StaticData[] = {
905         EmitCheckSourceLocation(Loc),
906         EmitCheckTypeDescriptor(Ty),
907         CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
908         llvm::ConstantInt::get(Int8Ty, TCK)
909       };
910       llvm::Value *DynamicData[] = { Ptr, Hash };
911       EmitCheck(std::make_pair(EqualHash, SanitizerKind::SO_Vptr),
912                 SanitizerHandler::DynamicTypeCacheMiss, StaticData,
913                 DynamicData);
914     }
915   }
916 
917   if (Done) {
918     Builder.CreateBr(Done);
919     EmitBlock(Done);
920   }
921 }
922 
923 llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E,
924                                                    QualType EltTy) {
925   ASTContext &C = getContext();
926   uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
927   if (!EltSize)
928     return nullptr;
929 
930   auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
931   if (!ArrayDeclRef)
932     return nullptr;
933 
934   auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
935   if (!ParamDecl)
936     return nullptr;
937 
938   auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
939   if (!POSAttr)
940     return nullptr;
941 
942   // Don't load the size if it's a lower bound.
943   int POSType = POSAttr->getType();
944   if (POSType != 0 && POSType != 1)
945     return nullptr;
946 
947   // Find the implicit size parameter.
948   auto PassedSizeIt = SizeArguments.find(ParamDecl);
949   if (PassedSizeIt == SizeArguments.end())
950     return nullptr;
951 
952   const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
953   assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
954   Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
955   llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
956                                               C.getSizeType(), E->getExprLoc());
957   llvm::Value *SizeOfElement =
958       llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
959   return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
960 }
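// For example, given a parameter declared as
//   void f(int *p __attribute__((pass_object_size(0))));
// the caller passes an implicit size-in-bytes argument alongside 'p'; the
// code above loads it and divides by sizeof(int) to recover an element count.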
961 
962 /// If Base is known to point to the start of an array, return the length of
963 /// that array. Return 0 if the length cannot be determined.
964 static llvm::Value *getArrayIndexingBound(CodeGenFunction &CGF,
965                                           const Expr *Base,
966                                           QualType &IndexedType,
967                                           LangOptions::StrictFlexArraysLevelKind
968                                           StrictFlexArraysLevel) {
969   // For the vector indexing extension, the bound is the number of elements.
970   if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
971     IndexedType = Base->getType();
972     return CGF.Builder.getInt32(VT->getNumElements());
973   }
974 
975   Base = Base->IgnoreParens();
976 
977   if (const auto *CE = dyn_cast<CastExpr>(Base)) {
978     if (CE->getCastKind() == CK_ArrayToPointerDecay &&
979         !CE->getSubExpr()->isFlexibleArrayMemberLike(CGF.getContext(),
980                                                      StrictFlexArraysLevel)) {
981       CodeGenFunction::SanitizerScope SanScope(&CGF);
982 
983       IndexedType = CE->getSubExpr()->getType();
984       const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
985       if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
986         return CGF.Builder.getInt(CAT->getSize());
987 
988       if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
989         return CGF.getVLASize(VAT).NumElts;
990       // Ignore pass_object_size here. It's not applicable on decayed pointers.
991     }
992   }
993 
994   CodeGenFunction::SanitizerScope SanScope(&CGF);
995 
996   QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
997   if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
998     IndexedType = Base->getType();
999     return POS;
1000   }
1001 
1002   return nullptr;
1003 }
1004 
1005 namespace {
1006 
1007 /// \p StructAccessBase returns the base \p Expr of a field access. It returns
1008 /// either a \p DeclRefExpr, representing the base pointer to the struct, i.e.:
1009 ///
1010 ///     p in p->a.b.c
1011 ///
1012 /// or a \p MemberExpr, if the \p MemberExpr has the \p RecordDecl we're
1013 /// looking for:
1014 ///
1015 ///     struct s {
1016 ///       struct s *ptr;
1017 ///       int count;
1018 ///       char array[] __attribute__((counted_by(count)));
1019 ///     };
1020 ///
1021 /// If we have an expression like \p p->ptr->array[index], we want the
1022 /// \p MemberExpr for \p p->ptr instead of \p p.
1023 class StructAccessBase
1024     : public ConstStmtVisitor<StructAccessBase, const Expr *> {
1025   const RecordDecl *ExpectedRD;
1026 
1027   bool IsExpectedRecordDecl(const Expr *E) const {
1028     QualType Ty = E->getType();
1029     if (Ty->isPointerType())
1030       Ty = Ty->getPointeeType();
1031     return ExpectedRD == Ty->getAsRecordDecl();
1032   }
1033 
1034 public:
1035   StructAccessBase(const RecordDecl *ExpectedRD) : ExpectedRD(ExpectedRD) {}
1036 
1037   //===--------------------------------------------------------------------===//
1038   //                            Visitor Methods
1039   //===--------------------------------------------------------------------===//
1040 
1041   // NOTE: If we build C++ support for counted_by, then we'll have to handle
1042   // horrors like this:
1043   //
1044   //     struct S {
1045   //       int x, y;
1046   //       int blah[] __attribute__((counted_by(x)));
1047   //     } s;
1048   //
1049   //     int foo(int index, int val) {
1050   //       int (S::*IHatePMDs)[] = &S::blah;
1051   //       (s.*IHatePMDs)[index] = val;
1052   //     }
1053 
1054   const Expr *Visit(const Expr *E) {
1055     return ConstStmtVisitor<StructAccessBase, const Expr *>::Visit(E);
1056   }
1057 
1058   const Expr *VisitStmt(const Stmt *S) { return nullptr; }
1059 
1060   // These are the types we expect to return (in order of most to least
1061   // likely):
1062   //
1063   //   1. DeclRefExpr - This is the expression for the base of the structure.
1064   //      It's exactly what we want to build an access to the \p counted_by
1065   //      field.
1066   //   2. MemberExpr - This is the expression that has the same \p RecordDecl
1067   //      as the flexible array member's lexically enclosing \p RecordDecl. This
1068   //      allows us to catch things like: "p->p->array"
1069   //   3. CompoundLiteralExpr - This is for people who create something
1070   //      heretical like (struct foo has a flexible array member):
1071   //
1072   //        (struct foo){ 1, 2 }.blah[idx];
1073   const Expr *VisitDeclRefExpr(const DeclRefExpr *E) {
1074     return IsExpectedRecordDecl(E) ? E : nullptr;
1075   }
1076   const Expr *VisitMemberExpr(const MemberExpr *E) {
1077     if (IsExpectedRecordDecl(E) && E->isArrow())
1078       return E;
1079     const Expr *Res = Visit(E->getBase());
1080     return !Res && IsExpectedRecordDecl(E) ? E : Res;
1081   }
1082   const Expr *VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
1083     return IsExpectedRecordDecl(E) ? E : nullptr;
1084   }
1085   const Expr *VisitCallExpr(const CallExpr *E) {
1086     return IsExpectedRecordDecl(E) ? E : nullptr;
1087   }
1088 
1089   const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
1090     if (IsExpectedRecordDecl(E))
1091       return E;
1092     return Visit(E->getBase());
1093   }
1094   const Expr *VisitCastExpr(const CastExpr *E) {
1095     if (E->getCastKind() == CK_LValueToRValue)
1096       return IsExpectedRecordDecl(E) ? E : nullptr;
1097     return Visit(E->getSubExpr());
1098   }
1099   const Expr *VisitParenExpr(const ParenExpr *E) {
1100     return Visit(E->getSubExpr());
1101   }
1102   const Expr *VisitUnaryAddrOf(const UnaryOperator *E) {
1103     return Visit(E->getSubExpr());
1104   }
1105   const Expr *VisitUnaryDeref(const UnaryOperator *E) {
1106     return Visit(E->getSubExpr());
1107   }
1108 };
1109 
1110 } // end anonymous namespace
1111 
1112 using RecIndicesTy = SmallVector<llvm::Value *, 8>;
1113 
1114 static bool getGEPIndicesToField(CodeGenFunction &CGF, const RecordDecl *RD,
1115                                  const FieldDecl *Field,
1116                                  RecIndicesTy &Indices) {
1117   const CGRecordLayout &Layout = CGF.CGM.getTypes().getCGRecordLayout(RD);
1118   int64_t FieldNo = -1;
1119   for (const FieldDecl *FD : RD->fields()) {
1120     if (!Layout.containsFieldDecl(FD))
1121       // This can happen, for example, when the field has an empty struct type
1122       // and thus has no corresponding LLVM field.
1123       continue;
1124 
1125     FieldNo = Layout.getLLVMFieldNo(FD);
1126     if (FD == Field) {
1127       Indices.emplace_back(CGF.Builder.getInt32(FieldNo));
1128       return true;
1129     }
1130 
1131     QualType Ty = FD->getType();
1132     if (Ty->isRecordType()) {
1133       if (getGEPIndicesToField(CGF, Ty->getAsRecordDecl(), Field, Indices)) {
1134         if (RD->isUnion())
1135           FieldNo = 0;
1136         Indices.emplace_back(CGF.Builder.getInt32(FieldNo));
1137         return true;
1138       }
1139     }
1140   }
1141 
1142   return false;
1143 }
1144 
1145 llvm::Value *CodeGenFunction::GetCountedByFieldExprGEP(
1146     const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
1147   const RecordDecl *RD = CountDecl->getParent()->getOuterLexicalRecordContext();
1148 
1149   // Find the base struct expr (i.e. p in p->a.b.c.d).
1150   const Expr *StructBase = StructAccessBase(RD).Visit(Base);
1151   if (!StructBase || StructBase->HasSideEffects(getContext()))
1152     return nullptr;
1153 
1154   llvm::Value *Res = nullptr;
1155   if (StructBase->getType()->isPointerType()) {
1156     LValueBaseInfo BaseInfo;
1157     TBAAAccessInfo TBAAInfo;
1158     Address Addr = EmitPointerWithAlignment(StructBase, &BaseInfo, &TBAAInfo);
1159     Res = Addr.emitRawPointer(*this);
1160   } else if (StructBase->isLValue()) {
1161     LValue LV = EmitLValue(StructBase);
1162     Address Addr = LV.getAddress();
1163     Res = Addr.emitRawPointer(*this);
1164   } else {
1165     return nullptr;
1166   }
1167 
1168   RecIndicesTy Indices;
1169   getGEPIndicesToField(*this, RD, CountDecl, Indices);
1170   if (Indices.empty())
1171     return nullptr;
1172 
1173   Indices.push_back(Builder.getInt32(0));
1174   return Builder.CreateInBoundsGEP(
1175       ConvertType(QualType(RD->getTypeForDecl(), 0)), Res,
1176       RecIndicesTy(llvm::reverse(Indices)), "counted_by.gep");
1177 }
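// Illustratively, for
//   struct S { int count; char fam[] __attribute__((counted_by(count))); };
// and a base expression like 'p->fam', this builds a GEP addressing
// 'p->count' so the count can be loaded without re-evaluating 'p'.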
1178 
1179 /// This method is typically called in contexts where we can't generate
1180 /// side-effects, like in __builtin_dynamic_object_size. When finding
1181 /// expressions, only choose those that have either already been emitted or can
1182 /// be loaded without side-effects.
1183 ///
1184 /// - \p FAMDecl: the \p Decl for the flexible array member. It may not be
1185 ///   within the top-level struct.
1186 /// - \p CountDecl: must be within the same non-anonymous struct as \p FAMDecl.
1187 llvm::Value *CodeGenFunction::EmitLoadOfCountedByField(
1188     const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
1189   if (llvm::Value *GEP = GetCountedByFieldExprGEP(Base, FAMDecl, CountDecl))
1190     return Builder.CreateAlignedLoad(ConvertType(CountDecl->getType()), GEP,
1191                                      getIntAlign(), "counted_by.load");
1192   return nullptr;
1193 }
1194 
1195 void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
1196                                       llvm::Value *Index, QualType IndexType,
1197                                       bool Accessed) {
1198   assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
1199          "should not be called unless adding bounds checks");
1200   const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
1201       getLangOpts().getStrictFlexArraysLevel();
1202   QualType IndexedType;
1203   llvm::Value *Bound =
1204       getArrayIndexingBound(*this, Base, IndexedType, StrictFlexArraysLevel);
1205 
1206   EmitBoundsCheckImpl(E, Bound, Index, IndexType, IndexedType, Accessed);
1207 }
1208 
1209 void CodeGenFunction::EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
1210                                           llvm::Value *Index,
1211                                           QualType IndexType,
1212                                           QualType IndexedType, bool Accessed) {
1213   if (!Bound)
1214     return;
1215 
1216   SanitizerScope SanScope(this);
1217 
1218   bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
1219   llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
1220   llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);
1221 
1222   llvm::Constant *StaticData[] = {
1223     EmitCheckSourceLocation(E->getExprLoc()),
1224     EmitCheckTypeDescriptor(IndexedType),
1225     EmitCheckTypeDescriptor(IndexType)
1226   };
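  // A real access requires Index < Bound; merely forming the address (e.g. a
  // one-past-the-end pointer, Accessed == false) also permits Index == Bound.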
1227   llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
1228                                 : Builder.CreateICmpULE(IndexVal, BoundVal);
1229   EmitCheck(std::make_pair(Check, SanitizerKind::SO_ArrayBounds),
1230             SanitizerHandler::OutOfBounds, StaticData, Index);
1231 }
1232 
1233 CodeGenFunction::ComplexPairTy CodeGenFunction::
1234 EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
1235                          bool isInc, bool isPre) {
1236   ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc());
1237 
1238   llvm::Value *NextVal;
1239   if (isa<llvm::IntegerType>(InVal.first->getType())) {
1240     uint64_t AmountVal = isInc ? 1 : -1;
1241     NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
1242 
1243     // Add the inc/dec to the real part.
1244     NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
1245   } else {
1246     QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
1247     llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
1248     if (!isInc)
1249       FVal.changeSign();
1250     NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
1251 
1252     // Add the inc/dec to the real part.
1253     NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
1254   }
1255 
1256   ComplexPairTy IncVal(NextVal, InVal.second);
1257 
1258   // Store the updated result through the lvalue.
1259   EmitStoreOfComplex(IncVal, LV, /*init*/ false);
1260   if (getLangOpts().OpenMP)
1261     CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
1262                                                               E->getSubExpr());
1263 
1264   // If this is a postinc, return the value read from memory, otherwise use the
1265   // updated value.
1266   return isPre ? IncVal : InVal;
1267 }
1268 
1269 void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E,
1270                                              CodeGenFunction *CGF) {
1271   // Bind VLAs in the cast type.
1272   if (CGF && E->getType()->isVariablyModifiedType())
1273     CGF->EmitVariablyModifiedType(E->getType());
1274 
1275   if (CGDebugInfo *DI = getModuleDebugInfo())
1276     DI->EmitExplicitCastType(E->getType());
1277 }
1278 
1279 //===----------------------------------------------------------------------===//
1280 //                         LValue Expression Emission
1281 //===----------------------------------------------------------------------===//
1282 
1283 static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo,
1284                                         TBAAAccessInfo *TBAAInfo,
1285                                         KnownNonNull_t IsKnownNonNull,
1286                                         CodeGenFunction &CGF) {
1287   // We allow this with ObjC object pointers because of fragile ABIs.
1288   assert(E->getType()->isPointerType() ||
1289          E->getType()->isObjCObjectPointerType());
1290   E = E->IgnoreParens();
1291 
1292   // Casts:
1293   if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
1294     if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
1295       CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
1296 
1297     switch (CE->getCastKind()) {
1298     // Non-converting casts (but not C's implicit conversion from void*).
1299     case CK_BitCast:
1300     case CK_NoOp:
1301     case CK_AddressSpaceConversion:
1302       if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
1303         if (PtrTy->getPointeeType()->isVoidType())
1304           break;
1305 
1306         LValueBaseInfo InnerBaseInfo;
1307         TBAAAccessInfo InnerTBAAInfo;
1308         Address Addr = CGF.EmitPointerWithAlignment(
1309             CE->getSubExpr(), &InnerBaseInfo, &InnerTBAAInfo, IsKnownNonNull);
1310         if (BaseInfo) *BaseInfo = InnerBaseInfo;
1311         if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;
1312 
1313         if (isa<ExplicitCastExpr>(CE)) {
1314           LValueBaseInfo TargetTypeBaseInfo;
1315           TBAAAccessInfo TargetTypeTBAAInfo;
1316           CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment(
1317               E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);
1318           if (TBAAInfo)
1319             *TBAAInfo =
1320                 CGF.CGM.mergeTBAAInfoForCast(*TBAAInfo, TargetTypeTBAAInfo);
1321           // If the source l-value is opaque, honor the alignment of the
1322           // casted-to type.
1323           if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
1324             if (BaseInfo)
1325               BaseInfo->mergeForCast(TargetTypeBaseInfo);
1326             Addr.setAlignment(Align);
1327           }
1328         }
1329 
1330         if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
1331             CE->getCastKind() == CK_BitCast) {
1332           if (auto PT = E->getType()->getAs<PointerType>())
1333             CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr,
1334                                           /*MayBeNull=*/true,
1335                                           CodeGenFunction::CFITCK_UnrelatedCast,
1336                                           CE->getBeginLoc());
1337         }
1338 
1339         llvm::Type *ElemTy =
1340             CGF.ConvertTypeForMem(E->getType()->getPointeeType());
1341         Addr = Addr.withElementType(ElemTy);
1342         if (CE->getCastKind() == CK_AddressSpaceConversion)
1343           Addr = CGF.Builder.CreateAddrSpaceCast(
1344               Addr, CGF.ConvertType(E->getType()), ElemTy);
1345         return CGF.authPointerToPointerCast(Addr, CE->getSubExpr()->getType(),
1346                                             CE->getType());
1347       }
1348       break;
1349 
1350     // Array-to-pointer decay.
1351     case CK_ArrayToPointerDecay:
1352       return CGF.EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);
1353 
1354     // Derived-to-base conversions.
1355     case CK_UncheckedDerivedToBase:
1356     case CK_DerivedToBase: {
1357       // TODO: Support accesses to members of base classes in TBAA. For now, we
1358       // conservatively pretend that the complete object is of the base class
1359       // type.
1360       if (TBAAInfo)
1361         *TBAAInfo = CGF.CGM.getTBAAAccessInfo(E->getType());
1362       Address Addr = CGF.EmitPointerWithAlignment(
1363           CE->getSubExpr(), BaseInfo, nullptr,
1364           (KnownNonNull_t)(IsKnownNonNull ||
1365                            CE->getCastKind() == CK_UncheckedDerivedToBase));
1366       auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
1367       return CGF.GetAddressOfBaseClass(
1368           Addr, Derived, CE->path_begin(), CE->path_end(),
1369           CGF.ShouldNullCheckClassCastValue(CE), CE->getExprLoc());
1370     }
1371 
1372     // TODO: Is there any reason to treat base-to-derived conversions
1373     // specially?
1374     default:
1375       break;
1376     }
1377   }
1378 
1379   // Unary &.
1380   if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
1381     if (UO->getOpcode() == UO_AddrOf) {
1382       LValue LV = CGF.EmitLValue(UO->getSubExpr(), IsKnownNonNull);
1383       if (BaseInfo) *BaseInfo = LV.getBaseInfo();
1384       if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
1385       return LV.getAddress();
1386     }
1387   }
1388 
1389   // std::addressof and variants.
1390   if (auto *Call = dyn_cast<CallExpr>(E)) {
1391     switch (Call->getBuiltinCallee()) {
1392     default:
1393       break;
1394     case Builtin::BIaddressof:
1395     case Builtin::BI__addressof:
1396     case Builtin::BI__builtin_addressof: {
1397       LValue LV = CGF.EmitLValue(Call->getArg(0), IsKnownNonNull);
1398       if (BaseInfo) *BaseInfo = LV.getBaseInfo();
1399       if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
1400       return LV.getAddress();
1401     }
1402     }
1403   }
1404 
1405   // TODO: conditional operators, comma.
1406 
1407   // Otherwise, use the alignment of the type.
1408   return CGF.makeNaturalAddressForPointer(
1409       CGF.EmitScalarExpr(E), E->getType()->getPointeeType(), CharUnits(),
1410       /*ForPointeeType=*/true, BaseInfo, TBAAInfo, IsKnownNonNull);
1411 }
1412 
1413 /// EmitPointerWithAlignment - Given an expression of pointer type, try to
1414 /// derive a more accurate bound on the alignment of the pointer.
1415 Address CodeGenFunction::EmitPointerWithAlignment(
1416     const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo,
1417     KnownNonNull_t IsKnownNonNull) {
1418   Address Addr =
1419       ::EmitPointerWithAlignment(E, BaseInfo, TBAAInfo, IsKnownNonNull, *this);
1420   if (IsKnownNonNull && !Addr.isKnownNonNull())
1421     Addr.setKnownNonNull();
1422   return Addr;
1423 }
1424 
1425 llvm::Value *CodeGenFunction::EmitNonNullRValueCheck(RValue RV, QualType T) {
1426   llvm::Value *V = RV.getScalarVal();
1427   if (auto MPT = T->getAs<MemberPointerType>())
1428     return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, V, MPT);
1429   return Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
1430 }
1431 
1432 RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
1433   if (Ty->isVoidType())
1434     return RValue::get(nullptr);
1435 
1436   switch (getEvaluationKind(Ty)) {
1437   case TEK_Complex: {
1438     llvm::Type *EltTy =
1439       ConvertType(Ty->castAs<ComplexType>()->getElementType());
1440     llvm::Value *U = llvm::UndefValue::get(EltTy);
1441     return RValue::getComplex(std::make_pair(U, U));
1442   }
1443 
1444   // If this is a use of an undefined aggregate type, the aggregate must have an
1445   // identifiable address.  Just because the contents of the value are undefined
1446   // doesn't mean that the address can't be taken and compared.
1447   case TEK_Aggregate: {
1448     Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
1449     return RValue::getAggregate(DestPtr);
1450   }
1451 
1452   case TEK_Scalar:
1453     return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
1454   }
1455   llvm_unreachable("bad evaluation kind");
1456 }
1457 
1458 RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
1459                                               const char *Name) {
1460   ErrorUnsupported(E, Name);
1461   return GetUndefRValue(E->getType());
1462 }
1463 
1464 LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
1465                                               const char *Name) {
1466   ErrorUnsupported(E, Name);
1467   llvm::Type *ElTy = ConvertType(E->getType());
1468   llvm::Type *Ty = UnqualPtrTy;
1469   return MakeAddrLValue(
1470       Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
1471 }
1472 
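/// Return true if \p Obj is a CXXThisExpr, possibly wrapped in parentheses,
/// casts, or '__extension__'. A dynamic_cast is deliberately not looked
/// through, since its result can be null even when the operand is 'this'.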
1473 bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
1474   const Expr *Base = Obj;
1475   while (!isa<CXXThisExpr>(Base)) {
1476     // The result of a dynamic_cast can be null.
1477     if (isa<CXXDynamicCastExpr>(Base))
1478       return false;
1479 
1480     if (const auto *CE = dyn_cast<CastExpr>(Base)) {
1481       Base = CE->getSubExpr();
1482     } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
1483       Base = PE->getSubExpr();
1484     } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
1485       if (UO->getOpcode() == UO_Extension)
1486         Base = UO->getSubExpr();
1487       else
1488         return false;
1489     } else {
1490       return false;
1491     }
1492   }
1493   return true;
1494 }
1495 
1496 LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
1497   LValue LV;
1498   if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
1499     LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
1500   else
1501     LV = EmitLValue(E);
1502   if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
1503     SanitizerSet SkippedChecks;
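    // 'this' is always suitably aligned in well-formed code, and neither
    // 'this' nor a directly named object can be null, so the corresponding
    // sanitizer checks are redundant for those bases.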
1504     if (const auto *ME = dyn_cast<MemberExpr>(E)) {
1505       bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase());
1506       if (IsBaseCXXThis)
1507         SkippedChecks.set(SanitizerKind::Alignment, true);
1508       if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
1509         SkippedChecks.set(SanitizerKind::Null, true);
1510     }
1511     EmitTypeCheck(TCK, E->getExprLoc(), LV, E->getType(), SkippedChecks);
1512   }
1513   return LV;
1514 }
1515 
1516 /// EmitLValue - Emit code to compute a designator that specifies the location
1517 /// of the expression.
1518 ///
1519 /// This can return one of two things: a simple address or a bitfield reference.
1520 /// In either case, the LLVM Value* in the LValue structure is guaranteed to be
1521 /// an LLVM pointer type.
1522 ///
1523 /// If this returns a bitfield reference, nothing about the pointee type of the
1524 /// LLVM value is known: For example, it may not be a pointer to an integer.
1525 ///
1526 /// If this returns a normal address, and if the lvalue's C type is fixed size,
1527 /// this method guarantees that the returned pointer type will point to an LLVM
1528 /// type of the same size as the lvalue's type.  If the lvalue has a variable
1529 /// length type, this is not possible.
1530 ///
1531 LValue CodeGenFunction::EmitLValue(const Expr *E,
1532                                    KnownNonNull_t IsKnownNonNull) {
1533   // Run with sufficient stack space so that deeply nested expressions do
1534   // not cause a stack overflow.
1535   LValue LV;
1536   CGM.runWithSufficientStackSpace(
1537       E->getExprLoc(), [&] { LV = EmitLValueHelper(E, IsKnownNonNull); });
1538 
1539   if (IsKnownNonNull && !LV.isKnownNonNull())
1540     LV.setKnownNonNull();
1541   return LV;
1542 }
1543 
1544 static QualType getConstantExprReferredType(const FullExpr *E,
1545                                             const ASTContext &Ctx) {
1546   const Expr *SE = E->getSubExpr()->IgnoreImplicit();
1547   if (isa<OpaqueValueExpr>(SE))
1548     return SE->getType();
1549   return cast<CallExpr>(SE)->getCallReturnType(Ctx)->getPointeeType();
1550 }
1551 
1552 LValue CodeGenFunction::EmitLValueHelper(const Expr *E,
1553                                          KnownNonNull_t IsKnownNonNull) {
1554   ApplyDebugLocation DL(*this, E);
1555   switch (E->getStmtClass()) {
1556   default: return EmitUnsupportedLValue(E, "l-value expression");
1557 
1558   case Expr::ObjCPropertyRefExprClass:
1559     llvm_unreachable("cannot emit a property reference directly");
1560 
1561   case Expr::ObjCSelectorExprClass:
1562     return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
1563   case Expr::ObjCIsaExprClass:
1564     return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
1565   case Expr::BinaryOperatorClass:
1566     return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
1567   case Expr::CompoundAssignOperatorClass: {
1568     QualType Ty = E->getType();
1569     if (const AtomicType *AT = Ty->getAs<AtomicType>())
1570       Ty = AT->getValueType();
1571     if (!Ty->isAnyComplexType())
1572       return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
1573     return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
1574   }
1575   case Expr::CallExprClass:
1576   case Expr::CXXMemberCallExprClass:
1577   case Expr::CXXOperatorCallExprClass:
1578   case Expr::UserDefinedLiteralClass:
1579     return EmitCallExprLValue(cast<CallExpr>(E));
1580   case Expr::CXXRewrittenBinaryOperatorClass:
1581     return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm(),
1582                       IsKnownNonNull);
1583   case Expr::VAArgExprClass:
1584     return EmitVAArgExprLValue(cast<VAArgExpr>(E));
1585   case Expr::DeclRefExprClass:
1586     return EmitDeclRefLValue(cast<DeclRefExpr>(E));
1587   case Expr::ConstantExprClass: {
1588     const ConstantExpr *CE = cast<ConstantExpr>(E);
1589     if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
1590       QualType RetType = getConstantExprReferredType(CE, getContext());
1591       return MakeNaturalAlignAddrLValue(Result, RetType);
1592     }
1593     return EmitLValue(cast<ConstantExpr>(E)->getSubExpr(), IsKnownNonNull);
1594   }
1595   case Expr::ParenExprClass:
1596     return EmitLValue(cast<ParenExpr>(E)->getSubExpr(), IsKnownNonNull);
1597   case Expr::GenericSelectionExprClass:
1598     return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr(),
1599                       IsKnownNonNull);
1600   case Expr::PredefinedExprClass:
1601     return EmitPredefinedLValue(cast<PredefinedExpr>(E));
1602   case Expr::StringLiteralClass:
1603     return EmitStringLiteralLValue(cast<StringLiteral>(E));
1604   case Expr::ObjCEncodeExprClass:
1605     return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
1606   case Expr::PseudoObjectExprClass:
1607     return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
1608   case Expr::InitListExprClass:
1609     return EmitInitListLValue(cast<InitListExpr>(E));
1610   case Expr::CXXTemporaryObjectExprClass:
1611   case Expr::CXXConstructExprClass:
1612     return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
1613   case Expr::CXXBindTemporaryExprClass:
1614     return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
1615   case Expr::CXXUuidofExprClass:
1616     return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
1617   case Expr::LambdaExprClass:
1618     return EmitAggExprToLValue(E);
1619 
1620   case Expr::ExprWithCleanupsClass: {
1621     const auto *cleanups = cast<ExprWithCleanups>(E);
1622     RunCleanupsScope Scope(*this);
1623     LValue LV = EmitLValue(cleanups->getSubExpr(), IsKnownNonNull);
1624     if (LV.isSimple()) {
1625       // Defend against branches out of gnu statement expressions surrounded by
1626       // cleanups.
1627       Address Addr = LV.getAddress();
1628       llvm::Value *V = Addr.getBasePointer();
1629       Scope.ForceCleanup({&V});
1630       Addr.replaceBasePointer(V);
1631       return LValue::MakeAddr(Addr, LV.getType(), getContext(),
1632                               LV.getBaseInfo(), LV.getTBAAInfo());
1633     }
1634     // FIXME: Is it possible to create an ExprWithCleanups that produces a
1635     // bitfield lvalue or some other non-simple lvalue?
1636     return LV;
1637   }
1638 
1639   case Expr::CXXDefaultArgExprClass: {
1640     auto *DAE = cast<CXXDefaultArgExpr>(E);
1641     CXXDefaultArgExprScope Scope(*this, DAE);
1642     return EmitLValue(DAE->getExpr(), IsKnownNonNull);
1643   }
1644   case Expr::CXXDefaultInitExprClass: {
1645     auto *DIE = cast<CXXDefaultInitExpr>(E);
1646     CXXDefaultInitExprScope Scope(*this, DIE);
1647     return EmitLValue(DIE->getExpr(), IsKnownNonNull);
1648   }
1649   case Expr::CXXTypeidExprClass:
1650     return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));
1651 
1652   case Expr::ObjCMessageExprClass:
1653     return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
1654   case Expr::ObjCIvarRefExprClass:
1655     return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
1656   case Expr::StmtExprClass:
1657     return EmitStmtExprLValue(cast<StmtExpr>(E));
1658   case Expr::UnaryOperatorClass:
1659     return EmitUnaryOpLValue(cast<UnaryOperator>(E));
1660   case Expr::ArraySubscriptExprClass:
1661     return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
1662   case Expr::MatrixSubscriptExprClass:
1663     return EmitMatrixSubscriptExpr(cast<MatrixSubscriptExpr>(E));
1664   case Expr::ArraySectionExprClass:
1665     return EmitArraySectionExpr(cast<ArraySectionExpr>(E));
1666   case Expr::ExtVectorElementExprClass:
1667     return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
1668   case Expr::CXXThisExprClass:
1669     return MakeAddrLValue(LoadCXXThisAddress(), E->getType());
1670   case Expr::MemberExprClass:
1671     return EmitMemberExpr(cast<MemberExpr>(E));
1672   case Expr::CompoundLiteralExprClass:
1673     return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
1674   case Expr::ConditionalOperatorClass:
1675     return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
1676   case Expr::BinaryConditionalOperatorClass:
1677     return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
1678   case Expr::ChooseExprClass:
1679     return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(), IsKnownNonNull);
1680   case Expr::OpaqueValueExprClass:
1681     return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
1682   case Expr::SubstNonTypeTemplateParmExprClass:
1683     return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(),
1684                       IsKnownNonNull);
1685   case Expr::ImplicitCastExprClass:
1686   case Expr::CStyleCastExprClass:
1687   case Expr::CXXFunctionalCastExprClass:
1688   case Expr::CXXStaticCastExprClass:
1689   case Expr::CXXDynamicCastExprClass:
1690   case Expr::CXXReinterpretCastExprClass:
1691   case Expr::CXXConstCastExprClass:
1692   case Expr::CXXAddrspaceCastExprClass:
1693   case Expr::ObjCBridgedCastExprClass:
1694     return EmitCastLValue(cast<CastExpr>(E));
1695 
1696   case Expr::MaterializeTemporaryExprClass:
1697     return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
1698 
1699   case Expr::CoawaitExprClass:
1700     return EmitCoawaitLValue(cast<CoawaitExpr>(E));
1701   case Expr::CoyieldExprClass:
1702     return EmitCoyieldLValue(cast<CoyieldExpr>(E));
1703   case Expr::PackIndexingExprClass:
1704     return EmitLValue(cast<PackIndexingExpr>(E)->getSelectedExpr());
1705   case Expr::HLSLOutArgExprClass:
1706     llvm_unreachable("cannot emit a HLSL out argument directly");
1707   }
1708 }
1709 
1710 /// Given an object of the given canonical type, can we safely copy a
1711 /// value out of it based on its initializer?
1712 static bool isConstantEmittableObjectType(QualType type) {
1713   assert(type.isCanonical());
1714   assert(!type->isReferenceType());
1715 
1716   // Must be const-qualified but non-volatile.
1717   Qualifiers qs = type.getLocalQualifiers();
1718   if (!qs.hasConst() || qs.hasVolatile()) return false;
1719 
1720   // Otherwise, all object types satisfy this except C++ classes with
1721   // mutable subobjects or non-trivial copy/destroy behavior.
1722   if (const auto *RT = dyn_cast<RecordType>(type))
1723     if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1724       if (RD->hasMutableFields() || !RD->isTrivial())
1725         return false;
1726 
1727   return true;
1728 }
1729 
1730 /// Can we constant-emit a load of a reference to a variable of the
1731 /// given type?  This is different from predicates like
1732 /// Decl::mightBeUsableInConstantExpressions because we do want it to apply
1733 /// in situations that don't necessarily satisfy the language's rules
1734 /// for this (e.g. C++'s ODR-use rules).  For example, we want to be able
1735 /// to do this with const float variables even if those variables
1736 /// aren't marked 'constexpr'.
1737 enum ConstantEmissionKind {
1738   CEK_None,
1739   CEK_AsReferenceOnly,
1740   CEK_AsValueOrReference,
1741   CEK_AsValueOnly
1742 };
1743 static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
1744   type = type.getCanonicalType();
1745   if (const auto *ref = dyn_cast<ReferenceType>(type)) {
1746     if (isConstantEmittableObjectType(ref->getPointeeType()))
1747       return CEK_AsValueOrReference;
1748     return CEK_AsReferenceOnly;
1749   }
1750   if (isConstantEmittableObjectType(type))
1751     return CEK_AsValueOnly;
1752   return CEK_None;
1753 }
1754 
1755 /// Try to emit a reference to the given value without producing it as
1756 /// an l-value.  This is just an optimization: it spares us from emitting
1757 /// global copies of variables if they're named without triggering
1758 /// a formal use in a context where we can't emit a direct reference to them,
1759 /// for instance if a block or lambda or a member of a local class uses a
1760 /// const int variable or constexpr variable from an enclosing function.
1761 CodeGenFunction::ConstantEmission
1762 CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
1763   ValueDecl *value = refExpr->getDecl();
1764 
1765   // The value needs to be an enum constant or a constant variable.
1766   ConstantEmissionKind CEK;
1767   if (isa<ParmVarDecl>(value)) {
1768     CEK = CEK_None;
1769   } else if (auto *var = dyn_cast<VarDecl>(value)) {
1770     CEK = checkVarTypeForConstantEmission(var->getType());
1771   } else if (isa<EnumConstantDecl>(value)) {
1772     CEK = CEK_AsValueOnly;
1773   } else {
1774     CEK = CEK_None;
1775   }
1776   if (CEK == CEK_None) return ConstantEmission();
1777 
1778   Expr::EvalResult result;
1779   bool resultIsReference;
1780   QualType resultType;
1781 
1782   // It's best to evaluate all the way as an r-value if that's permitted.
1783   if (CEK != CEK_AsReferenceOnly &&
1784       refExpr->EvaluateAsRValue(result, getContext())) {
1785     resultIsReference = false;
1786     resultType = refExpr->getType();
1787 
1788   // Otherwise, try to evaluate as an l-value.
1789   } else if (CEK != CEK_AsValueOnly &&
1790              refExpr->EvaluateAsLValue(result, getContext())) {
1791     resultIsReference = true;
1792     resultType = value->getType();
1793 
1794   // Failure.
1795   } else {
1796     return ConstantEmission();
1797   }
1798 
1799   // In any case, if the initializer has side-effects, abandon ship.
1800   if (result.HasSideEffects)
1801     return ConstantEmission();
1802 
1803   // In CUDA/HIP device compilation, a lambda may capture a reference variable
1804   // referencing a global host variable by copy. In this case the lambda should
1805   // make a copy of the value of the global host variable. The DRE of the
1806   // captured reference variable cannot be emitted as a load from the host
1807   // global variable folded into a compile-time constant, since the host
1808   // variable is not accessible on the device. The DRE of the captured
1809   // reference variable has to be loaded from the lambda's captures.
1810   if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() &&
1811       refExpr->refersToEnclosingVariableOrCapture()) {
1812     auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl);
1813     if (isLambdaMethod(MD) && MD->getOverloadedOperator() == OO_Call) {
1814       const APValue::LValueBase &base = result.Val.getLValueBase();
1815       if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) {
1816         if (const VarDecl *VD = dyn_cast<const VarDecl>(D)) {
1817           if (!VD->hasAttr<CUDADeviceAttr>()) {
1818             return ConstantEmission();
1819           }
1820         }
1821       }
1822     }
1823   }
1824 
1825   // Emit as a constant.
1826   auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(),
1827                                                result.Val, resultType);
1828 
1829   // Make sure we emit a debug reference to the global variable.
1830   // This should probably fire even for variables that must be emitted.
1831   if (isa<VarDecl>(value)) {
1832     if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
1833       EmitDeclRefExprDbgValue(refExpr, result.Val);
1834   } else {
1835     assert(isa<EnumConstantDecl>(value));
1836     EmitDeclRefExprDbgValue(refExpr, result.Val);
1837   }
1838 
1839   // If we emitted a reference constant, we need to dereference that.
1840   if (resultIsReference)
1841     return ConstantEmission::forReference(C);
1842 
1843   return ConstantEmission::forValue(C);
1844 }
1845 
1846 static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF,
1847                                                         const MemberExpr *ME) {
1848   if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
1849     // Try to emit static variable member expressions as DREs.
1850     return DeclRefExpr::Create(
1851         CGF.getContext(), NestedNameSpecifierLoc(), SourceLocation(), VD,
1852         /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
1853         ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse());
1854   }
1855   return nullptr;
1856 }
1857 
1858 CodeGenFunction::ConstantEmission
1859 CodeGenFunction::tryEmitAsConstant(const MemberExpr *ME) {
1860   if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, ME))
1861     return tryEmitAsConstant(DRE);
1862   return ConstantEmission();
1863 }
1864 
1865 llvm::Value *CodeGenFunction::emitScalarConstant(
1866     const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
1867   assert(Constant && "not a constant");
1868   if (Constant.isReference())
1869     return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
1870                             E->getExprLoc())
1871         .getScalarVal();
1872   return Constant.getValue();
1873 }
1874 
1875 llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
1876                                                SourceLocation Loc) {
1877   return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
1878                           lvalue.getType(), Loc, lvalue.getBaseInfo(),
1879                           lvalue.getTBAAInfo(), lvalue.isNontemporal());
1880 }
1881 
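/// Return true if values of type \p Ty are represented as booleans: 'bool'
/// itself, an enum whose underlying integer type is 'bool', or an _Atomic
/// wrapper around either.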
1882 static bool hasBooleanRepresentation(QualType Ty) {
1883   if (Ty->isBooleanType())
1884     return true;
1885 
1886   if (const EnumType *ET = Ty->getAs<EnumType>())
1887     return ET->getDecl()->getIntegerType()->isBooleanType();
1888 
1889   if (const AtomicType *AT = Ty->getAs<AtomicType>())
1890     return hasBooleanRepresentation(AT->getValueType());
1891 
1892   return false;
1893 }
1894 
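// Compute the half-open range [Min, End) of values \p Ty may legitimately
// hold. For 'bool' this is [0, 2); for a non-fixed C++ enum (checked under
// -fstrict-enums) it is every value representable in the enum's bit width,
// e.g. 'enum E { A = 0, B = 3 }' yields [0, 4).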
1895 static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
1896                             llvm::APInt &Min, llvm::APInt &End,
1897                             bool StrictEnums, bool IsBool) {
1898   const EnumType *ET = Ty->getAs<EnumType>();
1899   bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
1900                                 ET && !ET->getDecl()->isFixed();
1901   if (!IsBool && !IsRegularCPlusPlusEnum)
1902     return false;
1903 
1904   if (IsBool) {
1905     Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
1906     End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
1907   } else {
1908     const EnumDecl *ED = ET->getDecl();
1909     ED->getValueRange(End, Min);
1910   }
1911   return true;
1912 }
1913 
1914 llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
1915   llvm::APInt Min, End;
1916   if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums,
1917                        hasBooleanRepresentation(Ty)))
1918     return nullptr;
1919 
1920   llvm::MDBuilder MDHelper(getLLVMContext());
1921   return MDHelper.createRange(Min, End);
1922 }
1923 
1924 bool CodeGenFunction::EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
1925                                            SourceLocation Loc) {
1926   bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool);
1927   bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum);
1928   if (!HasBoolCheck && !HasEnumCheck)
1929     return false;
1930 
1931   bool IsBool = hasBooleanRepresentation(Ty) ||
1932                 NSAPI(CGM.getContext()).isObjCBOOLType(Ty);
1933   bool NeedsBoolCheck = HasBoolCheck && IsBool;
1934   bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>();
1935   if (!NeedsBoolCheck && !NeedsEnumCheck)
1936     return false;
1937 
1938   // Single-bit booleans don't need to be checked. Special-case this to avoid
1939   // a bit width mismatch when handling bitfield values. This is handled by
1940   // EmitFromMemory for the non-bitfield case.
1941   if (IsBool &&
1942       cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1)
1943     return false;
1944 
1945   if (NeedsEnumCheck &&
1946       getContext().isTypeIgnoredBySanitizer(SanitizerKind::Enum, Ty))
1947     return false;
1948 
1949   llvm::APInt Min, End;
1950   if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool))
1951     return true;
1952 
1953   auto &Ctx = getLLVMContext();
1954   SanitizerScope SanScope(this);
1955   llvm::Value *Check;
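  // getRangeForType returns a half-open [Min, End) range; make End inclusive
  // for the comparisons below. A zero Min needs only a single unsigned
  // compare; otherwise both ends of the signed range are checked.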
1956   --End;
1957   if (!Min) {
1958     Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End));
1959   } else {
1960     llvm::Value *Upper =
1961         Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End));
1962     llvm::Value *Lower =
1963         Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min));
1964     Check = Builder.CreateAnd(Upper, Lower);
1965   }
1966   llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
1967                                   EmitCheckTypeDescriptor(Ty)};
1968   SanitizerKind::SanitizerOrdinal Kind =
1969       NeedsEnumCheck ? SanitizerKind::SO_Enum : SanitizerKind::SO_Bool;
1970   EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue,
1971             StaticArgs, EmitCheckValue(Value));
1972   return true;
1973 }
1974 
1975 llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
1976                                                QualType Ty,
1977                                                SourceLocation Loc,
1978                                                LValueBaseInfo BaseInfo,
1979                                                TBAAAccessInfo TBAAInfo,
1980                                                bool isNontemporal) {
1981   if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
1982     if (GV->isThreadLocal())
1983       Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
1984                               NotKnownNonNull);
1985 
1986   if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
1987     // Boolean vectors use `iN` as storage type.
1988     if (ClangVecTy->isExtVectorBoolType()) {
1989       llvm::Type *ValTy = ConvertType(Ty);
1990       unsigned ValNumElems =
1991           cast<llvm::FixedVectorType>(ValTy)->getNumElements();
1992       // Load the `iP` storage object (P is the padded vector size).
1993       auto *RawIntV = Builder.CreateLoad(Addr, Volatile, "load_bits");
1994       const auto *RawIntTy = RawIntV->getType();
1995       assert(RawIntTy->isIntegerTy() && "compressed iN storage for bitvectors");
1996       // Bitcast iP --> <P x i1>.
1997       auto *PaddedVecTy = llvm::FixedVectorType::get(
1998           Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
1999       llvm::Value *V = Builder.CreateBitCast(RawIntV, PaddedVecTy);
2000       // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
2001       V = emitBoolVecConversion(V, ValNumElems, "extractvec");
2002 
2003       return EmitFromMemory(V, Ty);
2004     }
2005 
2006     // Handle vectors that are likely to be widened to a larger size in
2007     // memory to optimize performance.
2008     auto *VTy = cast<llvm::FixedVectorType>(Addr.getElementType());
2009     auto *NewVecTy =
2010         CGM.getABIInfo().getOptimalVectorMemoryType(VTy, getLangOpts());
2011 
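    // E.g. a <3 x float> may be widened to <4 x float> in memory; load the
    // wider type and drop the padding lanes with the shuffle below.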
2012     if (VTy != NewVecTy) {
2013       Address Cast = Addr.withElementType(NewVecTy);
2014       llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVecN");
2015       unsigned OldNumElements = VTy->getNumElements();
2016       SmallVector<int, 16> Mask(OldNumElements);
2017       std::iota(Mask.begin(), Mask.end(), 0);
2018       V = Builder.CreateShuffleVector(V, Mask, "extractVec");
2019       return EmitFromMemory(V, Ty);
2020     }
2021   }
2022 
2023   // Atomic operations have to be done on integral types.
2024   LValue AtomicLValue =
2025       LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
2026   if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
2027     return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
2028   }
2029 
2030   Addr =
2031       Addr.withElementType(convertTypeForLoadStore(Ty, Addr.getElementType()));
2032 
2033   llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
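  // A '!nontemporal' annotation (metadata value 1) tells the backend that the
  // loaded data will not be reused soon, so it may use streaming loads that
  // bypass the cache hierarchy.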
2034   if (isNontemporal) {
2035     llvm::MDNode *Node = llvm::MDNode::get(
2036         Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
2037     Load->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
2038   }
2039 
2040   CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);
2041 
2042   if (EmitScalarRangeCheck(Load, Ty, Loc)) {
2043     // In order to prevent the optimizer from throwing away the check, don't
2044     // attach range metadata to the load.
2045   } else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
2046     if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty)) {
2047       Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
2048       Load->setMetadata(llvm::LLVMContext::MD_noundef,
2049                         llvm::MDNode::get(getLLVMContext(), {}));
2050     }
2051 
2052   return EmitFromMemory(Load, Ty);
2053 }
2054 
2055 /// Converts a scalar value from its primary IR type (as returned
2056 /// by ConvertType) to its load/store type (as returned by
2057 /// convertTypeForLoadStore).
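/// E.g. a C++ 'bool' held as an i1 value is widened here to its i8 memory
/// form ("storedv") before being stored.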
2058 llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
2059   if (hasBooleanRepresentation(Ty) || Ty->isBitIntType()) {
2060     llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
2061     bool Signed = Ty->isSignedIntegerOrEnumerationType();
2062     return Builder.CreateIntCast(Value, StoreTy, Signed, "storedv");
2063   }
2064 
2065   if (Ty->isExtVectorBoolType()) {
2066     llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
2067     // Expand to the memory bit width.
2068     unsigned MemNumElems = StoreTy->getPrimitiveSizeInBits();
2069     // <N x i1> --> <P x i1>.
2070     Value = emitBoolVecConversion(Value, MemNumElems, "insertvec");
2071     // <P x i1> --> iP.
2072     Value = Builder.CreateBitCast(Value, StoreTy);
2073   }
2074 
2075   return Value;
2076 }
2077 
2078 /// Converts a scalar value from its load/store type (as returned
2079 /// by convertTypeForLoadStore) to its primary IR type (as returned
2080 /// by ConvertType).
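/// E.g. an i8 loaded for a C++ 'bool' is truncated back to an i1 ("loadedv").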
2081 llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
2082   if (Ty->isExtVectorBoolType()) {
2083     const auto *RawIntTy = Value->getType();
2084     // Bitcast iP --> <P x i1>.
2085     auto *PaddedVecTy = llvm::FixedVectorType::get(
2086         Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
2087     auto *V = Builder.CreateBitCast(Value, PaddedVecTy);
2088     // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
2089     llvm::Type *ValTy = ConvertType(Ty);
2090     unsigned ValNumElems = cast<llvm::FixedVectorType>(ValTy)->getNumElements();
2091     return emitBoolVecConversion(V, ValNumElems, "extractvec");
2092   }
2093 
2094   if (hasBooleanRepresentation(Ty) || Ty->isBitIntType()) {
2095     llvm::Type *ResTy = ConvertType(Ty);
2096     return Builder.CreateTrunc(Value, ResTy, "loadedv");
2097   }
2098 
2099   return Value;
2100 }
2101 
2102 // Convert the pointer of \p Addr to a pointer to a vector (the value type of
2103 // MatrixType), if it points to an array (the memory type of MatrixType).
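// E.g. a 2x2 matrix of float is [4 x float] in memory but <4 x float> as a
// value; only the element type of the address changes, not the pointer.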
2104 static RawAddress MaybeConvertMatrixAddress(RawAddress Addr,
2105                                             CodeGenFunction &CGF,
2106                                             bool IsVector = true) {
2107   auto *ArrayTy = dyn_cast<llvm::ArrayType>(Addr.getElementType());
2108   if (ArrayTy && IsVector) {
2109     auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
2110                                                 ArrayTy->getNumElements());
2111 
2112     return Addr.withElementType(VectorTy);
2113   }
2114   auto *VectorTy = dyn_cast<llvm::VectorType>(Addr.getElementType());
2115   if (VectorTy && !IsVector) {
2116     auto *ArrayTy = llvm::ArrayType::get(
2117         VectorTy->getElementType(),
2118         cast<llvm::FixedVectorType>(VectorTy)->getNumElements());
2119 
2120     return Addr.withElementType(ArrayTy);
2121   }
2122 
2123   return Addr;
2124 }
2125 
2126 // Emit a store of a matrix LValue. This may require casting the original
2127 // pointer from the memory type (ArrayType) to a pointer to the value type
2128 // (VectorType).
2129 static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
2130                                     bool isInit, CodeGenFunction &CGF) {
2131   Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(), CGF,
2132                                            value->getType()->isVectorTy());
2133   CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
2134                         lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
2135                         lvalue.isNontemporal());
2136 }
2137 
2138 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
2139                                         bool Volatile, QualType Ty,
2140                                         LValueBaseInfo BaseInfo,
2141                                         TBAAAccessInfo TBAAInfo,
2142                                         bool isInit, bool isNontemporal) {
2143   if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
2144     if (GV->isThreadLocal())
2145       Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
2146                               NotKnownNonNull);
2147 
2148   // Handle vectors that are likely to be widened to a larger size in memory
2149   // to optimize performance.
2150   llvm::Type *SrcTy = Value->getType();
2151   if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
2152     if (auto *VecTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
2153       auto *NewVecTy =
2154           CGM.getABIInfo().getOptimalVectorMemoryType(VecTy, getLangOpts());
2155       if (!ClangVecTy->isExtVectorBoolType() && VecTy != NewVecTy) {
2156         SmallVector<int, 16> Mask(NewVecTy->getNumElements(), -1);
2157         std::iota(Mask.begin(), Mask.begin() + VecTy->getNumElements(), 0);
2158         Value = Builder.CreateShuffleVector(Value, Mask, "extractVec");
2159         SrcTy = NewVecTy;
2160       }
2161       if (Addr.getElementType() != SrcTy)
2162         Addr = Addr.withElementType(SrcTy);
2163     }
2164   }
2165 
2166   Value = EmitToMemory(Value, Ty);
2167 
2168   LValue AtomicLValue =
2169       LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
2170   if (Ty->isAtomicType() ||
2171       (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) {
2172     EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit);
2173     return;
2174   }
2175 
2176   llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
2177   if (isNontemporal) {
2178     llvm::MDNode *Node =
2179         llvm::MDNode::get(Store->getContext(),
2180                           llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
2181     Store->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
2182   }
2183 
2184   CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
2185 }
2186 
2187 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
2188                                         bool isInit) {
2189   if (lvalue.getType()->isConstantMatrixType()) {
2190     EmitStoreOfMatrixScalar(value, lvalue, isInit, *this);
2191     return;
2192   }
2193 
2194   EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
2195                     lvalue.getType(), lvalue.getBaseInfo(),
2196                     lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
2197 }
2198 
2199 // Emit a load of an LValue of matrix type. This may require casting the pointer
2200 // from the memory type (ArrayType) to a pointer to the value type (VectorType).
2201 static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc,
2202                                      CodeGenFunction &CGF) {
2203   assert(LV.getType()->isConstantMatrixType());
2204   Address Addr = MaybeConvertMatrixAddress(LV.getAddress(), CGF);
2205   LV.setAddress(Addr);
2206   return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
2207 }
2208 
2209 RValue CodeGenFunction::EmitLoadOfAnyValue(LValue LV, AggValueSlot Slot,
2210                                            SourceLocation Loc) {
2211   QualType Ty = LV.getType();
2212   switch (getEvaluationKind(Ty)) {
2213   case TEK_Scalar:
2214     return EmitLoadOfLValue(LV, Loc);
2215   case TEK_Complex:
2216     return RValue::getComplex(EmitLoadOfComplex(LV, Loc));
2217   case TEK_Aggregate:
2218     EmitAggFinalDestCopy(Ty, Slot, LV, EVK_NonRValue);
2219     return Slot.asRValue();
2220   }
2221   llvm_unreachable("bad evaluation kind");
2222 }
2223 
2224 /// EmitLoadOfLValue - Given an lvalue that represents a value, this method
2225 /// loads the value from the lvalue's address and returns the result as
2226 /// an rvalue.
2227 RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
2228   if (LV.isObjCWeak()) {
2229     // load of a __weak object.
2230     Address AddrWeakObj = LV.getAddress();
2231     return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
2232                                                              AddrWeakObj));
2233   }
2234   if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
2235     // In MRC mode, we do a load+autorelease.
2236     if (!getLangOpts().ObjCAutoRefCount) {
2237       return RValue::get(EmitARCLoadWeak(LV.getAddress()));
2238     }
2239 
2240     // In ARC mode, we load retained and then consume the value.
2241     llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress());
2242     Object = EmitObjCConsumeObject(LV.getType(), Object);
2243     return RValue::get(Object);
2244   }
2245 
2246   if (LV.isSimple()) {
2247     assert(!LV.getType()->isFunctionType());
2248 
2249     if (LV.getType()->isConstantMatrixType())
2250       return EmitLoadOfMatrixLValue(LV, Loc, *this);
2251 
2252     // Everything needs a load.
2253     return RValue::get(EmitLoadOfScalar(LV, Loc));
2254   }
2255 
2256   if (LV.isVectorElt()) {
2257     llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
2258                                               LV.isVolatileQualified());
2259     return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
2260                                                     "vecext"));
2261   }
2262 
2263   // If this is a reference to a subset of the elements of a vector, either
2264   // shuffle the input or extract/insert them as appropriate.
2265   if (LV.isExtVectorElt()) {
2266     return EmitLoadOfExtVectorElementLValue(LV);
2267   }
2268 
2269   // Accesses to global register variables always go through intrinsics.
2270   if (LV.isGlobalReg())
2271     return EmitLoadOfGlobalRegLValue(LV);
2272 
2273   if (LV.isMatrixElt()) {
2274     llvm::Value *Idx = LV.getMatrixIdx();
2275     if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2276       const auto *const MatTy = LV.getType()->castAs<ConstantMatrixType>();
2277       llvm::MatrixBuilder MB(Builder);
2278       MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2279     }
2280     llvm::LoadInst *Load =
2281         Builder.CreateLoad(LV.getMatrixAddress(), LV.isVolatileQualified());
2282     return RValue::get(Builder.CreateExtractElement(Load, Idx, "matrixext"));
2283   }
2284 
2285   assert(LV.isBitField() && "Unknown LValue type!");
2286   return EmitLoadOfBitfieldLValue(LV, Loc);
2287 }
2288 
2289 RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
2290                                                  SourceLocation Loc) {
2291   const CGBitFieldInfo &Info = LV.getBitFieldInfo();
2292 
2293   // Get the output type.
2294   llvm::Type *ResLTy = ConvertType(LV.getType());
2295 
2296   Address Ptr = LV.getBitFieldAddress();
2297   llvm::Value *Val =
2298       Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
2299 
2300   bool UseVolatile = LV.isVolatileQualified() &&
2301                      Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2302   const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2303   const unsigned StorageSize =
2304       UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
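  // E.g. a signed 3-bit field at bit offset 2 of an i8 container: shl by 3
  // moves the field's sign bit to bit 7, then ashr by 5 sign-extends the
  // field back down into place.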
2305   if (Info.IsSigned) {
2306     assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
2307     unsigned HighBits = StorageSize - Offset - Info.Size;
2308     if (HighBits)
2309       Val = Builder.CreateShl(Val, HighBits, "bf.shl");
2310     if (Offset + HighBits)
2311       Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr");
2312   } else {
2313     if (Offset)
2314       Val = Builder.CreateLShr(Val, Offset, "bf.lshr");
2315     if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
2316       Val = Builder.CreateAnd(
2317           Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear");
2318   }
2319   Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
2320   EmitScalarRangeCheck(Val, LV.getType(), Loc);
2321   return RValue::get(Val);
2322 }
2323 
2324 // If this is a reference to a subset of the elements of a vector, create an
2325 // appropriate shufflevector.
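// E.g. for 'float4 v', 'v.yz' loads the whole vector and shuffles out lanes
// <1, 2>, while 'v.x' becomes a single extractelement.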
2326 RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
2327   llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
2328                                         LV.isVolatileQualified());
2329 
2330   // HLSL allows treating scalars as one-element vectors. Converting the scalar
2331   // IR value to a vector here allows the rest of codegen to behave as normal.
2332   if (getLangOpts().HLSL && !Vec->getType()->isVectorTy()) {
2333     llvm::Type *DstTy = llvm::FixedVectorType::get(Vec->getType(), 1);
2334     llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
2335     Vec = Builder.CreateInsertElement(DstTy, Vec, Zero, "cast.splat");
2336   }
2337 
2338   const llvm::Constant *Elts = LV.getExtVectorElts();
2339 
2340   // If the result of the expression is a non-vector type, we must be extracting
2341   // a single element.  Just codegen as an extractelement.
2342   const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
2343   if (!ExprVT) {
2344     unsigned InIdx = getAccessedFieldNo(0, Elts);
2345     llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2346     return RValue::get(Builder.CreateExtractElement(Vec, Elt));
2347   }
2348 
2349   // Always use a shufflevector to try to retain the original program structure.
2350   unsigned NumResultElts = ExprVT->getNumElements();
2351 
2352   SmallVector<int, 4> Mask;
2353   for (unsigned i = 0; i != NumResultElts; ++i)
2354     Mask.push_back(getAccessedFieldNo(i, Elts));
2355 
2356   Vec = Builder.CreateShuffleVector(Vec, Mask);
2357   return RValue::get(Vec);
2358 }
2359 
2360 /// Generates lvalue for partial ext_vector access.
2361 Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
2362   Address VectorAddress = LV.getExtVectorAddress();
2363   QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
2364   llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);
2365 
2366   Address CastToPointerElement = VectorAddress.withElementType(VectorElementTy);
2367 
2368   const llvm::Constant *Elts = LV.getExtVectorElts();
2369   unsigned ix = getAccessedFieldNo(0, Elts);
2370 
2371   Address VectorBasePtrPlusIx =
2372     Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
2373                                    "vector.elt");
2374 
2375   return VectorBasePtrPlusIx;
2376 }
2377 
2378 /// Loads of global named registers are always calls to intrinsics.
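/// E.g. 'register unsigned long current_sp asm("sp");' is read through the
/// @llvm.read_register intrinsic on metadata !"sp"; a pointer-typed result is
/// then rebuilt with an inttoptr.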
2379 RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) {
2380   assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
2381          "Bad type for register variable");
2382   llvm::MDNode *RegName = cast<llvm::MDNode>(
2383       cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());
2384 
2385   // We accept integer and pointer types only.
2386   llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
2387   llvm::Type *Ty = OrigTy;
2388   if (OrigTy->isPointerTy())
2389     Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2390   llvm::Type *Types[] = { Ty };
2391 
2392   llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
2393   llvm::Value *Call = Builder.CreateCall(
2394       F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
2395   if (OrigTy->isPointerTy())
2396     Call = Builder.CreateIntToPtr(Call, OrigTy);
2397   return RValue::get(Call);
2398 }
2399 
2400 /// EmitStoreThroughLValue - Store the specified rvalue into the specified
2401 /// lvalue, where both are guaranteed to have the same type, and that type
2402 /// is 'Ty'.
2403 void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
2404                                              bool isInit) {
2405   if (!Dst.isSimple()) {
2406     if (Dst.isVectorElt()) {
2407       // Read/modify/write the vector, inserting the new element.
2408       llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
2409                                             Dst.isVolatileQualified());
2410       auto *IRStoreTy = dyn_cast<llvm::IntegerType>(Vec->getType());
2411       if (IRStoreTy) {
2412         auto *IRVecTy = llvm::FixedVectorType::get(
2413             Builder.getInt1Ty(), IRStoreTy->getPrimitiveSizeInBits());
2414         Vec = Builder.CreateBitCast(Vec, IRVecTy);
2415         // iN --> <N x i1>.
2416       }
2417       llvm::Value *SrcVal = Src.getScalarVal();
2418       // Allow inserting `<1 x T>` into an `<N x T>`. It can happen with scalar
2419       // types which are mapped to vector LLVM IR types (e.g. for implementing
2420       // an ABI).
2421       if (auto *EltTy = dyn_cast<llvm::FixedVectorType>(SrcVal->getType());
2422           EltTy && EltTy->getNumElements() == 1)
2423         SrcVal = Builder.CreateBitCast(SrcVal, EltTy->getElementType());
2424       Vec = Builder.CreateInsertElement(Vec, SrcVal, Dst.getVectorIdx(),
2425                                         "vecins");
2426       if (IRStoreTy) {
2427         // <N x i1> --> <iN>.
2428         Vec = Builder.CreateBitCast(Vec, IRStoreTy);
2429       }
2430       Builder.CreateStore(Vec, Dst.getVectorAddress(),
2431                           Dst.isVolatileQualified());
2432       return;
2433     }
2434 
2435     // If this is an update of extended vector elements, insert them as
2436     // appropriate.
2437     if (Dst.isExtVectorElt())
2438       return EmitStoreThroughExtVectorComponentLValue(Src, Dst);
2439 
2440     if (Dst.isGlobalReg())
2441       return EmitStoreThroughGlobalRegLValue(Src, Dst);
2442 
2443     if (Dst.isMatrixElt()) {
2444       llvm::Value *Idx = Dst.getMatrixIdx();
2445       if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2446         const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>();
2447         llvm::MatrixBuilder MB(Builder);
2448         MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2449       }
2450       llvm::Instruction *Load = Builder.CreateLoad(Dst.getMatrixAddress());
2451       llvm::Value *Vec =
2452           Builder.CreateInsertElement(Load, Src.getScalarVal(), Idx, "matins");
2453       Builder.CreateStore(Vec, Dst.getMatrixAddress(),
2454                           Dst.isVolatileQualified());
2455       return;
2456     }
2457 
2458     assert(Dst.isBitField() && "Unknown LValue type");
2459     return EmitStoreThroughBitfieldLValue(Src, Dst);
2460   }
2461 
2462   // There's special magic for assigning into an ARC-qualified l-value.
2463   if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
2464     switch (Lifetime) {
2465     case Qualifiers::OCL_None:
2466       llvm_unreachable("present but none");
2467 
2468     case Qualifiers::OCL_ExplicitNone:
2469       // nothing special
2470       break;
2471 
2472     case Qualifiers::OCL_Strong:
2473       if (isInit) {
2474         Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal()));
2475         break;
2476       }
2477       EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
2478       return;
2479 
2480     case Qualifiers::OCL_Weak:
2481       if (isInit)
2482         // Initialize and then skip the primitive store.
2483         EmitARCInitWeak(Dst.getAddress(), Src.getScalarVal());
2484       else
2485         EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(),
2486                          /*ignore*/ true);
2487       return;
2488 
2489     case Qualifiers::OCL_Autoreleasing:
2490       Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
2491                                                      Src.getScalarVal()));
2492       // fall into the normal path
2493       break;
2494     }
2495   }
2496 
2497   if (Dst.isObjCWeak() && !Dst.isNonGC()) {
2498     // Assignment into a __weak object.
2499     Address LvalueDst = Dst.getAddress();
2500     llvm::Value *src = Src.getScalarVal();
2501     CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
2502     return;
2503   }
2504 
2505   if (Dst.isObjCStrong() && !Dst.isNonGC()) {
2506     // Assignment into a __strong object.
2507     Address LvalueDst = Dst.getAddress();
2508     llvm::Value *src = Src.getScalarVal();
2509     if (Dst.isObjCIvar()) {
2510       assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
2511       llvm::Type *ResultType = IntPtrTy;
2512       Address dst = EmitPointerWithAlignment(Dst.getBaseIvarExp());
2513       llvm::Value *RHS = dst.emitRawPointer(*this);
2514       RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
2515       llvm::Value *LHS = Builder.CreatePtrToInt(LvalueDst.emitRawPointer(*this),
2516                                                 ResultType, "sub.ptr.lhs.cast");
2517       llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
2518       CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, BytesBetween);
2519     } else if (Dst.isGlobalObjCRef()) {
2520       CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
2521                                                 Dst.isThreadLocalRef());
2522     } else {
2523       CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
2524     }
2525     return;
2526   }
2527 
2528   assert(Src.isScalar() && "Can't emit an agg store with this method");
2529   EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
2530 }
2531 
2532 void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
2533                                                      llvm::Value **Result) {
2534   const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
2535   llvm::Type *ResLTy = convertTypeForLoadStore(Dst.getType());
2536   Address Ptr = Dst.getBitFieldAddress();
2537 
2538   // Get the source value, truncated to the width of the bit-field.
2539   llvm::Value *SrcVal = Src.getScalarVal();
2540 
2541   // Cast the source to the storage type and shift it into place.
2542   SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
2543                                  /*isSigned=*/false);
2544   llvm::Value *MaskedVal = SrcVal;
2545 
2546   const bool UseVolatile =
2547       CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
2548       Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2549   const unsigned StorageSize =
2550       UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2551   const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2552   // See if there are other bits in the bitfield's storage we'll need to load
2553   // and mask together with the source before storing.
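  // E.g. storing to a 3-bit field at bit offset 2 of an i8 container: mask
  // the source to its low 3 bits, shift it left by 2, clear bits [2, 5) of
  // the previously loaded container value, then or the two halves together.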
2554   if (StorageSize != Info.Size) {
2555     assert(StorageSize > Info.Size && "Invalid bitfield size.");
2556     llvm::Value *Val =
2557         Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
2558 
2559     // Mask the source value as needed.
2560     if (!hasBooleanRepresentation(Dst.getType()))
2561       SrcVal = Builder.CreateAnd(
2562           SrcVal, llvm::APInt::getLowBitsSet(StorageSize, Info.Size),
2563           "bf.value");
2564     MaskedVal = SrcVal;
2565     if (Offset)
2566       SrcVal = Builder.CreateShl(SrcVal, Offset, "bf.shl");
2567 
2568     // Mask out the original value.
2569     Val = Builder.CreateAnd(
2570         Val, ~llvm::APInt::getBitsSet(StorageSize, Offset, Offset + Info.Size),
2571         "bf.clear");
2572 
2573     // Or together the unchanged values and the source value.
2574     SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
2575   } else {
2576     assert(Offset == 0);
2577     // According to the AACPS:
2578     // When a volatile bit-field is written, and its container does not overlap
2579     // with any non-bit-field member, its container must be read exactly once
2580     // and written exactly once using the access width appropriate to the type
2581     // of the container. The two accesses are not atomic.
2582     if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
2583         CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
2584       Builder.CreateLoad(Ptr, true, "bf.load");
2585   }
2586 
2587   // Write the new value back out.
2588   Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());
2589 
2590   // Return the new value of the bit-field, if requested.
2591   if (Result) {
2592     llvm::Value *ResultVal = MaskedVal;
2593 
2594     // Sign extend the value if needed.
2595     if (Info.IsSigned) {
2596       assert(Info.Size <= StorageSize);
2597       unsigned HighBits = StorageSize - Info.Size;
2598       if (HighBits) {
2599         ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
2600         ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
2601       }
2602     }
2603 
2604     ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
2605                                       "bf.result.cast");
2606     *Result = EmitFromMemory(ResultVal, Dst.getType());
2607   }
2608 }
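
// For illustration: given a hypothetical `struct S { unsigned pad : 3;
// unsigned x : 5; };` (so `x` occupies bits [3, 8) of a 32-bit storage unit:
// Offset = 3, Size = 5, StorageSize = 32), a store of %v into `s.x` emits
// roughly:
//
//   %bf.load  = load i32, ptr %p        ; read the current storage unit
//   %bf.value = and i32 %v, 31          ; keep the low Info.Size bits
//   %bf.shl   = shl i32 %bf.value, 3    ; shift into place
//   %bf.clear = and i32 %bf.load, -249  ; clear bits [3, 8): ~(31 << 3)
//   %bf.set   = or i32 %bf.clear, %bf.shl
//   store i32 %bf.set, ptr %p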
2609 
2610 void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
2611                                                                LValue Dst) {
2612   // HLSL allows storing to scalar values through ExtVector component LValues.
2613   // To support this we need to handle the case where the destination address is
2614   // a scalar.
2615   Address DstAddr = Dst.getExtVectorAddress();
2616   if (!DstAddr.getElementType()->isVectorTy()) {
2617     assert(!Dst.getType()->isVectorType() &&
2618            "this should only occur for non-vector l-values");
2619     Builder.CreateStore(Src.getScalarVal(), DstAddr, Dst.isVolatileQualified());
2620     return;
2621   }
2622 
2623   // This access turns into a read/modify/write of the vector.  Load the input
2624   // value now.
2625   llvm::Value *Vec = Builder.CreateLoad(DstAddr, Dst.isVolatileQualified());
2626   const llvm::Constant *Elts = Dst.getExtVectorElts();
2627 
2628   llvm::Value *SrcVal = Src.getScalarVal();
2629 
2630   if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
2631     unsigned NumSrcElts = VTy->getNumElements();
2632     unsigned NumDstElts =
2633         cast<llvm::FixedVectorType>(Vec->getType())->getNumElements();
2634     if (NumDstElts == NumSrcElts) {
2635       // Use a shuffle vector when the source and destination have the same
2636       // number of elements. Invert the access mask so each source element
2637       // lands in the destination slot it is being stored to.
2638       SmallVector<int, 4> Mask(NumDstElts);
2639       for (unsigned i = 0; i != NumSrcElts; ++i)
2640         Mask[getAccessedFieldNo(i, Elts)] = i;
2641 
2642       Vec = Builder.CreateShuffleVector(SrcVal, Mask);
2643     } else if (NumDstElts > NumSrcElts) {
2644       // Extend the source vector to the same length and then shuffle it
2645       // into the destination.
2646       // FIXME: since we're shuffling with undef, can we just use the indices
2647       //        into that?  This could be simpler.
2648       SmallVector<int, 4> ExtMask;
2649       for (unsigned i = 0; i != NumSrcElts; ++i)
2650         ExtMask.push_back(i);
2651       ExtMask.resize(NumDstElts, -1);
2652       llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(SrcVal, ExtMask);
2653       // Build an identity mask over the destination elements.
2654       SmallVector<int, 4> Mask;
2655       for (unsigned i = 0; i != NumDstElts; ++i)
2656         Mask.push_back(i);
2657 
2658       // When the vector size is odd and .odd or .hi is used, the last element
2659       // of the Elts constant array will be one past the size of the vector.
2660       // Ignore the last element here if it is one past the end of the mask.
2661       if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
2662         NumSrcElts--;
2663 
2664       // Modify the mask so the accessed lanes are taken from the source vector.
2665       for (unsigned i = 0; i != NumSrcElts; ++i)
2666         Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts;
2667       Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask);
2668     } else {
2669       // We should never shorten the vector
2670       llvm_unreachable("unexpected shorten vector length");
2671     }
2672   } else {
2673     // If the Src is a scalar (not a vector) and the target is a vector, it must
2674     // be updating one element.
2675     unsigned InIdx = getAccessedFieldNo(0, Elts);
2676     llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2677     Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
2678   }
2679 
2680   Builder.CreateStore(Vec, Dst.getExtVectorAddress(),
2681                       Dst.isVolatileQualified());
2682 }
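
// For illustration: in a hypothetical OpenCL-style `float4 v; float2 w;
// v.zw = w;`, NumSrcElts = 2 and NumDstElts = 4, so the source is roughly
// widened with ExtMask = <0, 1, -1, -1> and then blended into the loaded
// vector with Mask = <0, 1, 4, 5>: lanes x and y are kept, lanes z and w are
// replaced by the two source lanes, and the result is stored back.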
2683 
2684 /// Stores to global named registers are always calls to intrinsics.
2685 void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) {
2686   assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
2687          "Bad type for register variable");
2688   llvm::MDNode *RegName = cast<llvm::MDNode>(
2689       cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
2690   assert(RegName && "Register LValue is not metadata");
2691 
2692   // We accept integer and pointer types only
2693   llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
2694   llvm::Type *Ty = OrigTy;
2695   if (OrigTy->isPointerTy())
2696     Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2697   llvm::Type *Types[] = { Ty };
2698 
2699   llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
2700   llvm::Value *Value = Src.getScalarVal();
2701   if (OrigTy->isPointerTy())
2702     Value = Builder.CreatePtrToInt(Value, Ty);
2703   Builder.CreateCall(
2704       F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
2705 }
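
// For illustration: a hypothetical `register unsigned long current_sp
// asm("sp"); current_sp = v;` on a 64-bit target emits roughly
//
//   call void @llvm.write_register.i64(metadata !0, i64 %v)
//
// where !0 = !{!"sp"} names the register.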
2706 
2707 // setObjCGCLValueClass - sets the class of the lvalue for the purpose of
2708 // generating the write-barrier API. The class is currently a global, an
2709 // ivar, or neither.
2710 static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
2711                                  LValue &LV,
2712                                  bool IsMemberAccess=false) {
2713   if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
2714     return;
2715 
2716   if (isa<ObjCIvarRefExpr>(E)) {
2717     QualType ExpTy = E->getType();
2718     if (IsMemberAccess && ExpTy->isPointerType()) {
2719       // If the ivar is a structure pointer, assigning to a field of
2720       // this struct follows gcc's behavior and conservatively makes it
2721       // a non-ivar write-barrier.
2722       ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2723       if (ExpTy->isRecordType()) {
2724         LV.setObjCIvar(false);
2725         return;
2726       }
2727     }
2728     LV.setObjCIvar(true);
2729     auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
2730     LV.setBaseIvarExp(Exp->getBase());
2731     LV.setObjCArray(E->getType()->isArrayType());
2732     return;
2733   }
2734 
2735   if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
2736     if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
2737       if (VD->hasGlobalStorage()) {
2738         LV.setGlobalObjCRef(true);
2739         LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
2740       }
2741     }
2742     LV.setObjCArray(E->getType()->isArrayType());
2743     return;
2744   }
2745 
2746   if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
2747     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2748     return;
2749   }
2750 
2751   if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
2752     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2753     if (LV.isObjCIvar()) {
2754       // If cast is to a structure pointer, follow gcc's behavior and make it
2755       // a non-ivar write-barrier.
2756       QualType ExpTy = E->getType();
2757       if (ExpTy->isPointerType())
2758         ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2759       if (ExpTy->isRecordType())
2760         LV.setObjCIvar(false);
2761     }
2762     return;
2763   }
2764 
2765   if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
2766     setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
2767     return;
2768   }
2769 
2770   if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
2771     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2772     return;
2773   }
2774 
2775   if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
2776     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2777     return;
2778   }
2779 
2780   if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
2781     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2782     return;
2783   }
2784 
2785   if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
2786     setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
2787     if (LV.isObjCIvar() && !LV.isObjCArray())
2788       // Using array syntax to assign to what an ivar points to is not the
2789       // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
2790       LV.setObjCIvar(false);
2791     else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
2792       // Using array syntax to assign to what a global points to is not the
2793       // same as assigning to the global itself. {id *G;} G[i] = 0;
2794       LV.setGlobalObjCRef(false);
2795     return;
2796   }
2797 
2798   if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
2799     setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
2800     // We don't know if the member is an 'ivar', but this flag is looked at
2801     // only in the context of LV.isObjCIvar().
2802     LV.setObjCArray(E->getType()->isArrayType());
2803     return;
2804   }
2805 }
2806 
2807 static LValue EmitThreadPrivateVarDeclLValue(
2808     CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
2809     llvm::Type *RealVarTy, SourceLocation Loc) {
2810   if (CGF.CGM.getLangOpts().OpenMPIRBuilder)
2811     Addr = CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
2812         CGF, VD, Addr, Loc);
2813   else
2814     Addr =
2815         CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);
2816 
2817   Addr = Addr.withElementType(RealVarTy);
2818   return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2819 }
2820 
2821 static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF,
2822                                            const VarDecl *VD, QualType T) {
2823   std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
2824       OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2825   // Return an invalid address if variable is MT_To (or MT_Enter starting with
2826   // OpenMP 5.2) and unified memory is not enabled. For all other cases: MT_Link
2827   // and MT_To (or MT_Enter) with unified memory, return a valid address.
2828   if (!Res || ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
2829                 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
2830                !CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory()))
2831     return Address::invalid();
2832   assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
2833           ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
2834             *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
2835            CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) &&
2836          "Expected link clause OR to clause with unified memory enabled.");
2837   QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
2838   Address Addr = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
2839   return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
2840 }
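
// For illustration: for a hypothetical `int g;` marked with
// `#pragma omp declare target link(g)`, device code reaching this path does
// not take the address of `g` directly; it loads the pointer stored in the
// runtime-managed slot returned by getAddrOfDeclareTargetVar and uses that
// as the variable's address.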
2841 
2842 Address
2843 CodeGenFunction::EmitLoadOfReference(LValue RefLVal,
2844                                      LValueBaseInfo *PointeeBaseInfo,
2845                                      TBAAAccessInfo *PointeeTBAAInfo) {
2846   llvm::LoadInst *Load =
2847       Builder.CreateLoad(RefLVal.getAddress(), RefLVal.isVolatile());
2848   CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo());
2849   return makeNaturalAddressForPointer(Load, RefLVal.getType()->getPointeeType(),
2850                                       CharUnits(), /*ForPointeeType=*/true,
2851                                       PointeeBaseInfo, PointeeTBAAInfo);
2852 }
2853 
2854 LValue CodeGenFunction::EmitLoadOfReferenceLValue(LValue RefLVal) {
2855   LValueBaseInfo PointeeBaseInfo;
2856   TBAAAccessInfo PointeeTBAAInfo;
2857   Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo,
2858                                             &PointeeTBAAInfo);
2859   return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(),
2860                         PointeeBaseInfo, PointeeTBAAInfo);
2861 }
2862 
2863 Address CodeGenFunction::EmitLoadOfPointer(Address Ptr,
2864                                            const PointerType *PtrTy,
2865                                            LValueBaseInfo *BaseInfo,
2866                                            TBAAAccessInfo *TBAAInfo) {
2867   llvm::Value *Addr = Builder.CreateLoad(Ptr);
2868   return makeNaturalAddressForPointer(Addr, PtrTy->getPointeeType(),
2869                                       CharUnits(), /*ForPointeeType=*/true,
2870                                       BaseInfo, TBAAInfo);
2871 }
2872 
2873 LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr,
2874                                                 const PointerType *PtrTy) {
2875   LValueBaseInfo BaseInfo;
2876   TBAAAccessInfo TBAAInfo;
2877   Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo);
2878   return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo);
2879 }
2880 
2881 static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
2882                                       const Expr *E, const VarDecl *VD) {
2883   QualType T = E->getType();
2884 
2885   // If it's thread_local, emit a call to its wrapper function instead.
2886   if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2887       CGF.CGM.getCXXABI().usesThreadWrapperFunction(VD))
2888     return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
2889   // Check if the variable is marked as declare target with link clause in
2890   // device codegen.
2891   if (CGF.getLangOpts().OpenMPIsTargetDevice) {
2892     Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T);
2893     if (Addr.isValid())
2894       return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2895   }
2896 
2897   llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
2898 
2899   if (VD->getTLSKind() != VarDecl::TLS_None)
2900     V = CGF.Builder.CreateThreadLocalAddress(V);
2901 
2902   llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
2903   CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
2904   Address Addr(V, RealVarTy, Alignment);
2905   // Emit reference to the private copy of the variable if it is an OpenMP
2906   // threadprivate variable.
2907   if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
2908       VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
2909     return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
2910                                           E->getExprLoc());
2911   }
2912   LValue LV = VD->getType()->isReferenceType() ?
2913       CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
2914                                     AlignmentSource::Decl) :
2915       CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2916   setObjCGCLValueClass(CGF.getContext(), E, LV);
2917   return LV;
2918 }
2919 
2920 llvm::Constant *CodeGenModule::getRawFunctionPointer(GlobalDecl GD,
2921                                                      llvm::Type *Ty) {
2922   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
2923   if (FD->hasAttr<WeakRefAttr>()) {
2924     ConstantAddress aliasee = GetWeakRefReference(FD);
2925     return aliasee.getPointer();
2926   }
2927 
2928   llvm::Constant *V = GetAddrOfFunction(GD, Ty);
2929   return V;
2930 }
2931 
2932 static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E,
2933                                      GlobalDecl GD) {
2934   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
2935   llvm::Constant *V = CGF.CGM.getFunctionPointer(GD);
2936   CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
2937   return CGF.MakeAddrLValue(V, E->getType(), Alignment,
2938                             AlignmentSource::Decl);
2939 }
2940 
2941 static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD,
2942                                       llvm::Value *ThisValue) {
2943 
2944   return CGF.EmitLValueForLambdaField(FD, ThisValue);
2945 }
2946 
2947 /// Named Registers are named metadata pointing to the register name
2948 /// which will be read from/written to as an argument to the intrinsic
2949 /// @llvm.read/write_register.
2950 /// So far, only the name is being passed down, but other options such as
2951 /// register type, allocation type or even optimization options could be
2952 /// passed down via the metadata node.
2953 static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) {
2954   SmallString<64> Name("llvm.named.register.");
2955   AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
2956   assert(Asm->getLabel().size() < 64 - Name.size() &&
2957          "Register name too big");
2958   Name.append(Asm->getLabel());
2959   llvm::NamedMDNode *M =
2960     CGM.getModule().getOrInsertNamedMetadata(Name);
2961   if (M->getNumOperands() == 0) {
2962     llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(),
2963                                               Asm->getLabel());
2964     llvm::Metadata *Ops[] = {Str};
2965     M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
2966   }
2967 
2968   CharUnits Alignment = CGM.getContext().getDeclAlign(VD);
2969 
2970   llvm::Value *Ptr =
2971     llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
2972   return LValue::MakeGlobalReg(Ptr, Alignment, VD->getType());
2973 }
2974 
2975 /// Determine whether we can emit a reference to \p VD from the current
2976 /// context, despite not necessarily having seen an odr-use of the variable in
2977 /// this context.
2978 static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF,
2979                                                const DeclRefExpr *E,
2980                                                const VarDecl *VD) {
2981   // For a variable declared in an enclosing scope, do not emit a spurious
2982   // reference even if we have a capture, as that will emit an unwarranted
2983   // reference to our capture state, and will likely generate worse code than
2984   // emitting a local copy.
2985   if (E->refersToEnclosingVariableOrCapture())
2986     return false;
2987 
2988   // For a local declaration declared in this function, we can always reference
2989   // it even if we don't have an odr-use.
2990   if (VD->hasLocalStorage()) {
2991     return VD->getDeclContext() ==
2992            dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl);
2993   }
2994 
2995   // For a global declaration, we can emit a reference to it if we know
2996   // for sure that we are able to emit a definition of it.
2997   VD = VD->getDefinition(CGF.getContext());
2998   if (!VD)
2999     return false;
3000 
3001   // Don't emit a spurious reference if it might be to a variable that only
3002   // exists on a different device / target.
3003   // FIXME: This is unnecessarily broad. Check whether this would actually be a
3004   // cross-target reference.
3005   if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA ||
3006       CGF.getLangOpts().OpenCL) {
3007     return false;
3008   }
3009 
3010   // We can emit a spurious reference only if the linkage implies that we'll
3011   // be emitting a non-interposable symbol that will be retained until link
3012   // time.
3013   switch (CGF.CGM.getLLVMLinkageVarDefinition(VD)) {
3014   case llvm::GlobalValue::ExternalLinkage:
3015   case llvm::GlobalValue::LinkOnceODRLinkage:
3016   case llvm::GlobalValue::WeakODRLinkage:
3017   case llvm::GlobalValue::InternalLinkage:
3018   case llvm::GlobalValue::PrivateLinkage:
3019     return true;
3020   default:
3021     return false;
3022   }
3023 }
3024 
3025 LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
3026   const NamedDecl *ND = E->getDecl();
3027   QualType T = E->getType();
3028 
3029   assert(E->isNonOdrUse() != NOUR_Unevaluated &&
3030          "should not emit an unevaluated operand");
3031 
3032   if (const auto *VD = dyn_cast<VarDecl>(ND)) {
3033     // Global named registers are accessed via intrinsics only.
3034     if (VD->getStorageClass() == SC_Register &&
3035         VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
3036       return EmitGlobalNamedRegister(VD, CGM);
3037 
3038     // If this DeclRefExpr does not constitute an odr-use of the variable,
3039     // we're not permitted to emit a reference to it in general, and it might
3040     // not be captured if capture would be necessary for a use. Emit the
3041     // constant value directly instead.
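    // For illustration: in a hypothetical
    //   void f() { const int n = 4; [] { int a[n]; }(); }
    // the lambda's mention of 'n' is not an odr-use and 'n' is not captured,
    // so the constant 4 is materialized here instead of a reference to 'n'.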
3042     if (E->isNonOdrUse() == NOUR_Constant &&
3043         (VD->getType()->isReferenceType() ||
3044          !canEmitSpuriousReferenceToVariable(*this, E, VD))) {
3045       VD->getAnyInitializer(VD);
3046       llvm::Constant *Val = ConstantEmitter(*this).emitAbstract(
3047           E->getLocation(), *VD->evaluateValue(), VD->getType());
3048       assert(Val && "failed to emit constant expression");
3049 
3050       Address Addr = Address::invalid();
3051       if (!VD->getType()->isReferenceType()) {
3052         // Spill the constant value to a global.
3053         Addr = CGM.createUnnamedGlobalFrom(*VD, Val,
3054                                            getContext().getDeclAlign(VD));
3055         llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType());
3056         auto *PTy = llvm::PointerType::get(
3057             VarTy, getTypes().getTargetAddressSpace(VD->getType()));
3058         Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy, VarTy);
3059       } else {
3060         // Should we be using the alignment of the constant pointer we emitted?
3061         CharUnits Alignment =
3062             CGM.getNaturalTypeAlignment(E->getType(),
3063                                         /* BaseInfo= */ nullptr,
3064                                         /* TBAAInfo= */ nullptr,
3065                                         /* forPointeeType= */ true);
3066         Addr = makeNaturalAddressForPointer(Val, T, Alignment);
3067       }
3068       return MakeAddrLValue(Addr, T, AlignmentSource::Decl);
3069     }
3070 
3071     // FIXME: Handle other kinds of non-odr-use DeclRefExprs.
3072 
3073     // Check for captured variables.
3074     if (E->refersToEnclosingVariableOrCapture()) {
3075       VD = VD->getCanonicalDecl();
3076       if (auto *FD = LambdaCaptureFields.lookup(VD))
3077         return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3078       if (CapturedStmtInfo) {
3079         auto I = LocalDeclMap.find(VD);
3080         if (I != LocalDeclMap.end()) {
3081           LValue CapLVal;
3082           if (VD->getType()->isReferenceType())
3083             CapLVal = EmitLoadOfReferenceLValue(I->second, VD->getType(),
3084                                                 AlignmentSource::Decl);
3085           else
3086             CapLVal = MakeAddrLValue(I->second, T);
3087           // Mark lvalue as nontemporal if the variable is marked as nontemporal
3088           // in simd context.
3089           if (getLangOpts().OpenMP &&
3090               CGM.getOpenMPRuntime().isNontemporalDecl(VD))
3091             CapLVal.setNontemporal(/*Value=*/true);
3092           return CapLVal;
3093         }
3094         LValue CapLVal =
3095             EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
3096                                     CapturedStmtInfo->getContextValue());
3097         Address LValueAddress = CapLVal.getAddress();
3098         CapLVal = MakeAddrLValue(Address(LValueAddress.emitRawPointer(*this),
3099                                          LValueAddress.getElementType(),
3100                                          getContext().getDeclAlign(VD)),
3101                                  CapLVal.getType(),
3102                                  LValueBaseInfo(AlignmentSource::Decl),
3103                                  CapLVal.getTBAAInfo());
3104         // Mark lvalue as nontemporal if the variable is marked as nontemporal
3105         // in simd context.
3106         if (getLangOpts().OpenMP &&
3107             CGM.getOpenMPRuntime().isNontemporalDecl(VD))
3108           CapLVal.setNontemporal(/*Value=*/true);
3109         return CapLVal;
3110       }
3111 
3112       assert(isa<BlockDecl>(CurCodeDecl));
3113       Address addr = GetAddrOfBlockDecl(VD);
3114       return MakeAddrLValue(addr, T, AlignmentSource::Decl);
3115     }
3116   }
3117 
3118   // FIXME: We should be able to assert this for FunctionDecls as well!
3119   // FIXME: We should be able to assert this for all DeclRefExprs, not just
3120   // those with a valid source location.
3121   assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() ||
3122           !E->getLocation().isValid()) &&
3123          "Should not use decl without marking it used!");
3124 
3125   if (ND->hasAttr<WeakRefAttr>()) {
3126     const auto *VD = cast<ValueDecl>(ND);
3127     ConstantAddress Aliasee = CGM.GetWeakRefReference(VD);
3128     return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl);
3129   }
3130 
3131   if (const auto *VD = dyn_cast<VarDecl>(ND)) {
3132     // Check if this is a global variable.
3133     if (VD->hasLinkage() || VD->isStaticDataMember())
3134       return EmitGlobalVarDeclLValue(*this, E, VD);
3135 
3136     Address addr = Address::invalid();
3137 
3138     // The variable should generally be present in the local decl map.
3139     auto iter = LocalDeclMap.find(VD);
3140     if (iter != LocalDeclMap.end()) {
3141       addr = iter->second;
3142 
3143     // Otherwise, it might be a static local we haven't emitted yet for
3144     // some reason; most likely, because it's in an outer function.
3145     } else if (VD->isStaticLocal()) {
3146       llvm::Constant *var = CGM.getOrCreateStaticVarDecl(
3147           *VD, CGM.getLLVMLinkageVarDefinition(VD));
3148       addr = Address(
3149           var, ConvertTypeForMem(VD->getType()), getContext().getDeclAlign(VD));
3150 
3151     // No other cases for now.
3152     } else {
3153       llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
3154     }
3155 
3156     // Handle threadlocal function locals.
3157     if (VD->getTLSKind() != VarDecl::TLS_None)
3158       addr = addr.withPointer(
3159           Builder.CreateThreadLocalAddress(addr.getBasePointer()),
3160           NotKnownNonNull);
3161 
3162     // Check for OpenMP threadprivate variables.
3163     if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
3164         VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
3165       return EmitThreadPrivateVarDeclLValue(
3166           *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()),
3167           E->getExprLoc());
3168     }
3169 
3170     // Drill into block byref variables.
3171     bool isBlockByref = VD->isEscapingByref();
3172     if (isBlockByref) {
3173       addr = emitBlockByrefAddress(addr, VD);
3174     }
3175 
3176     // Drill into reference types.
3177     LValue LV = VD->getType()->isReferenceType() ?
3178         EmitLoadOfReferenceLValue(addr, VD->getType(), AlignmentSource::Decl) :
3179         MakeAddrLValue(addr, T, AlignmentSource::Decl);
3180 
3181     bool isLocalStorage = VD->hasLocalStorage();
3182 
3183     bool NonGCable = isLocalStorage &&
3184                      !VD->getType()->isReferenceType() &&
3185                      !isBlockByref;
3186     if (NonGCable) {
3187       LV.getQuals().removeObjCGCAttr();
3188       LV.setNonGC(true);
3189     }
3190 
3191     bool isImpreciseLifetime =
3192       (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
3193     if (isImpreciseLifetime)
3194       LV.setARCPreciseLifetime(ARCImpreciseLifetime);
3195     setObjCGCLValueClass(getContext(), E, LV);
3196     return LV;
3197   }
3198 
3199   if (const auto *FD = dyn_cast<FunctionDecl>(ND))
3200     return EmitFunctionDeclLValue(*this, E, FD);
3201 
3202   // FIXME: While we're emitting a binding from an enclosing scope, all other
3203   // DeclRefExprs we see should be implicitly treated as if they also refer to
3204   // an enclosing scope.
3205   if (const auto *BD = dyn_cast<BindingDecl>(ND)) {
3206     if (E->refersToEnclosingVariableOrCapture()) {
3207       auto *FD = LambdaCaptureFields.lookup(BD);
3208       return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3209     }
3210     return EmitLValue(BD->getBinding());
3211   }
3212 
3213   // We can form DeclRefExprs naming GUID declarations when reconstituting
3214   // non-type template parameters into expressions.
3215   if (const auto *GD = dyn_cast<MSGuidDecl>(ND))
3216     return MakeAddrLValue(CGM.GetAddrOfMSGuidDecl(GD), T,
3217                           AlignmentSource::Decl);
3218 
3219   if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) {
3220     auto ATPO = CGM.GetAddrOfTemplateParamObject(TPO);
3221     auto AS = getLangASFromTargetAS(ATPO.getAddressSpace());
3222 
3223     if (AS != T.getAddressSpace()) {
3224       auto TargetAS = getContext().getTargetAddressSpace(T.getAddressSpace());
3225       auto PtrTy = llvm::PointerType::get(CGM.getLLVMContext(), TargetAS);
3226       auto ASC = getTargetHooks().performAddrSpaceCast(
3227           CGM, ATPO.getPointer(), AS, T.getAddressSpace(), PtrTy);
3228       ATPO = ConstantAddress(ASC, ATPO.getElementType(), ATPO.getAlignment());
3229     }
3230 
3231     return MakeAddrLValue(ATPO, T, AlignmentSource::Decl);
3232   }
3233 
3234   llvm_unreachable("Unhandled DeclRefExpr");
3235 }
3236 
3237 LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
3238   // __extension__ doesn't affect lvalue-ness.
3239   if (E->getOpcode() == UO_Extension)
3240     return EmitLValue(E->getSubExpr());
3241 
3242   QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
3243   switch (E->getOpcode()) {
3244   default: llvm_unreachable("Unknown unary operator lvalue!");
3245   case UO_Deref: {
3246     QualType T = E->getSubExpr()->getType()->getPointeeType();
3247     assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
3248 
3249     LValueBaseInfo BaseInfo;
3250     TBAAAccessInfo TBAAInfo;
3251     Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &BaseInfo,
3252                                             &TBAAInfo);
3253     LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
3254     LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());
3255 
3256     // We should not generate a __weak write barrier on an indirect reference
3257     // of a pointer to object, as in: void foo (__weak id *param); *param = 0;
3258     // But we continue to generate a __strong write barrier on an indirect write
3259     // into a pointer to object.
3260     if (getLangOpts().ObjC &&
3261         getLangOpts().getGC() != LangOptions::NonGC &&
3262         LV.isObjCWeak())
3263       LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
3264     return LV;
3265   }
3266   case UO_Real:
3267   case UO_Imag: {
3268     LValue LV = EmitLValue(E->getSubExpr());
3269     assert(LV.isSimple() && "real/imag on non-ordinary l-value");
3270 
3271     // __real is valid on scalars.  This is a faster way of testing that.
3272     // __imag can only produce an rvalue on scalars.
3273     if (E->getOpcode() == UO_Real &&
3274         !LV.getAddress().getElementType()->isStructTy()) {
3275       assert(E->getSubExpr()->getType()->isArithmeticType());
3276       return LV;
3277     }
3278 
3279     QualType T = ExprTy->castAs<ComplexType>()->getElementType();
3280 
3281     Address Component =
3282         (E->getOpcode() == UO_Real
3283              ? emitAddrOfRealComponent(LV.getAddress(), LV.getType())
3284              : emitAddrOfImagComponent(LV.getAddress(), LV.getType()));
3285     LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(),
3286                                    CGM.getTBAAInfoForSubobject(LV, T));
3287     ElemLV.getQuals().addQualifiers(LV.getQuals());
3288     return ElemLV;
3289   }
3290   case UO_PreInc:
3291   case UO_PreDec: {
3292     LValue LV = EmitLValue(E->getSubExpr());
3293     bool isInc = E->getOpcode() == UO_PreInc;
3294 
3295     if (E->getType()->isAnyComplexType())
3296       EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
3297     else
3298       EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
3299     return LV;
3300   }
3301   }
3302 }
3303 
3304 LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
3305   return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
3306                         E->getType(), AlignmentSource::Decl);
3307 }
3308 
3309 LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
3310   return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
3311                         E->getType(), AlignmentSource::Decl);
3312 }
3313 
3314 LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
3315   auto SL = E->getFunctionName();
3316   assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
3317   StringRef FnName = CurFn->getName();
3318   if (FnName.starts_with("\01"))
3319     FnName = FnName.substr(1);
3320   StringRef NameItems[] = {
3321       PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName};
3322   std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
3323   if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
3324     std::string Name = std::string(SL->getString());
3325     if (!Name.empty()) {
3326       unsigned Discriminator =
3327           CGM.getCXXABI().getMangleContext().getBlockId(BD, true);
3328       if (Discriminator)
3329         Name += "_" + Twine(Discriminator + 1).str();
3330       auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str());
3331       return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
3332     } else {
3333       auto C =
3334           CGM.GetAddrOfConstantCString(std::string(FnName), GVName.c_str());
3335       return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
3336     }
3337   }
3338   auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
3339   return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
3340 }
3341 
3342 /// Emit a type description suitable for use by a runtime sanitizer library. The
3343 /// format of a type descriptor is
3344 ///
3345 /// \code
3346 ///   { i16 TypeKind, i16 TypeInfo }
3347 /// \endcode
3348 ///
3349 /// followed by an array of i8 containing the type name with extra information
3350 /// for BitInt. TypeKind is TK_Integer(0) for an integer, TK_Float(1) for a
3351 /// floating point value, TK_BitInt(2) for BitInt and TK_Unknown(0xFFFF) for
3352 /// anything else.
3353 llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
3354   // Only emit each type's descriptor once.
3355   if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T))
3356     return C;
3357 
3358   uint16_t TypeKind = TK_Unknown;
3359   uint16_t TypeInfo = 0;
3360   bool IsBitInt = false;
3361 
3362   if (T->isIntegerType()) {
3363     TypeKind = TK_Integer;
3364     TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
3365                (T->isSignedIntegerType() ? 1 : 0);
3366     // Follow the suggestion from the discussion of issue 64100:
3367     // write the exact number of bits in the TypeName after '\0',
3368     // making it <diagnostic-like type name>.'\0'.<32-bit width>.
3369     if (T->isSignedIntegerType() && T->getAs<BitIntType>()) {
3370       // Do sanity checks, as we are using a 32-bit type to store the bit length.
3371       assert(getContext().getTypeSize(T) > 0 &&
3372              "non-positive number of bits in __BitInt type");
3373       assert(getContext().getTypeSize(T) <= 0xFFFFFFFF &&
3374              "too many bits in __BitInt type");
3375 
3376       // Redefine TypeKind with the actual __BitInt type if we have signed
3377       // BitInt.
3378       TypeKind = TK_BitInt;
3379       IsBitInt = true;
3380     }
3381   } else if (T->isFloatingType()) {
3382     TypeKind = TK_Float;
3383     TypeInfo = getContext().getTypeSize(T);
3384   }
3385 
3386   // Format the type name as if for a diagnostic, including quotes and
3387   // optionally an 'aka'.
3388   SmallString<32> Buffer;
3389   CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype,
3390                                     (intptr_t)T.getAsOpaquePtr(), StringRef(),
3391                                     StringRef(), {}, Buffer, {});
3392 
3393   if (IsBitInt) {
3394     // The structure is: a 0 to end the string, a 32-bit unsigned integer in
3395     // target endianness, then a zero byte.
3396     char S[6] = {'\0', '\0', '\0', '\0', '\0', '\0'};
3397     const auto *EIT = T->castAs<BitIntType>();
3398     uint32_t Bits = EIT->getNumBits();
3399     llvm::support::endian::write32(S + 1, Bits,
3400                                    getTarget().isBigEndian()
3401                                        ? llvm::endianness::big
3402                                        : llvm::endianness::little);
3403     StringRef Str = StringRef(S, sizeof(S) / sizeof(decltype(S[0])));
3404     Buffer.append(Str);
3405   }
3406 
3407   llvm::Constant *Components[] = {
3408     Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
3409     llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
3410   };
3411   llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);
3412 
3413   auto *GV = new llvm::GlobalVariable(
3414       CGM.getModule(), Descriptor->getType(),
3415       /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor);
3416   GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3417   CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV);
3418 
3419   // Remember the descriptor for this type.
3420   CGM.setTypeDescriptorInMap(T, GV);
3421 
3422   return GV;
3423 }
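
// For illustration: for plain 'int' on a typical target (32-bit, signed),
// TypeKind = TK_Integer and TypeInfo = (Log2_32(32) << 1) | 1 = 11, so the
// descriptor is roughly
//
//   @desc = private unnamed_addr constant { i16, i16, [6 x i8] }
//           { i16 0, i16 11, [6 x i8] c"'int'\00" }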
3424 
3425 llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
3426   llvm::Type *TargetTy = IntPtrTy;
3427 
3428   if (V->getType() == TargetTy)
3429     return V;
3430 
3431   // Floating-point types which fit into intptr_t are bitcast to integers
3432   // and then passed directly (after zero-extension, if necessary).
3433   if (V->getType()->isFloatingPointTy()) {
3434     unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedValue();
3435     if (Bits <= TargetTy->getIntegerBitWidth())
3436       V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
3437                                                          Bits));
3438   }
3439 
3440   // Integers which fit in intptr_t are zero-extended and passed directly.
3441   if (V->getType()->isIntegerTy() &&
3442       V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
3443     return Builder.CreateZExt(V, TargetTy);
3444 
3445   // Pointers are passed directly, everything else is passed by address.
3446   if (!V->getType()->isPointerTy()) {
3447     RawAddress Ptr = CreateDefaultAlignTempAlloca(V->getType());
3448     Builder.CreateStore(V, Ptr);
3449     V = Ptr.getPointer();
3450   }
3451   return Builder.CreatePtrToInt(V, TargetTy);
3452 }
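
// For illustration: on a hypothetical 64-bit target, an i32 value is
// zero-extended to i64, a double is bitcast to i64 and passed through the
// integer path, and an i128 (too wide for intptr_t) is spilled to a stack
// temporary whose address is passed instead.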
3453 
3454 /// Emit a representation of a SourceLocation for passing to a handler
3455 /// in a sanitizer runtime library. The format for this data is:
3456 /// \code
3457 ///   struct SourceLocation {
3458 ///     const char *Filename;
3459 ///     int32_t Line, Column;
3460 ///   };
3461 /// \endcode
3462 /// For an invalid SourceLocation, the Filename pointer is null.
3463 llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
3464   llvm::Constant *Filename;
3465   int Line, Column;
3466 
3467   PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
3468   if (PLoc.isValid()) {
3469     StringRef FilenameString = PLoc.getFilename();
3470 
3471     int PathComponentsToStrip =
3472         CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip;
3473     if (PathComponentsToStrip < 0) {
3474       assert(PathComponentsToStrip != INT_MIN);
3475       int PathComponentsToKeep = -PathComponentsToStrip;
3476       auto I = llvm::sys::path::rbegin(FilenameString);
3477       auto E = llvm::sys::path::rend(FilenameString);
3478       while (I != E && --PathComponentsToKeep)
3479         ++I;
3480 
3481       FilenameString = FilenameString.substr(I - E);
3482     } else if (PathComponentsToStrip > 0) {
3483       auto I = llvm::sys::path::begin(FilenameString);
3484       auto E = llvm::sys::path::end(FilenameString);
3485       while (I != E && PathComponentsToStrip--)
3486         ++I;
3487 
3488       if (I != E)
3489         FilenameString =
3490             FilenameString.substr(I - llvm::sys::path::begin(FilenameString));
3491       else
3492         FilenameString = llvm::sys::path::filename(FilenameString);
3493     }
3494 
3495     auto FilenameGV =
3496         CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src");
3497     CGM.getSanitizerMetadata()->disableSanitizerForGlobal(
3498         cast<llvm::GlobalVariable>(
3499             FilenameGV.getPointer()->stripPointerCasts()));
3500     Filename = FilenameGV.getPointer();
3501     Line = PLoc.getLine();
3502     Column = PLoc.getColumn();
3503   } else {
3504     Filename = llvm::Constant::getNullValue(Int8PtrTy);
3505     Line = Column = 0;
3506   }
3507 
3508   llvm::Constant *Data[] = {Filename, Builder.getInt32(Line),
3509                             Builder.getInt32(Column)};
3510 
3511   return llvm::ConstantStruct::getAnon(Data);
3512 }
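
// For illustration: a check at a hypothetical foo.c:42:7 produces roughly
// { ptr @.src, i32 42, i32 7 }, with @.src holding "foo.c"; an invalid
// location produces { ptr null, i32 0, i32 0 }.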
3513 
3514 namespace {
3515 /// Specifies under what conditions a failed check can be recovered from.
3516 enum class CheckRecoverableKind {
3517   /// Always terminate program execution if this check fails.
3518   Unrecoverable,
3519   /// Check supports recovering, runtime has both fatal (noreturn) and
3520   /// non-fatal handlers for this check.
3521   Recoverable,
3522   /// Runtime conditionally aborts; we always need to support recovery.
3523   AlwaysRecoverable
3524 };
3525 }
3526 
3527 static CheckRecoverableKind
3528 getRecoverableKind(SanitizerKind::SanitizerOrdinal Ordinal) {
3529   if (Ordinal == SanitizerKind::SO_Vptr)
3530     return CheckRecoverableKind::AlwaysRecoverable;
3531   else if (Ordinal == SanitizerKind::SO_Return ||
3532            Ordinal == SanitizerKind::SO_Unreachable)
3533     return CheckRecoverableKind::Unrecoverable;
3534   else
3535     return CheckRecoverableKind::Recoverable;
3536 }
3537 
3538 namespace {
3539 struct SanitizerHandlerInfo {
3540   char const *const Name;
3541   unsigned Version;
3542 };
3543 }
3544 
3545 const SanitizerHandlerInfo SanitizerHandlers[] = {
3546 #define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version},
3547     LIST_SANITIZER_CHECKS
3548 #undef SANITIZER_CHECK
3549 };
3550 
3551 static void emitCheckHandlerCall(CodeGenFunction &CGF,
3552                                  llvm::FunctionType *FnType,
3553                                  ArrayRef<llvm::Value *> FnArgs,
3554                                  SanitizerHandler CheckHandler,
3555                                  CheckRecoverableKind RecoverKind, bool IsFatal,
3556                                  llvm::BasicBlock *ContBB, bool NoMerge) {
3557   assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
3558   std::optional<ApplyDebugLocation> DL;
3559   if (!CGF.Builder.getCurrentDebugLocation()) {
3560     // Ensure that the call has at least an artificial debug location.
3561     DL.emplace(CGF, SourceLocation());
3562   }
3563   bool NeedsAbortSuffix =
3564       IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
3565   bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime;
3566   const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler];
3567   const StringRef CheckName = CheckInfo.Name;
3568   std::string FnName = "__ubsan_handle_" + CheckName.str();
3569   if (CheckInfo.Version && !MinimalRuntime)
3570     FnName += "_v" + llvm::utostr(CheckInfo.Version);
3571   if (MinimalRuntime)
3572     FnName += "_minimal";
3573   if (NeedsAbortSuffix)
3574     FnName += "_abort";
3575   bool MayReturn =
3576       !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;
3577 
3578   llvm::AttrBuilder B(CGF.getLLVMContext());
3579   if (!MayReturn) {
3580     B.addAttribute(llvm::Attribute::NoReturn)
3581         .addAttribute(llvm::Attribute::NoUnwind);
3582   }
3583   B.addUWTableAttr(llvm::UWTableKind::Default);
3584 
3585   llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(
3586       FnType, FnName,
3587       llvm::AttributeList::get(CGF.getLLVMContext(),
3588                                llvm::AttributeList::FunctionIndex, B),
3589       /*Local=*/true);
3590   llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
3591   NoMerge = NoMerge || !CGF.CGM.getCodeGenOpts().OptimizationLevel ||
3592             (CGF.CurCodeDecl && CGF.CurCodeDecl->hasAttr<OptimizeNoneAttr>());
3593   if (NoMerge)
3594     HandlerCall->addFnAttr(llvm::Attribute::NoMerge);
3595   if (!MayReturn) {
3596     HandlerCall->setDoesNotReturn();
3597     CGF.Builder.CreateUnreachable();
3598   } else {
3599     CGF.Builder.CreateBr(ContBB);
3600   }
3601 }
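
// For illustration: for the handler entry {"type_mismatch", 1}, the runtime
// function selected above is __ubsan_handle_type_mismatch_v1 for a
// recoverable check, __ubsan_handle_type_mismatch_v1_abort for a fatal one,
// and __ubsan_handle_type_mismatch_minimal (no version suffix) when the
// minimal runtime is in use.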
3602 
3603 void CodeGenFunction::EmitCheck(
3604     ArrayRef<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>> Checked,
3605     SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,
3606     ArrayRef<llvm::Value *> DynamicArgs) {
3607   assert(IsSanitizerScope);
3608   assert(Checked.size() > 0);
3609   assert(CheckHandler >= 0 &&
3610          size_t(CheckHandler) < std::size(SanitizerHandlers));
3611   const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;
3612 
3613   llvm::Value *FatalCond = nullptr;
3614   llvm::Value *RecoverableCond = nullptr;
3615   llvm::Value *TrapCond = nullptr;
3616   bool NoMerge = false;
3617   for (auto &[Check, Ord] : Checked) {
3618     // -fsanitize-trap= overrides -fsanitize-recover=.
3619     llvm::Value *&Cond = CGM.getCodeGenOpts().SanitizeTrap.has(Ord) ? TrapCond
3620                          : CGM.getCodeGenOpts().SanitizeRecover.has(Ord)
3621                              ? RecoverableCond
3622                              : FatalCond;
3623     Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check;
3624 
3625     if (!CGM.getCodeGenOpts().SanitizeMergeHandlers.has(Ord))
3626       NoMerge = true;
3627   }
3628 
3629   if (ClSanitizeGuardChecks) {
3630     llvm::Value *Allow =
3631         Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::allow_ubsan_check),
3632                            llvm::ConstantInt::get(CGM.Int8Ty, CheckHandler));
3633 
3634     for (llvm::Value **Cond : {&FatalCond, &RecoverableCond, &TrapCond}) {
3635       if (*Cond)
3636         *Cond = Builder.CreateOr(*Cond, Builder.CreateNot(Allow));
3637     }
3638   }
3639 
3640   if (TrapCond)
3641     EmitTrapCheck(TrapCond, CheckHandler, NoMerge);
3642   if (!FatalCond && !RecoverableCond)
3643     return;
3644 
3645   llvm::Value *JointCond;
3646   if (FatalCond && RecoverableCond)
3647     JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);
3648   else
3649     JointCond = FatalCond ? FatalCond : RecoverableCond;
3650   assert(JointCond);
3651 
3652   CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);
3653   assert(SanOpts.has(Checked[0].second));
3654 #ifndef NDEBUG
3655   for (int i = 1, n = Checked.size(); i < n; ++i) {
3656     assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
3657            "All recoverable kinds in a single check must be same!");
3658     assert(SanOpts.has(Checked[i].second));
3659   }
3660 #endif
3661 
3662   llvm::BasicBlock *Cont = createBasicBlock("cont");
3663   llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
3664   llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
3665   // Give a hint that we very much don't expect to execute the handler.
3666   llvm::MDBuilder MDHelper(getLLVMContext());
3667   llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
3668   Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
3669   EmitBlock(Handlers);
3670 
3671   // Handler functions take an i8* pointing to the (handler-specific) static
3672   // information block, followed by a sequence of intptr_t arguments
3673   // representing operand values.
3674   SmallVector<llvm::Value *, 4> Args;
3675   SmallVector<llvm::Type *, 4> ArgTypes;
3676   if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) {
3677     Args.reserve(DynamicArgs.size() + 1);
3678     ArgTypes.reserve(DynamicArgs.size() + 1);
3679 
3680     // Emit handler arguments and create handler function type.
3681     if (!StaticArgs.empty()) {
3682       llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3683       auto *InfoPtr = new llvm::GlobalVariable(
3684           CGM.getModule(), Info->getType(), false,
3685           llvm::GlobalVariable::PrivateLinkage, Info, "", nullptr,
3686           llvm::GlobalVariable::NotThreadLocal,
3687           CGM.getDataLayout().getDefaultGlobalsAddressSpace());
3688       InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3689       CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
3690       Args.push_back(InfoPtr);
3691       ArgTypes.push_back(Args.back()->getType());
3692     }
3693 
3694     for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) {
3695       Args.push_back(EmitCheckValue(DynamicArgs[i]));
3696       ArgTypes.push_back(IntPtrTy);
3697     }
3698   }
3699 
3700   llvm::FunctionType *FnType =
3701     llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
3702 
3703   if (!FatalCond || !RecoverableCond) {
3704     // Simple case: we need to generate a single handler call, either
3705     // fatal, or non-fatal.
3706     emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind,
3707                          (FatalCond != nullptr), Cont, NoMerge);
3708   } else {
3709     // Emit two handler calls: one for the set of unrecoverable checks, and
3710     // another for the recoverable ones.
3711     llvm::BasicBlock *NonFatalHandlerBB =
3712         createBasicBlock("non_fatal." + CheckName);
3713     llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
3714     Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
3715     EmitBlock(FatalHandlerBB);
3716     emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true,
3717                          NonFatalHandlerBB, NoMerge);
3718     EmitBlock(NonFatalHandlerBB);
3719     emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false,
3720                          Cont, NoMerge);
3721   }
3722 
3723   EmitBlock(Cont);
3724 }
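
// For illustration: if every ordinal in Checked recovers (say,
// -fsanitize=signed-integer-overflow with
// -fsanitize-recover=signed-integer-overflow), only RecoverableCond is set
// and a single non-fatal handler call is emitted; mixing recoverable and
// fatal ordinals in one EmitCheck produces the two-block fatal/non-fatal
// dispatch above.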
3725 
3726 void CodeGenFunction::EmitCfiSlowPathCheck(
3727     SanitizerKind::SanitizerOrdinal Ordinal, llvm::Value *Cond,
3728     llvm::ConstantInt *TypeId, llvm::Value *Ptr,
3729     ArrayRef<llvm::Constant *> StaticArgs) {
3730   llvm::BasicBlock *Cont = createBasicBlock("cfi.cont");
3731 
3732   llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath");
3733   llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB);
3734 
3735   llvm::MDBuilder MDHelper(getLLVMContext());
3736   llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
3737   BI->setMetadata(llvm::LLVMContext::MD_prof, Node);
3738 
3739   EmitBlock(CheckBB);
3740 
3741   bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Ordinal);
3742 
3743   llvm::CallInst *CheckCall;
3744   llvm::FunctionCallee SlowPathFn;
3745   if (WithDiag) {
3746     llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3747     auto *InfoPtr =
3748         new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
3749                                  llvm::GlobalVariable::PrivateLinkage, Info);
3750     InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3751     CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
3752 
3753     SlowPathFn = CGM.getModule().getOrInsertFunction(
3754         "__cfi_slowpath_diag",
3755         llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy},
3756                                 false));
3757     CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr, InfoPtr});
3758   } else {
3759     SlowPathFn = CGM.getModule().getOrInsertFunction(
3760         "__cfi_slowpath",
3761         llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false));
3762     CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});
3763   }
3764 
3765   CGM.setDSOLocal(
3766       cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts()));
3767   CheckCall->setDoesNotThrow();
3768 
3769   EmitBlock(Cont);
3770 }
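
// For illustration: with diagnostics enabled the slow path emits roughly
//
//   call void @__cfi_slowpath_diag(i64 %typeid, ptr %addr, ptr @info)
//
// and call void @__cfi_slowpath(i64 %typeid, ptr %addr) in trap-only mode.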
3771 
3772 // Emit a stub for the __cfi_check function so that the linker knows about
3773 // this symbol in LTO mode.
3774 void CodeGenFunction::EmitCfiCheckStub() {
3775   llvm::Module *M = &CGM.getModule();
3776   ASTContext &C = getContext();
3777   QualType QInt64Ty = C.getIntTypeForBitwidth(64, false);
3778 
3779   FunctionArgList FnArgs;
3780   ImplicitParamDecl ArgCallsiteTypeId(C, QInt64Ty, ImplicitParamKind::Other);
3781   ImplicitParamDecl ArgAddr(C, C.VoidPtrTy, ImplicitParamKind::Other);
3782   ImplicitParamDecl ArgCFICheckFailData(C, C.VoidPtrTy,
3783                                         ImplicitParamKind::Other);
3784   FnArgs.push_back(&ArgCallsiteTypeId);
3785   FnArgs.push_back(&ArgAddr);
3786   FnArgs.push_back(&ArgCFICheckFailData);
3787   const CGFunctionInfo &FI =
3788       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, FnArgs);
3789 
3790   llvm::Function *F = llvm::Function::Create(
3791       llvm::FunctionType::get(VoidTy, {Int64Ty, VoidPtrTy, VoidPtrTy}, false),
3792       llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
3793   CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
3794   CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
3795   F->setAlignment(llvm::Align(4096));
3796   CGM.setDSOLocal(F);
3797 
3798   llvm::LLVMContext &Ctx = M->getContext();
3799   llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
3800   // CrossDSOCFI pass is not executed if there is no executable code.
3801   SmallVector<llvm::Value*> Args{F->getArg(2), F->getArg(1)};
3802   llvm::CallInst::Create(M->getFunction("__cfi_check_fail"), Args, "", BB);
3803   llvm::ReturnInst::Create(Ctx, nullptr, BB);
3804 }
3805 
3806 // This function is basically a switch over the CFI failure kind, which is
3807 // extracted from CFICheckFailData (1st function argument). Each case is either
3808 // llvm.trap or a call to one of the two runtime handlers, based on
3809 // -fsanitize-trap and -fsanitize-recover settings.  Default case (invalid
3810 // failure kind) traps, but this should really never happen.  CFICheckFailData
3811 // can be nullptr if the calling module has -fsanitize-trap behavior for this
3812 // check kind; in this case __cfi_check_fail traps as well.
3813 void CodeGenFunction::EmitCfiCheckFail() {
3814   SanitizerScope SanScope(this);
3815   FunctionArgList Args;
3816   ImplicitParamDecl ArgData(getContext(), getContext().VoidPtrTy,
3817                             ImplicitParamKind::Other);
3818   ImplicitParamDecl ArgAddr(getContext(), getContext().VoidPtrTy,
3819                             ImplicitParamKind::Other);
3820   Args.push_back(&ArgData);
3821   Args.push_back(&ArgAddr);
3822 
3823   const CGFunctionInfo &FI =
3824     CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, Args);
3825 
3826   llvm::Function *F = llvm::Function::Create(
3827       llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false),
3828       llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule());
3829 
3830   CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
3831   CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
3832   F->setVisibility(llvm::GlobalValue::HiddenVisibility);
3833 
3834   StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args,
3835                 SourceLocation());
3836 
3837   // This function is not affected by the NoSanitizeList: it has no source
3838   // location, but "src:*" would still apply. Revert any changes to SanOpts
3839   // made in StartFunction.
3840   SanOpts = CGM.getLangOpts().Sanitize;
3841 
3842   llvm::Value *Data =
3843       EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false,
3844                        CGM.getContext().VoidPtrTy, ArgData.getLocation());
3845   llvm::Value *Addr =
3846       EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false,
3847                        CGM.getContext().VoidPtrTy, ArgAddr.getLocation());
3848 
3849   // Data == nullptr means the calling module has trap behavior for this check.
3850   llvm::Value *DataIsNotNullPtr =
3851       Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy));
3852   EmitTrapCheck(DataIsNotNullPtr, SanitizerHandler::CFICheckFail);
3853 
3854   llvm::StructType *SourceLocationTy =
3855       llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty);
3856   llvm::StructType *CfiCheckFailDataTy =
3857       llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);
3858 
3859   llvm::Value *V = Builder.CreateConstGEP2_32(
3860       CfiCheckFailDataTy, Builder.CreatePointerCast(Data, UnqualPtrTy), 0, 0);
3861 
3862   Address CheckKindAddr(V, Int8Ty, getIntAlign());
3863   llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);
3864 
3865   llvm::Value *AllVtables = llvm::MetadataAsValue::get(
3866       CGM.getLLVMContext(),
3867       llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
3868   llvm::Value *ValidVtable = Builder.CreateZExt(
3869       Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
3870                          {Addr, AllVtables}),
3871       IntPtrTy);
3872 
3873   const std::pair<int, SanitizerKind::SanitizerOrdinal> CheckKinds[] = {
3874       {CFITCK_VCall, SanitizerKind::SO_CFIVCall},
3875       {CFITCK_NVCall, SanitizerKind::SO_CFINVCall},
3876       {CFITCK_DerivedCast, SanitizerKind::SO_CFIDerivedCast},
3877       {CFITCK_UnrelatedCast, SanitizerKind::SO_CFIUnrelatedCast},
3878       {CFITCK_ICall, SanitizerKind::SO_CFIICall}};
3879 
3880   SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>, 5>
3881       Checks;
3882   for (auto CheckKindOrdinalPair : CheckKinds) {
3883     int Kind = CheckKindOrdinalPair.first;
3884     SanitizerKind::SanitizerOrdinal Ordinal = CheckKindOrdinalPair.second;
3885     llvm::Value *Cond =
3886         Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind));
3887     if (CGM.getLangOpts().Sanitize.has(Ordinal))
3888       EmitCheck(std::make_pair(Cond, Ordinal), SanitizerHandler::CFICheckFail,
3889                 {}, {Data, Addr, ValidVtable});
3890     else
3891       EmitTrapCheck(Cond, SanitizerHandler::CFICheckFail);
3892   }
3893 
3894   FinishFunction();
3895   // The only reference to this function will be created during LTO link.
3896   // Make sure it survives until then.
3897   CGM.addUsedGlobal(F);
3898 }
3899 
3900 void CodeGenFunction::EmitUnreachable(SourceLocation Loc) {
3901   if (SanOpts.has(SanitizerKind::Unreachable)) {
3902     SanitizerScope SanScope(this);
3903     EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
3904                              SanitizerKind::SO_Unreachable),
3905               SanitizerHandler::BuiltinUnreachable,
3906               EmitCheckSourceLocation(Loc), {});
3907   }
3908   Builder.CreateUnreachable();
3909 }
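
// Illustrative effect of EmitUnreachable (an assumption for exposition):
// under -fsanitize=unreachable, reaching __builtin_unreachable() calls the
// BuiltinUnreachable handler with the source location before emitting the
// IR-level unreachable, instead of leaving plain undefined behavior.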
3910 
3911 void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
3912                                     SanitizerHandler CheckHandlerID,
3913                                     bool NoMerge) {
3914   llvm::BasicBlock *Cont = createBasicBlock("cont");
3915 
3916   // If we're optimizing, collapse all calls to trap down to just one per
3917   // check-type per function to save on code size.
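  //
  // Illustrative shape of the merged form (a sketch, not literal output):
  //
  //     br i1 %ok1, label %cont1, label %trap
  //     ...
  //     br i1 %ok2, label %cont2, label %trap
  //   trap:
  //     call void @llvm.ubsantrap(i8 <handler id>)  ; merged debug location
  //     unreachable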
3918   if ((int)TrapBBs.size() <= CheckHandlerID)
3919     TrapBBs.resize(CheckHandlerID + 1);
3920 
3921   llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];
3922 
3923   NoMerge = NoMerge || !CGM.getCodeGenOpts().OptimizationLevel ||
3924             (CurCodeDecl && CurCodeDecl->hasAttr<OptimizeNoneAttr>());
3925 
3926   if (TrapBB && !NoMerge) {
3927     auto Call = TrapBB->begin();
3928     assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB");
3929 
3930     Call->applyMergedLocation(Call->getDebugLoc(),
3931                               Builder.getCurrentDebugLocation());
3932     Builder.CreateCondBr(Checked, Cont, TrapBB);
3933   } else {
3934     TrapBB = createBasicBlock("trap");
3935     Builder.CreateCondBr(Checked, Cont, TrapBB);
3936     EmitBlock(TrapBB);
3937 
3938     llvm::CallInst *TrapCall =
3939         Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::ubsantrap),
3940                            llvm::ConstantInt::get(CGM.Int8Ty, CheckHandlerID));
3941 
3942     if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
3943       auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
3944                                     CGM.getCodeGenOpts().TrapFuncName);
3945       TrapCall->addFnAttr(A);
3946     }
3947     if (NoMerge)
3948       TrapCall->addFnAttr(llvm::Attribute::NoMerge);
3949     TrapCall->setDoesNotReturn();
3950     TrapCall->setDoesNotThrow();
3951     Builder.CreateUnreachable();
3952   }
3953 
3954   EmitBlock(Cont);
3955 }
3956 
3957 llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
3958   llvm::CallInst *TrapCall =
3959       Builder.CreateCall(CGM.getIntrinsic(IntrID));
3960 
3961   if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
3962     auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
3963                                   CGM.getCodeGenOpts().TrapFuncName);
3964     TrapCall->addFnAttr(A);
3965   }
3966 
3967   if (InNoMergeAttributedStmt)
3968     TrapCall->addFnAttr(llvm::Attribute::NoMerge);
3969   return TrapCall;
3970 }
3971 
3972 Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
3973                                                  LValueBaseInfo *BaseInfo,
3974                                                  TBAAAccessInfo *TBAAInfo) {
3975   assert(E->getType()->isArrayType() &&
3976          "Array to pointer decay must have array source type!");
3977 
3978   // Expressions of array type can't be bitfields or vector elements.
3979   LValue LV = EmitLValue(E);
3980   Address Addr = LV.getAddress();
3981 
3982   // If the array type was an incomplete type, we need to make sure
3983   // the decay ends up being the right type.
3984   llvm::Type *NewTy = ConvertType(E->getType());
3985   Addr = Addr.withElementType(NewTy);
3986 
3987   // Note that VLA pointers are always decayed, so we don't need to do
3988   // anything here.
3989   if (!E->getType()->isVariableArrayType()) {
3990     assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
3991            "Expected pointer to array");
3992     Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
3993   }
3994 
3995   // The result of this decay conversion points to an array element within the
3996   // base lvalue. However, since TBAA currently does not support representing
3997   // accesses to elements of member arrays, we conservatively represent accesses
3998   // to the pointee object as if it had no base lvalue specified.
3999   // TODO: Support TBAA for member arrays.
4000   QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType();
4001   if (BaseInfo) *BaseInfo = LV.getBaseInfo();
4002   if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);
4003 
4004   return Addr.withElementType(ConvertTypeForMem(EltType));
4005 }
4006 
4007 /// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
4008 /// array to pointer, return the array subexpression.
4009 static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
4010   // If this isn't just an array->pointer decay, bail out.
4011   const auto *CE = dyn_cast<CastExpr>(E);
4012   if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
4013     return nullptr;
4014 
4015   // If this is a decay from a variable-width array, bail out.
4016   const Expr *SubExpr = CE->getSubExpr();
4017   if (SubExpr->getType()->isVariableArrayType())
4018     return nullptr;
4019 
4020   return SubExpr;
4021 }
4022 
4023 static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
4024                                           llvm::Type *elemType,
4025                                           llvm::Value *ptr,
4026                                           ArrayRef<llvm::Value*> indices,
4027                                           bool inbounds,
4028                                           bool signedIndices,
4029                                           SourceLocation loc,
4030                                     const llvm::Twine &name = "arrayidx") {
4031   if (inbounds) {
4032     return CGF.EmitCheckedInBoundsGEP(elemType, ptr, indices, signedIndices,
4033                                       CodeGenFunction::NotSubtraction, loc,
4034                                       name);
4035   } else {
4036     return CGF.Builder.CreateGEP(elemType, ptr, indices, name);
4037   }
4038 }
4039 
4040 static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
4041                                      ArrayRef<llvm::Value *> indices,
4042                                      llvm::Type *elementType, bool inbounds,
4043                                      bool signedIndices, SourceLocation loc,
4044                                      CharUnits align,
4045                                      const llvm::Twine &name = "arrayidx") {
4046   if (inbounds) {
4047     return CGF.EmitCheckedInBoundsGEP(addr, indices, elementType, signedIndices,
4048                                       CodeGenFunction::NotSubtraction, loc,
4049                                       align, name);
4050   } else {
4051     return CGF.Builder.CreateGEP(addr, indices, elementType, align, name);
4052   }
4053 }
4054 
4055 static CharUnits getArrayElementAlign(CharUnits arrayAlign,
4056                                       llvm::Value *idx,
4057                                       CharUnits eltSize) {
4058   // If we have a constant index, we can use the exact offset of the
4059   // element we're accessing.
4060   if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
4061     CharUnits offset = constantIdx->getZExtValue() * eltSize;
4062     return arrayAlign.alignmentAtOffset(offset);
4063 
4064   // Otherwise, use the worst-case alignment for any element.
4065   } else {
4066     return arrayAlign.alignmentOfArrayElement(eltSize);
4067   }
4068 }
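
// Worked example for getArrayElementAlign (illustrative): for a 16-byte
// aligned array of 4-byte elements, constant index 1 yields
// alignmentAtOffset(4) == 4, constant index 4 yields the full 16, and an
// unknown index conservatively yields alignmentOfArrayElement(4) == 4.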
4069 
4070 static QualType getFixedSizeElementType(const ASTContext &ctx,
4071                                         const VariableArrayType *vla) {
4072   QualType eltType;
4073   do {
4074     eltType = vla->getElementType();
4075   } while ((vla = ctx.getAsVariableArrayType(eltType)));
4076   return eltType;
4077 }
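
// Illustrative: for a VLA type such as 'int[n][m]', getFixedSizeElementType
// peels the variably-sized dimensions and returns 'int', the innermost
// fixed-size element type.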
4078 
4079 static bool hasBPFPreserveStaticOffset(const RecordDecl *D) {
4080   return D && D->hasAttr<BPFPreserveStaticOffsetAttr>();
4081 }
4082 
4083 static bool hasBPFPreserveStaticOffset(const Expr *E) {
4084   if (!E)
4085     return false;
4086   QualType PointeeType = E->getType()->getPointeeType();
4087   if (PointeeType.isNull())
4088     return false;
4089   if (const auto *BaseDecl = PointeeType->getAsRecordDecl())
4090     return hasBPFPreserveStaticOffset(BaseDecl);
4091   return false;
4092 }
4093 
4094 // Wraps Addr with a call to the llvm.preserve.static.offset intrinsic.
4095 static Address wrapWithBPFPreserveStaticOffset(CodeGenFunction &CGF,
4096                                                Address &Addr) {
4097   if (!CGF.getTarget().getTriple().isBPF())
4098     return Addr;
4099 
4100   llvm::Function *Fn =
4101       CGF.CGM.getIntrinsic(llvm::Intrinsic::preserve_static_offset);
4102   llvm::CallInst *Call = CGF.Builder.CreateCall(Fn, {Addr.emitRawPointer(CGF)});
4103   return Address(Call, Addr.getElementType(), Addr.getAlignment());
4104 }
4105 
4106 /// Given an array base, check whether its member access belongs to a record
4107 /// with the preserve_access_index attribute.
4108 static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {
4109   if (!ArrayBase || !CGF.getDebugInfo())
4110     return false;
4111 
4112   // Only support base as either a MemberExpr or DeclRefExpr.
4113   // DeclRefExpr to cover cases like:
4114   //    struct s { int a; int b[10]; };
4115   //    struct s *p;
4116   //    p[1].a
4117   // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr.
4118   // p->b[5] is a MemberExpr example.
4119   const Expr *E = ArrayBase->IgnoreImpCasts();
4120   if (const auto *ME = dyn_cast<MemberExpr>(E))
4121     return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
4122 
4123   if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
4124     const auto *VarDef = dyn_cast<VarDecl>(DRE->getDecl());
4125     if (!VarDef)
4126       return false;
4127 
4128     const auto *PtrT = VarDef->getType()->getAs<PointerType>();
4129     if (!PtrT)
4130       return false;
4131 
4132     const auto *PointeeT = PtrT->getPointeeType()
4133                              ->getUnqualifiedDesugaredType();
4134     if (const auto *RecT = dyn_cast<RecordType>(PointeeT))
4135       return RecT->getDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
4136     return false;
4137   }
4138 
4139   return false;
4140 }
4141 
4142 static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
4143                                      ArrayRef<llvm::Value *> indices,
4144                                      QualType eltType, bool inbounds,
4145                                      bool signedIndices, SourceLocation loc,
4146                                      QualType *arrayType = nullptr,
4147                                      const Expr *Base = nullptr,
4148                                      const llvm::Twine &name = "arrayidx") {
4149   // All the indices except the last must be zero.
4150 #ifndef NDEBUG
4151   for (auto *idx : indices.drop_back())
4152     assert(isa<llvm::ConstantInt>(idx) &&
4153            cast<llvm::ConstantInt>(idx)->isZero());
4154 #endif
4155 
4156   // Determine the element size of the statically-sized base.  This is
4157   // the unit in which the indices are expressed.
4158   if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
4159     eltType = getFixedSizeElementType(CGF.getContext(), vla);
4160   }
4161 
4162   // We can use that to compute the best alignment of the element.
4163   CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
4164   CharUnits eltAlign =
4165       getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
4166 
4167   if (hasBPFPreserveStaticOffset(Base))
4168     addr = wrapWithBPFPreserveStaticOffset(CGF, addr);
4169 
4170   llvm::Value *eltPtr;
4171   auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());
4172   if (!LastIndex ||
4173       (!CGF.IsInPreservedAIRegion && !IsPreserveAIArrayBase(CGF, Base))) {
4174     addr = emitArraySubscriptGEP(CGF, addr, indices,
4175                                  CGF.ConvertTypeForMem(eltType), inbounds,
4176                                  signedIndices, loc, eltAlign, name);
4177     return addr;
4178   } else {
4179     // Remember the original array subscript for the BPF target.
4180     unsigned idx = LastIndex->getZExtValue();
4181     llvm::DIType *DbgInfo = nullptr;
4182     if (arrayType)
4183       DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc);
4184     eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(
4185         addr.getElementType(), addr.emitRawPointer(CGF), indices.size() - 1,
4186         idx, DbgInfo);
4187   }
4188 
4189   return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign);
4190 }
4191 
4192 /// The offset of a field from the beginning of the record.
4193 static bool getFieldOffsetInBits(CodeGenFunction &CGF, const RecordDecl *RD,
4194                                  const FieldDecl *Field, int64_t &Offset) {
4195   ASTContext &Ctx = CGF.getContext();
4196   const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
4197   unsigned FieldNo = 0;
4198 
4199   for (const FieldDecl *FD : RD->fields()) {
4200     if (FD == Field) {
4201       Offset += Layout.getFieldOffset(FieldNo);
4202       return true;
4203     }
4204 
4205     QualType Ty = FD->getType();
4206     if (Ty->isRecordType())
4207       if (getFieldOffsetInBits(CGF, Ty->getAsRecordDecl(), Field, Offset)) {
4208         Offset += Layout.getFieldOffset(FieldNo);
4209         return true;
4210       }
4211 
4212     if (!RD->isUnion())
4213       ++FieldNo;
4214   }
4215 
4216   return false;
4217 }
4218 
4219 /// Returns the relative offset difference between \p FD1 and \p FD2.
4220 /// \code
4221 ///   offsetof(struct foo, FD1) - offsetof(struct foo, FD2)
4222 /// \endcode
4223 /// Both fields must be within the same struct.
4224 static std::optional<int64_t> getOffsetDifferenceInBits(CodeGenFunction &CGF,
4225                                                         const FieldDecl *FD1,
4226                                                         const FieldDecl *FD2) {
4227   const RecordDecl *FD1OuterRec =
4228       FD1->getParent()->getOuterLexicalRecordContext();
4229   const RecordDecl *FD2OuterRec =
4230       FD2->getParent()->getOuterLexicalRecordContext();
4231 
4232   if (FD1OuterRec != FD2OuterRec)
4233     // Fields must be within the same RecordDecl.
4234     return std::optional<int64_t>();
4235 
4236   int64_t FD1Offset = 0;
4237   if (!getFieldOffsetInBits(CGF, FD1OuterRec, FD1, FD1Offset))
4238     return std::optional<int64_t>();
4239 
4240   int64_t FD2Offset = 0;
4241   if (!getFieldOffsetInBits(CGF, FD2OuterRec, FD2, FD2Offset))
4242     return std::optional<int64_t>();
4243 
4244   return std::make_optional<int64_t>(FD1Offset - FD2Offset);
4245 }
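
// Worked example for getOffsetDifferenceInBits (illustrative, assuming a
// 32-bit int): given 'struct S { int count; int fam[]; }',
// getOffsetDifferenceInBits(CGF, fam, count) yields 32, and swapping the
// arguments yields -32.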
4246 
4247 LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
4248                                                bool Accessed) {
4249   // The index must always be an integer, which is not an aggregate.  Emit it
4250   // in lexical order (this complexity is, sadly, required by C++17).
4251   llvm::Value *IdxPre =
4252       (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr;
4253   bool SignedIndices = false;
4254   auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {
4255     auto *Idx = IdxPre;
4256     if (E->getLHS() != E->getIdx()) {
4257       assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
4258       Idx = EmitScalarExpr(E->getIdx());
4259     }
4260 
4261     QualType IdxTy = E->getIdx()->getType();
4262     bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
4263     SignedIndices |= IdxSigned;
4264 
4265     if (SanOpts.has(SanitizerKind::ArrayBounds))
4266       EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);
4267 
4268     // Extend or truncate the index type to 32 or 64 bits.
4269     if (Promote && Idx->getType() != IntPtrTy)
4270       Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
4271 
4272     return Idx;
4273   };
4274   IdxPre = nullptr;
4275 
4276   // If the base is a vector type, then we are forming a vector element lvalue
4277   // with this subscript.
4278   if (E->getBase()->getType()->isSubscriptableVectorType() &&
4279       !isa<ExtVectorElementExpr>(E->getBase())) {
4280     // Emit the vector as an lvalue to get its address.
4281     LValue LHS = EmitLValue(E->getBase());
4282     auto *Idx = EmitIdxAfterBase(/*Promote*/false);
4283     assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
4284     return LValue::MakeVectorElt(LHS.getAddress(), Idx, E->getBase()->getType(),
4285                                  LHS.getBaseInfo(), TBAAAccessInfo());
4286   }
4287 
4288   // All the other cases basically behave like simple offsetting.
4289 
4290   // Handle the extvector case we ignored above.
4291   if (isa<ExtVectorElementExpr>(E->getBase())) {
4292     LValue LV = EmitLValue(E->getBase());
4293     auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4294     Address Addr = EmitExtVectorElementLValue(LV);
4295 
4296     QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
4297     Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true,
4298                                  SignedIndices, E->getExprLoc());
4299     return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(),
4300                           CGM.getTBAAInfoForSubobject(LV, EltType));
4301   }
4302 
4303   LValueBaseInfo EltBaseInfo;
4304   TBAAAccessInfo EltTBAAInfo;
4305   Address Addr = Address::invalid();
4306   if (const VariableArrayType *vla =
4307            getContext().getAsVariableArrayType(E->getType())) {
4308     // The base must be a pointer, which is not an aggregate.  Emit
4309     // it.  It needs to be emitted first in case it's what captures
4310     // the VLA bounds.
4311     Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4312     auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4313 
4314     // The element count here is the total number of non-VLA elements.
4315     llvm::Value *numElements = getVLASize(vla).NumElts;
4316 
4317     // Effectively, the multiply by the VLA size is part of the GEP.
4318     // GEP indexes are signed, and scaling an index isn't permitted to
4319     // signed-overflow, so we use the same semantics for our explicit
4320     // multiply.  We suppress this if overflow is not undefined behavior.
4321     if (getLangOpts().PointerOverflowDefined) {
4322       Idx = Builder.CreateMul(Idx, numElements);
4323     } else {
4324       Idx = Builder.CreateNSWMul(Idx, numElements);
4325     }
4326 
4327     Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
4328                                  !getLangOpts().PointerOverflowDefined,
4329                                  SignedIndices, E->getExprLoc());
4330 
4331   } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()) {
4332     // Indexing over an interface, as in "NSString *P; P[4];"
4333 
4334     // Emit the base pointer.
4335     Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4336     auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4337 
4338     CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
4339     llvm::Value *InterfaceSizeVal =
4340         llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());
4341 
4342     llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);
4343 
4344     // We don't necessarily build correct LLVM struct types for ObjC
4345     // interfaces, so we can't rely on GEP to do this scaling
4346     // correctly; we need to cast to i8* instead.  FIXME: is this actually
4347     // true?  A lot of other things in the fragile ABI would break...
4348     llvm::Type *OrigBaseElemTy = Addr.getElementType();
4349 
4350     // Do the GEP.
4351     CharUnits EltAlign =
4352       getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
4353     llvm::Value *EltPtr =
4354         emitArraySubscriptGEP(*this, Int8Ty, Addr.emitRawPointer(*this),
4355                               ScaledIdx, false, SignedIndices, E->getExprLoc());
4356     Addr = Address(EltPtr, OrigBaseElemTy, EltAlign);
4357   } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
4358     // If this is A[i] where A is an array, the frontend will have decayed the
4359     // base to be an ArrayToPointerDecay implicit cast.  While correct, it is
4360     // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
4361     // "gep x, i" here.  Emit one "gep A, 0, i".
4362     assert(Array->getType()->isArrayType() &&
4363            "Array to pointer decay must have array source type!");
4364     LValue ArrayLV;
4365     // For simple multidimensional array indexing, set the 'accessed' flag for
4366     // better bounds-checking of the base expression.
4367     if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
4368       ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
4369     else
4370       ArrayLV = EmitLValue(Array);
4371     auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4372 
4373     if (SanOpts.has(SanitizerKind::ArrayBounds)) {
4374       // If the array being accessed has a "counted_by" attribute, generate
4375       // bounds checking code. The "count" field is at the top level of the
4376       // struct or in an anonymous struct that is also at the top level. Future
4377       // expansions may allow the "count" to reside at any place in the struct,
4378       // but the value of "counted_by" will be a "simple" path to the count,
4379       // i.e. "a.b.count", so we shouldn't need the full force of EmitLValue or
4380       // similar to emit the correct GEP.
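      //
      // For example (an illustrative sketch):
      //   struct S { int count; int fam[] __attribute__((counted_by(count))); };
      // For s->fam[i], the count is loaded through a byte-offset GEP derived
      // from the field-offset difference computed below, and i is checked
      // against it.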
4381       const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
4382           getLangOpts().getStrictFlexArraysLevel();
4383 
4384       if (const auto *ME = dyn_cast<MemberExpr>(Array);
4385           ME &&
4386           ME->isFlexibleArrayMemberLike(getContext(), StrictFlexArraysLevel) &&
4387           ME->getMemberDecl()->getType()->isCountAttributedType()) {
4388         const FieldDecl *FAMDecl = cast<FieldDecl>(ME->getMemberDecl());
4389         if (const FieldDecl *CountFD = FAMDecl->findCountedByField()) {
4390           if (std::optional<int64_t> Diff =
4391                   getOffsetDifferenceInBits(*this, CountFD, FAMDecl)) {
4392             CharUnits OffsetDiff = CGM.getContext().toCharUnitsFromBits(*Diff);
4393 
4394             // Create a GEP with a byte offset between the FAM and count and
4395             // use that to load the count value.
4396             Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(
4397                 ArrayLV.getAddress(), Int8PtrTy, Int8Ty);
4398 
4399             llvm::Type *CountTy = ConvertType(CountFD->getType());
4400             llvm::Value *Res = Builder.CreateInBoundsGEP(
4401                 Int8Ty, Addr.emitRawPointer(*this),
4402                 Builder.getInt32(OffsetDiff.getQuantity()), ".counted_by.gep");
4403             Res = Builder.CreateAlignedLoad(CountTy, Res, getIntAlign(),
4404                                             ".counted_by.load");
4405 
4406             // Now emit the bounds checking.
4407             EmitBoundsCheckImpl(E, Res, Idx, E->getIdx()->getType(),
4408                                 Array->getType(), Accessed);
4409           }
4410         }
4411       }
4412     }
4413 
4414     // Propagate the alignment from the array itself to the result.
4415     QualType arrayType = Array->getType();
4416     Addr = emitArraySubscriptGEP(
4417         *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
4418         E->getType(), !getLangOpts().PointerOverflowDefined, SignedIndices,
4419         E->getExprLoc(), &arrayType, E->getBase());
4420     EltBaseInfo = ArrayLV.getBaseInfo();
4421     EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType());
4422   } else {
4423     // The base must be a pointer; emit it with an estimate of its alignment.
4424     Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4425     auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4426     QualType ptrType = E->getBase()->getType();
4427     Addr = emitArraySubscriptGEP(
4428         *this, Addr, Idx, E->getType(), !getLangOpts().PointerOverflowDefined,
4429         SignedIndices, E->getExprLoc(), &ptrType, E->getBase());
4430   }
4431 
4432   LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo);
4433 
4434   if (getLangOpts().ObjC &&
4435       getLangOpts().getGC() != LangOptions::NonGC) {
4436     LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
4437     setObjCGCLValueClass(getContext(), E, LV);
4438   }
4439   return LV;
4440 }
4441 
4442 llvm::Value *CodeGenFunction::EmitMatrixIndexExpr(const Expr *E) {
4443   llvm::Value *Idx = EmitScalarExpr(E);
4444   if (Idx->getType() == IntPtrTy)
4445     return Idx;
4446   bool IsSigned = E->getType()->isSignedIntegerOrEnumerationType();
4447   return Builder.CreateIntCast(Idx, IntPtrTy, IsSigned);
4448 }
4449 
4450 LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) {
4451   assert(
4452       !E->isIncomplete() &&
4453       "incomplete matrix subscript expressions should be rejected during Sema");
4454   LValue Base = EmitLValue(E->getBase());
4455 
4456   // Extend or truncate the index type to 32 or 64 bits if needed.
4457   llvm::Value *RowIdx = EmitMatrixIndexExpr(E->getRowIdx());
4458   llvm::Value *ColIdx = EmitMatrixIndexExpr(E->getColumnIdx());
4459 
4460   llvm::Value *NumRows = Builder.getIntN(
4461       RowIdx->getType()->getScalarSizeInBits(),
4462       E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
4463   llvm::Value *FinalIdx =
4464       Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx);
4465   return LValue::MakeMatrixElt(
4466       MaybeConvertMatrixAddress(Base.getAddress(), *this), FinalIdx,
4467       E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
4468 }
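
// Worked example for EmitMatrixSubscriptExpr (illustrative): matrix values
// are laid out column-major, so for a 4x3 matrix, the element at row 1,
// column 2 flattens to index 2 * 4 + 1 = 9.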
4469 
4470 static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
4471                                        LValueBaseInfo &BaseInfo,
4472                                        TBAAAccessInfo &TBAAInfo,
4473                                        QualType BaseTy, QualType ElTy,
4474                                        bool IsLowerBound) {
4475   LValue BaseLVal;
4476   if (auto *ASE = dyn_cast<ArraySectionExpr>(Base->IgnoreParenImpCasts())) {
4477     BaseLVal = CGF.EmitArraySectionExpr(ASE, IsLowerBound);
4478     if (BaseTy->isArrayType()) {
4479       Address Addr = BaseLVal.getAddress();
4480       BaseInfo = BaseLVal.getBaseInfo();
4481 
4482       // If the array type was an incomplete type, we need to make sure
4483       // the decay ends up being the right type.
4484       llvm::Type *NewTy = CGF.ConvertType(BaseTy);
4485       Addr = Addr.withElementType(NewTy);
4486 
4487       // Note that VLA pointers are always decayed, so we don't need to do
4488       // anything here.
4489       if (!BaseTy->isVariableArrayType()) {
4490         assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
4491                "Expected pointer to array");
4492         Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
4493       }
4494 
4495       return Addr.withElementType(CGF.ConvertTypeForMem(ElTy));
4496     }
4497     LValueBaseInfo TypeBaseInfo;
4498     TBAAAccessInfo TypeTBAAInfo;
4499     CharUnits Align =
4500         CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
4501     BaseInfo.mergeForCast(TypeBaseInfo);
4502     TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
4503     return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress()),
4504                    CGF.ConvertTypeForMem(ElTy), Align);
4505   }
4506   return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
4507 }
4508 
4509 LValue CodeGenFunction::EmitArraySectionExpr(const ArraySectionExpr *E,
4510                                              bool IsLowerBound) {
4511 
4512   assert(!E->isOpenACCArraySection() &&
4513          "OpenACC Array section codegen not implemented");
4514 
4515   QualType BaseTy = ArraySectionExpr::getBaseOriginalType(E->getBase());
4516   QualType ResultExprTy;
4517   if (auto *AT = getContext().getAsArrayType(BaseTy))
4518     ResultExprTy = AT->getElementType();
4519   else
4520     ResultExprTy = BaseTy->getPointeeType();
4521   llvm::Value *Idx = nullptr;
4522   if (IsLowerBound || E->getColonLocFirst().isInvalid()) {
4523     // Requesting the lower or upper bound when no length was provided and
4524     // there is no ':' symbol for the default length -> length = 1.
4525     // Idx = LowerBound ?: 0;
4526     if (auto *LowerBound = E->getLowerBound()) {
4527       Idx = Builder.CreateIntCast(
4528           EmitScalarExpr(LowerBound), IntPtrTy,
4529           LowerBound->getType()->hasSignedIntegerRepresentation());
4530     } else
4531       Idx = llvm::ConstantInt::getNullValue(IntPtrTy);
4532   } else {
4533     // Try to emit the length or lower bound as a constant. If possible, 1 is
4534     // subtracted from the constant length or lower bound. Otherwise, emit
4535     // LLVM IR computing (LB + Len) - 1.
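    // For example (illustrative): for the OpenMP array section a[2:4] (lower
    // bound 2, length 4), the last index is 2 + 4 - 1 = 5; for a[:3] it is
    // 0 + 3 - 1 = 2.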
4536     auto &C = CGM.getContext();
4537     auto *Length = E->getLength();
4538     llvm::APSInt ConstLength;
4539     if (Length) {
4540       // Idx = LowerBound + Length - 1;
4541       if (std::optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) {
4542         ConstLength = CL->zextOrTrunc(PointerWidthInBits);
4543         Length = nullptr;
4544       }
4545       auto *LowerBound = E->getLowerBound();
4546       llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
4547       if (LowerBound) {
4548         if (std::optional<llvm::APSInt> LB =
4549                 LowerBound->getIntegerConstantExpr(C)) {
4550           ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits);
4551           LowerBound = nullptr;
4552         }
4553       }
4554       if (!Length)
4555         --ConstLength;
4556       else if (!LowerBound)
4557         --ConstLowerBound;
4558 
4559       if (Length || LowerBound) {
4560         auto *LowerBoundVal =
4561             LowerBound
4562                 ? Builder.CreateIntCast(
4563                       EmitScalarExpr(LowerBound), IntPtrTy,
4564                       LowerBound->getType()->hasSignedIntegerRepresentation())
4565                 : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound);
4566         auto *LengthVal =
4567             Length
4568                 ? Builder.CreateIntCast(
4569                       EmitScalarExpr(Length), IntPtrTy,
4570                       Length->getType()->hasSignedIntegerRepresentation())
4571                 : llvm::ConstantInt::get(IntPtrTy, ConstLength);
4572         Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len",
4573                                 /*HasNUW=*/false,
4574                                 !getLangOpts().PointerOverflowDefined);
4575         if (Length && LowerBound) {
4576           Idx = Builder.CreateSub(
4577               Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1",
4578               /*HasNUW=*/false, !getLangOpts().PointerOverflowDefined);
4579         }
4580       } else
4581         Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound);
4582     } else {
4583       // Idx = ArraySize - 1;
4584       QualType ArrayTy = BaseTy->isPointerType()
4585                              ? E->getBase()->IgnoreParenImpCasts()->getType()
4586                              : BaseTy;
4587       if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) {
4588         Length = VAT->getSizeExpr();
4589         if (std::optional<llvm::APSInt> L = Length->getIntegerConstantExpr(C)) {
4590           ConstLength = *L;
4591           Length = nullptr;
4592         }
4593       } else {
4594         auto *CAT = C.getAsConstantArrayType(ArrayTy);
4595         assert(CAT && "unexpected type for array initializer");
4596         ConstLength = CAT->getSize();
4597       }
4598       if (Length) {
4599         auto *LengthVal = Builder.CreateIntCast(
4600             EmitScalarExpr(Length), IntPtrTy,
4601             Length->getType()->hasSignedIntegerRepresentation());
4602         Idx = Builder.CreateSub(
4603             LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1",
4604             /*HasNUW=*/false, !getLangOpts().PointerOverflowDefined);
4605       } else {
4606         ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
4607         --ConstLength;
4608         Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength);
4609       }
4610     }
4611   }
4612   assert(Idx);
4613 
4614   Address EltPtr = Address::invalid();
4615   LValueBaseInfo BaseInfo;
4616   TBAAAccessInfo TBAAInfo;
4617   if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
4618     // The base must be a pointer, which is not an aggregate.  Emit
4619     // it.  It needs to be emitted first in case it's what captures
4620     // the VLA bounds.
4621     Address Base =
4622         emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo,
4623                                 BaseTy, VLA->getElementType(), IsLowerBound);
4624     // The element count here is the total number of non-VLA elements.
4625     llvm::Value *NumElements = getVLASize(VLA).NumElts;
4626 
4627     // Effectively, the multiply by the VLA size is part of the GEP.
4628     // GEP indexes are signed, and scaling an index isn't permitted to
4629     // signed-overflow, so we use the same semantics for our explicit
4630     // multiply.  We suppress this if overflow is not undefined behavior.
4631     if (getLangOpts().PointerOverflowDefined)
4632       Idx = Builder.CreateMul(Idx, NumElements);
4633     else
4634       Idx = Builder.CreateNSWMul(Idx, NumElements);
4635     EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
4636                                    !getLangOpts().PointerOverflowDefined,
4637                                    /*signedIndices=*/false, E->getExprLoc());
4638   } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
4639     // If this is A[i] where A is an array, the frontend will have decayed the
4640     // base to be an ArrayToPointerDecay implicit cast.  While correct, it is
4641     // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
4642     // "gep x, i" here.  Emit one "gep A, 0, i".
4643     assert(Array->getType()->isArrayType() &&
4644            "Array to pointer decay must have array source type!");
4645     LValue ArrayLV;
4646     // For simple multidimensional array indexing, set the 'accessed' flag for
4647     // better bounds-checking of the base expression.
4648     if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
4649       ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
4650     else
4651       ArrayLV = EmitLValue(Array);
4652 
4653     // Propagate the alignment from the array itself to the result.
4654     EltPtr = emitArraySubscriptGEP(
4655         *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
4656         ResultExprTy, !getLangOpts().PointerOverflowDefined,
4657         /*signedIndices=*/false, E->getExprLoc());
4658     BaseInfo = ArrayLV.getBaseInfo();
4659     TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
4660   } else {
4661     Address Base =
4662         emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo, BaseTy,
4663                                 ResultExprTy, IsLowerBound);
4664     EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
4665                                    !getLangOpts().PointerOverflowDefined,
4666                                    /*signedIndices=*/false, E->getExprLoc());
4667   }
4668 
4669   return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo);
4670 }
4671 
4672 LValue CodeGenFunction::
4673 EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
4674   // Emit the base vector as an l-value.
4675   LValue Base;
4676 
4677   // ExtVectorElementExpr's base can either be a vector or pointer to vector.
4678   if (E->isArrow()) {
4679     // If it is a pointer to a vector, emit the address and form an lvalue with
4680     // it.
4681     LValueBaseInfo BaseInfo;
4682     TBAAAccessInfo TBAAInfo;
4683     Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo);
4684     const auto *PT = E->getBase()->getType()->castAs<PointerType>();
4685     Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo);
4686     Base.getQuals().removeObjCGCAttr();
4687   } else if (E->getBase()->isGLValue()) {
4688     // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
4689     // emit the base as an lvalue.
4690     assert(E->getBase()->getType()->isVectorType());
4691     Base = EmitLValue(E->getBase());
4692   } else {
4693     // Otherwise, the base is a normal rvalue (as in (V+V).x); emit it as such.
4694     assert(E->getBase()->getType()->isVectorType() &&
4695            "Result must be a vector");
4696     llvm::Value *Vec = EmitScalarExpr(E->getBase());
4697 
4698     // Store the vector to memory (because LValue wants an address).
4699     Address VecMem = CreateMemTemp(E->getBase()->getType());
4700     Builder.CreateStore(Vec, VecMem);
4701     Base = MakeAddrLValue(VecMem, E->getBase()->getType(),
4702                           AlignmentSource::Decl);
4703   }
4704 
4705   QualType type =
4706     E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
4707 
4708   // Encode the element access list into a vector of unsigned indices.
4709   SmallVector<uint32_t, 4> Indices;
4710   E->getEncodedElementAccess(Indices);
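  // For example (illustrative, assuming a typedef'd ext_vector_type(4)
  // 'float4 v'): v.zx encodes as indices {2, 0} and v.xy as {0, 1}.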
4711 
4712   if (Base.isSimple()) {
4713     llvm::Constant *CV =
4714         llvm::ConstantDataVector::get(getLLVMContext(), Indices);
4715     return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
4716                                     Base.getBaseInfo(), TBAAAccessInfo());
4717   }
4718   assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
4719 
4720   llvm::Constant *BaseElts = Base.getExtVectorElts();
4721   SmallVector<llvm::Constant *, 4> CElts;
4722 
4723   for (unsigned i = 0, e = Indices.size(); i != e; ++i)
4724     CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
4725   llvm::Constant *CV = llvm::ConstantVector::get(CElts);
4726   return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
4727                                   Base.getBaseInfo(), TBAAAccessInfo());
4728 }
4729 
4730 LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
4731   if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) {
4732     EmitIgnoredExpr(E->getBase());
4733     return EmitDeclRefLValue(DRE);
4734   }
4735 
4736   Expr *BaseExpr = E->getBase();
4737   // If this is s.x, emit s as an lvalue.  If it is s->x, emit s as a scalar.
4738   LValue BaseLV;
4739   if (E->isArrow()) {
4740     LValueBaseInfo BaseInfo;
4741     TBAAAccessInfo TBAAInfo;
4742     Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
4743     QualType PtrTy = BaseExpr->getType()->getPointeeType();
4744     SanitizerSet SkippedChecks;
4745     bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr);
4746     if (IsBaseCXXThis)
4747       SkippedChecks.set(SanitizerKind::Alignment, true);
4748     if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
4749       SkippedChecks.set(SanitizerKind::Null, true);
4750     EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr, PtrTy,
4751                   /*Alignment=*/CharUnits::Zero(), SkippedChecks);
4752     BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
4753   } else
4754     BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);
4755 
4756   NamedDecl *ND = E->getMemberDecl();
4757   if (auto *Field = dyn_cast<FieldDecl>(ND)) {
4758     LValue LV = EmitLValueForField(BaseLV, Field);
4759     setObjCGCLValueClass(getContext(), E, LV);
4760     if (getLangOpts().OpenMP) {
4761       // If the member was explicitly marked as nontemporal, mark it as
4762       // nontemporal. If the base lvalue is marked as nontemporal, mark access
4763       // to children as nontemporal too.
4764       if ((IsWrappedCXXThis(BaseExpr) &&
4765            CGM.getOpenMPRuntime().isNontemporalDecl(Field)) ||
4766           BaseLV.isNontemporal())
4767         LV.setNontemporal(/*Value=*/true);
4768     }
4769     return LV;
4770   }
4771 
4772   if (const auto *FD = dyn_cast<FunctionDecl>(ND))
4773     return EmitFunctionDeclLValue(*this, E, FD);
4774 
4775   llvm_unreachable("Unhandled member declaration!");
4776 }
4777 
4778 /// Given that we are currently emitting a lambda, emit an l-value for
4779 /// one of its members.
4780 ///
4781 LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field,
4782                                                  llvm::Value *ThisValue) {
4783   bool HasExplicitObjectParameter = false;
4784   const auto *MD = dyn_cast_if_present<CXXMethodDecl>(CurCodeDecl);
4785   if (MD) {
4786     HasExplicitObjectParameter = MD->isExplicitObjectMemberFunction();
4787     assert(MD->getParent()->isLambda());
4788     assert(MD->getParent() == Field->getParent());
4789   }
4790   LValue LambdaLV;
4791   if (HasExplicitObjectParameter) {
4792     const VarDecl *D = cast<CXXMethodDecl>(CurCodeDecl)->getParamDecl(0);
4793     auto It = LocalDeclMap.find(D);
4794     assert(It != LocalDeclMap.end() && "explicit parameter not loaded?");
4795     Address AddrOfExplicitObject = It->getSecond();
4796     if (D->getType()->isReferenceType())
4797       LambdaLV = EmitLoadOfReferenceLValue(AddrOfExplicitObject, D->getType(),
4798                                            AlignmentSource::Decl);
4799     else
4800       LambdaLV = MakeAddrLValue(AddrOfExplicitObject,
4801                                 D->getType().getNonReferenceType());
4802 
4803     // Make sure we have an lvalue for the lambda itself, not for a derived class.
4804     auto *ThisTy = D->getType().getNonReferenceType()->getAsCXXRecordDecl();
4805     auto *LambdaTy = cast<CXXRecordDecl>(Field->getParent());
4806     if (ThisTy != LambdaTy) {
4807       const CXXCastPath &BasePathArray = getContext().LambdaCastPaths.at(MD);
4808       Address Base = GetAddressOfBaseClass(
4809           LambdaLV.getAddress(), ThisTy, BasePathArray.begin(),
4810           BasePathArray.end(), /*NullCheckValue=*/false, SourceLocation());
4811       LambdaLV = MakeAddrLValue(Base, QualType{LambdaTy->getTypeForDecl(), 0});
4812     }
4813   } else {
4814     QualType LambdaTagType = getContext().getTagDeclType(Field->getParent());
4815     LambdaLV = MakeNaturalAlignAddrLValue(ThisValue, LambdaTagType);
4816   }
4817   return EmitLValueForField(LambdaLV, Field);
4818 }
4819 
4820 LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) {
4821   return EmitLValueForLambdaField(Field, CXXABIThisValue);
4822 }
4823 
4824 /// Get the field index in the debug info. The debug info structure/union
4825 /// ignores unnamed bit-fields.
4826 unsigned CodeGenFunction::getDebugInfoFIndex(const RecordDecl *Rec,
4827                                              unsigned FieldIndex) {
4828   unsigned I = 0, Skipped = 0;
4829 
4830   for (auto *F : Rec->getDefinition()->fields()) {
4831     if (I == FieldIndex)
4832       break;
4833     if (F->isUnnamedBitField())
4834       Skipped++;
4835     I++;
4836   }
4837 
4838   return FieldIndex - Skipped;
4839 }
4840 
4841 /// Get the address of a zero-sized field within a record. The resulting
4842 /// address doesn't necessarily have the right type.
4843 static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base,
4844                                        const FieldDecl *Field) {
4845   CharUnits Offset = CGF.getContext().toCharUnitsFromBits(
4846       CGF.getContext().getFieldOffset(Field));
4847   if (Offset.isZero())
4848     return Base;
4849   Base = Base.withElementType(CGF.Int8Ty);
4850   return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset);
4851 }
4852 
4853 /// Drill down to the storage of a field without walking into
4854 /// reference types.
4855 ///
4856 /// The resulting address doesn't necessarily have the right type.
4857 static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base,
4858                                       const FieldDecl *field) {
4859   if (isEmptyFieldForLayout(CGF.getContext(), field))
4860     return emitAddrOfZeroSizeField(CGF, base, field);
4861 
4862   const RecordDecl *rec = field->getParent();
4863 
4864   unsigned idx =
4865     CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
4866 
4867   return CGF.Builder.CreateStructGEP(base, idx, field->getName());
4868 }
4869 
4870 static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base,
4871                                         Address addr, const FieldDecl *field) {
4872   const RecordDecl *rec = field->getParent();
4873   llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(
4874       base.getType(), rec->getLocation());
4875 
4876   unsigned idx =
4877       CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
4878 
4879   return CGF.Builder.CreatePreserveStructAccessIndex(
4880       addr, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo);
4881 }
4882 
4883 static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
4884   const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
4885   if (!RD)
4886     return false;
4887 
4888   if (RD->isDynamicClass())
4889     return true;
4890 
4891   for (const auto &Base : RD->bases())
4892     if (hasAnyVptr(Base.getType(), Context))
4893       return true;
4894 
4895   for (const FieldDecl *Field : RD->fields())
4896     if (hasAnyVptr(Field->getType(), Context))
4897       return true;
4898 
4899   return false;
4900 }
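
// Illustrative: for 'union U { Poly p; int i; }' where Poly declares virtual
// functions, hasAnyVptr(U) is true, so union field accesses below are
// laundered to refresh invariant.group information.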
4901 
4902 LValue CodeGenFunction::EmitLValueForField(LValue base,
4903                                            const FieldDecl *field) {
4904   LValueBaseInfo BaseInfo = base.getBaseInfo();
4905 
4906   if (field->isBitField()) {
4907     const CGRecordLayout &RL =
4908         CGM.getTypes().getCGRecordLayout(field->getParent());
4909     const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
4910     const bool UseVolatile = isAAPCS(CGM.getTarget()) &&
4911                              CGM.getCodeGenOpts().AAPCSBitfieldWidth &&
4912                              Info.VolatileStorageSize != 0 &&
4913                              field->getType()
4914                                  .withCVRQualifiers(base.getVRQualifiers())
4915                                  .isVolatileQualified();
4916     Address Addr = base.getAddress();
4917     unsigned Idx = RL.getLLVMFieldNo(field);
4918     const RecordDecl *rec = field->getParent();
4919     if (hasBPFPreserveStaticOffset(rec))
4920       Addr = wrapWithBPFPreserveStaticOffset(*this, Addr);
4921     if (!UseVolatile) {
4922       if (!IsInPreservedAIRegion &&
4923           (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
4924         if (Idx != 0)
4925           // For structs, we GEP to the field that the record layout suggests.
4926           Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
4927       } else {
4928         llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
4929             getContext().getRecordType(rec), rec->getLocation());
4930         Addr = Builder.CreatePreserveStructAccessIndex(
4931             Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()),
4932             DbgInfo);
4933       }
4934     }
4935     const unsigned SS =
4936         UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
4937     // Get the access type.
4938     llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS);
4939     Addr = Addr.withElementType(FieldIntTy);
4940     if (UseVolatile) {
4941       const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
4942       if (VolatileOffset)
4943         Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset);
4944     }
4945 
4946     QualType fieldType =
4947         field->getType().withCVRQualifiers(base.getVRQualifiers());
4948     // TODO: Support TBAA for bit fields.
4949     LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
4950     return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,
4951                                 TBAAAccessInfo());
4952   }
4953 
4954   // Fields of may-alias structures are may-alias themselves.
4955   // FIXME: this should get propagated down through anonymous structs
4956   // and unions.
4957   QualType FieldType = field->getType();
4958   const RecordDecl *rec = field->getParent();
4959   AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource();
4960   LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource));
4961   TBAAAccessInfo FieldTBAAInfo;
4962   if (base.getTBAAInfo().isMayAlias() ||
4963           rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) {
4964     FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
4965   } else if (rec->isUnion()) {
4966     // TODO: Support TBAA for unions.
4967     FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
4968   } else {
4969     // If no base type has been assigned for the base access, then try to
4970     // generate one for this base lvalue.
4971     FieldTBAAInfo = base.getTBAAInfo();
4972     if (!FieldTBAAInfo.BaseType) {
4973         FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType());
4974         assert(!FieldTBAAInfo.Offset &&
4975                "Nonzero offset for an access with no base type!");
4976     }
4977 
4978     // Adjust offset to be relative to the base type.
4979     const ASTRecordLayout &Layout =
4980         getContext().getASTRecordLayout(field->getParent());
4981     unsigned CharWidth = getContext().getCharWidth();
4982     if (FieldTBAAInfo.BaseType)
4983       FieldTBAAInfo.Offset +=
4984           Layout.getFieldOffset(field->getFieldIndex()) / CharWidth;
4985 
4986     // Update the final access type and size.
4987     FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType);
4988     FieldTBAAInfo.Size =
4989         getContext().getTypeSizeInChars(FieldType).getQuantity();
4990   }
4991 
4992   Address addr = base.getAddress();
4993   if (hasBPFPreserveStaticOffset(rec))
4994     addr = wrapWithBPFPreserveStaticOffset(*this, addr);
4995   if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
4996     if (CGM.getCodeGenOpts().StrictVTablePointers &&
4997         ClassDef->isDynamicClass()) {
4998       // Getting to any field of a dynamic object requires stripping the
4999       // dynamic information provided by invariant.group.  Accessing fields
5000       // may leak the real address of the dynamic object, which could result
5001       // in miscompilation when the leaked pointer is compared.
5002       auto *stripped =
5003           Builder.CreateStripInvariantGroup(addr.emitRawPointer(*this));
5004       addr = Address(stripped, addr.getElementType(), addr.getAlignment());
5005     }
5006   }
5007 
5008   unsigned RecordCVR = base.getVRQualifiers();
5009   if (rec->isUnion()) {
5010     // For unions, there is no pointer adjustment.
5011     if (CGM.getCodeGenOpts().StrictVTablePointers &&
5012         hasAnyVptr(FieldType, getContext()))
5013       // Because unions can easily skip invariant.barriers, we need to add
5014       // a barrier every time a CXXRecord field with a vptr is referenced.
5015       addr = Builder.CreateLaunderInvariantGroup(addr);
5016 
5017     if (IsInPreservedAIRegion ||
5018         (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
5019       // Remember the original union field index
5020       llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
5021           base.getType(), rec->getLocation());
5022       addr =
5023           Address(Builder.CreatePreserveUnionAccessIndex(
5024                       addr.emitRawPointer(*this),
5025                       getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
5026                   addr.getElementType(), addr.getAlignment());
5027     }
5028 
5029     if (FieldType->isReferenceType())
5030       addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
5031   } else {
5032     if (!IsInPreservedAIRegion &&
5033         (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>()))
5034       // For structs, we GEP to the field that the record layout suggests.
5035       addr = emitAddrOfFieldStorage(*this, addr, field);
5036     else
5037       // Remember the original struct field index
5038       addr = emitPreserveStructAccess(*this, base, addr, field);
5039   }
5040 
5041   // If this is a reference field, load the reference right now.
5042   if (FieldType->isReferenceType()) {
5043     LValue RefLVal =
5044         MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
5045     if (RecordCVR & Qualifiers::Volatile)
5046       RefLVal.getQuals().addVolatile();
5047     addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo);
5048 
5049     // Qualifiers on the struct don't apply to the referencee.
5050     RecordCVR = 0;
5051     FieldType = FieldType->getPointeeType();
5052   }
5053 
5054   // Make sure that the address is pointing to the right type.  This is critical
5055   // for both unions and structs.
5056   addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
5057 
5058   if (field->hasAttr<AnnotateAttr>())
5059     addr = EmitFieldAnnotations(field, addr);
5060 
5061   LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
5062   LV.getQuals().addCVRQualifiers(RecordCVR);
5063 
5064   // __weak attribute on a field is ignored.
5065   if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
5066     LV.getQuals().removeObjCGCAttr();
5067 
5068   return LV;
5069 }
5070 
5071 LValue
5072 CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
5073                                                   const FieldDecl *Field) {
5074   QualType FieldType = Field->getType();
5075 
5076   if (!FieldType->isReferenceType())
5077     return EmitLValueForField(Base, Field);
5078 
5079   Address V = emitAddrOfFieldStorage(*this, Base.getAddress(), Field);
5080 
5081   // Make sure that the address is pointing to the right type.
5082   llvm::Type *llvmType = ConvertTypeForMem(FieldType);
5083   V = V.withElementType(llvmType);
5084 
5085   // TODO: Generate TBAA information that describes this access as a structure
5086   // member access and not just an access to an object of the field's type. This
5087   // should be similar to what we do in EmitLValueForField().
5088   LValueBaseInfo BaseInfo = Base.getBaseInfo();
5089   AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource();
5090   LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource));
5091   return MakeAddrLValue(V, FieldType, FieldBaseInfo,
5092                         CGM.getTBAAInfoForSubobject(Base, FieldType));
5093 }
5094 
5095 LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
5096   if (E->isFileScope()) {
5097     ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
5098     return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl);
5099   }
5100   if (E->getType()->isVariablyModifiedType())
5101     // Make sure to emit the VLA size.
5102     EmitVariablyModifiedType(E->getType());
5103 
5104   Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
5105   const Expr *InitExpr = E->getInitializer();
5106   LValue Result = MakeAddrLValue(DeclPtr, E->getType(), AlignmentSource::Decl);
5107 
5108   EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
5109                    /*Init*/ true);
5110 
5111   // Block-scope compound literals are destroyed at the end of the enclosing
5112   // scope in C.
5113   if (!getLangOpts().CPlusPlus)
5114     if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
5115       pushLifetimeExtendedDestroy(getCleanupKind(DtorKind), DeclPtr,
5116                                   E->getType(), getDestroyer(DtorKind),
5117                                   DtorKind & EHCleanup);
5118 
5119   return Result;
5120 }
5121 
5122 LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
5123   if (!E->isGLValue())
5124     // Initializing an aggregate temporary in C++11: T{...}.
5125     return EmitAggExprToLValue(E);
5126 
5127   // An lvalue initializer list must be initializing a reference.
5128   assert(E->isTransparent() && "non-transparent glvalue init list");
5129   return EmitLValue(E->getInit(0));
5130 }
5131 
5132 /// Emit the operand of a glvalue conditional operator. This is either a glvalue
5133 /// or a (possibly-parenthesized) throw-expression. If this is a throw, no
5134 /// LValue is returned and the current block has been terminated.
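/// For example, in `cond ? lv : throw 0`, the throw arm yields no LValue and
/// only the `lv` arm produces a usable result.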
5135 static std::optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
5136                                                          const Expr *Operand) {
5137   if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
5138     CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false);
5139     return std::nullopt;
5140   }
5141 
5142   return CGF.EmitLValue(Operand);
5143 }
5144 
5145 namespace {
5146 // Handle the case where the condition constant-folds to a simple integer,
5147 // which means we don't have to separately handle the true/false blocks.
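// For example, in `1 ? a : b` only the live operand `a` needs to be emitted.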
5148 std::optional<LValue> HandleConditionalOperatorLValueSimpleCase(
5149     CodeGenFunction &CGF, const AbstractConditionalOperator *E) {
5150   const Expr *condExpr = E->getCond();
5151   bool CondExprBool;
5152   if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
5153     const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr();
5154     if (!CondExprBool)
5155       std::swap(Live, Dead);
5156 
5157     if (!CGF.ContainsLabel(Dead)) {
5158       // If the true case is live, we need to track its region.
5159       if (CondExprBool)
5160         CGF.incrementProfileCounter(E);
5161       CGF.markStmtMaybeUsed(Dead);
5162       // If the live operand is a throw expression, emit it and return an
5163       // undefined lvalue because its value can't be used.
5164       if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
5165         CGF.EmitCXXThrowExpr(ThrowExpr);
5166         llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
5167         llvm::Type *Ty = CGF.UnqualPtrTy;
5168         return CGF.MakeAddrLValue(
5169             Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
5170             Dead->getType());
5171       }
5172       return CGF.EmitLValue(Live);
5173     }
5174   }
5175   return std::nullopt;
5176 }
5177 struct ConditionalInfo {
5178   llvm::BasicBlock *lhsBlock, *rhsBlock;
5179   std::optional<LValue> LHS, RHS;
5180 };
5181 
5182 // Create and generate the 3 blocks for a conditional operator.
5183 // Leaves the 'current block' in the continuation basic block.
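// Roughly:
//
//             +--> cond.true  --+
//   %cond --> |                 +--> cond.end
//             +--> cond.false --+
//
// with each operand emitted (conditionally) in its own block.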
5184 template<typename FuncTy>
5185 ConditionalInfo EmitConditionalBlocks(CodeGenFunction &CGF,
5186                                       const AbstractConditionalOperator *E,
5187                                       const FuncTy &BranchGenFunc) {
5188   ConditionalInfo Info{CGF.createBasicBlock("cond.true"),
5189                        CGF.createBasicBlock("cond.false"), std::nullopt,
5190                        std::nullopt};
5191   llvm::BasicBlock *endBlock = CGF.createBasicBlock("cond.end");
5192 
5193   CodeGenFunction::ConditionalEvaluation eval(CGF);
5194   CGF.EmitBranchOnBoolExpr(E->getCond(), Info.lhsBlock, Info.rhsBlock,
5195                            CGF.getProfileCount(E));
5196 
5197   // Any temporaries created here are conditional.
5198   CGF.EmitBlock(Info.lhsBlock);
5199   CGF.incrementProfileCounter(E);
5200   eval.begin(CGF);
5201   Info.LHS = BranchGenFunc(CGF, E->getTrueExpr());
5202   eval.end(CGF);
5203   Info.lhsBlock = CGF.Builder.GetInsertBlock();
5204 
5205   if (Info.LHS)
5206     CGF.Builder.CreateBr(endBlock);
5207 
5208   // Any temporaries created here are conditional.
5209   CGF.EmitBlock(Info.rhsBlock);
5210   eval.begin(CGF);
5211   Info.RHS = BranchGenFunc(CGF, E->getFalseExpr());
5212   eval.end(CGF);
5213   Info.rhsBlock = CGF.Builder.GetInsertBlock();
5214   CGF.EmitBlock(endBlock);
5215 
5216   return Info;
5217 }
5218 } // namespace
5219 
5220 void CodeGenFunction::EmitIgnoredConditionalOperator(
5221     const AbstractConditionalOperator *E) {
5222   if (!E->isGLValue()) {
5223     // ?: here should be an aggregate.
5224     assert(hasAggregateEvaluationKind(E->getType()) &&
5225            "Unexpected conditional operator!");
5226     return (void)EmitAggExprToLValue(E);
5227   }
5228 
5229   OpaqueValueMapping binding(*this, E);
5230   if (HandleConditionalOperatorLValueSimpleCase(*this, E))
5231     return;
5232 
5233   EmitConditionalBlocks(*this, E, [](CodeGenFunction &CGF, const Expr *E) {
5234     CGF.EmitIgnoredExpr(E);
5235     return LValue{};
5236   });
5237 }
5238 LValue CodeGenFunction::EmitConditionalOperatorLValue(
5239     const AbstractConditionalOperator *expr) {
5240   if (!expr->isGLValue()) {
5241     // ?: here should be an aggregate.
5242     assert(hasAggregateEvaluationKind(expr->getType()) &&
5243            "Unexpected conditional operator!");
5244     return EmitAggExprToLValue(expr);
5245   }
5246 
5247   OpaqueValueMapping binding(*this, expr);
5248   if (std::optional<LValue> Res =
5249           HandleConditionalOperatorLValueSimpleCase(*this, expr))
5250     return *Res;
5251 
5252   ConditionalInfo Info = EmitConditionalBlocks(
5253       *this, expr, [](CodeGenFunction &CGF, const Expr *E) {
5254         return EmitLValueOrThrowExpression(CGF, E);
5255       });
5256 
5257   if ((Info.LHS && !Info.LHS->isSimple()) ||
5258       (Info.RHS && !Info.RHS->isSimple()))
5259     return EmitUnsupportedLValue(expr, "conditional operator");
5260 
5261   if (Info.LHS && Info.RHS) {
5262     Address lhsAddr = Info.LHS->getAddress();
5263     Address rhsAddr = Info.RHS->getAddress();
5264     Address result = mergeAddressesInConditionalExpr(
5265         lhsAddr, rhsAddr, Info.lhsBlock, Info.rhsBlock,
5266         Builder.GetInsertBlock(), expr->getType());
5267     AlignmentSource alignSource =
5268         std::max(Info.LHS->getBaseInfo().getAlignmentSource(),
5269                  Info.RHS->getBaseInfo().getAlignmentSource());
5270     TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator(
5271         Info.LHS->getTBAAInfo(), Info.RHS->getTBAAInfo());
5272     return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource),
5273                           TBAAInfo);
5274   } else {
5275     assert((Info.LHS || Info.RHS) &&
5276            "both operands of glvalue conditional are throw-expressions?");
5277     return Info.LHS ? *Info.LHS : *Info.RHS;
5278   }
5279 }
5280 
5281 /// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
5282 /// type. If the cast is to a reference, we can have the usual lvalue result,
5283 /// otherwise if a cast is needed by the code generator in an lvalue context,
5284 /// then it must mean that we need the address of an aggregate in order to
5285 /// access one of its members.  This can happen for all the reasons that casts
5286 /// are permitted with aggregate result, including noop aggregate casts, and
5287 /// cast from scalar to union.
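/// For example, `reinterpret_cast<T &>(x)` (CK_LValueBitCast) reinterprets
/// the lvalue `x` in place, and a GNU cast-to-union such as `(union U)i`
/// (CK_ToUnion) goes through the aggregate path below.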
5288 LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
5289   switch (E->getCastKind()) {
5290   case CK_ToVoid:
5291   case CK_BitCast:
5292   case CK_LValueToRValueBitCast:
5293   case CK_ArrayToPointerDecay:
5294   case CK_FunctionToPointerDecay:
5295   case CK_NullToMemberPointer:
5296   case CK_NullToPointer:
5297   case CK_IntegralToPointer:
5298   case CK_PointerToIntegral:
5299   case CK_PointerToBoolean:
5300   case CK_IntegralCast:
5301   case CK_BooleanToSignedIntegral:
5302   case CK_IntegralToBoolean:
5303   case CK_IntegralToFloating:
5304   case CK_FloatingToIntegral:
5305   case CK_FloatingToBoolean:
5306   case CK_FloatingCast:
5307   case CK_FloatingRealToComplex:
5308   case CK_FloatingComplexToReal:
5309   case CK_FloatingComplexToBoolean:
5310   case CK_FloatingComplexCast:
5311   case CK_FloatingComplexToIntegralComplex:
5312   case CK_IntegralRealToComplex:
5313   case CK_IntegralComplexToReal:
5314   case CK_IntegralComplexToBoolean:
5315   case CK_IntegralComplexCast:
5316   case CK_IntegralComplexToFloatingComplex:
5317   case CK_DerivedToBaseMemberPointer:
5318   case CK_BaseToDerivedMemberPointer:
5319   case CK_MemberPointerToBoolean:
5320   case CK_ReinterpretMemberPointer:
5321   case CK_AnyPointerToBlockPointerCast:
5322   case CK_ARCProduceObject:
5323   case CK_ARCConsumeObject:
5324   case CK_ARCReclaimReturnedObject:
5325   case CK_ARCExtendBlockObject:
5326   case CK_CopyAndAutoreleaseBlockObject:
5327   case CK_IntToOCLSampler:
5328   case CK_FloatingToFixedPoint:
5329   case CK_FixedPointToFloating:
5330   case CK_FixedPointCast:
5331   case CK_FixedPointToBoolean:
5332   case CK_FixedPointToIntegral:
5333   case CK_IntegralToFixedPoint:
5334   case CK_MatrixCast:
5335   case CK_HLSLVectorTruncation:
5336   case CK_HLSLArrayRValue:
5337     return EmitUnsupportedLValue(E, "unexpected cast lvalue");
5338 
5339   case CK_Dependent:
5340     llvm_unreachable("dependent cast kind in IR gen!");
5341 
5342   case CK_BuiltinFnToFnPtr:
5343     llvm_unreachable("builtin functions are handled elsewhere");
5344 
5345   // These are never l-values; just use the aggregate emission code.
5346   case CK_NonAtomicToAtomic:
5347   case CK_AtomicToNonAtomic:
5348     return EmitAggExprToLValue(E);
5349 
5350   case CK_Dynamic: {
5351     LValue LV = EmitLValue(E->getSubExpr());
5352     Address V = LV.getAddress();
5353     const auto *DCE = cast<CXXDynamicCastExpr>(E);
5354     return MakeNaturalAlignRawAddrLValue(EmitDynamicCast(V, DCE), E->getType());
5355   }
5356 
5357   case CK_ConstructorConversion:
5358   case CK_UserDefinedConversion:
5359   case CK_CPointerToObjCPointerCast:
5360   case CK_BlockPointerToObjCPointerCast:
5361   case CK_LValueToRValue:
5362     return EmitLValue(E->getSubExpr());
5363 
5364   case CK_NoOp: {
5365     // CK_NoOp can model a qualification conversion, which can remove an array
5366     // bound and change the IR type.
5367     // FIXME: Once pointee types are removed from IR, remove this.
5368     LValue LV = EmitLValue(E->getSubExpr());
5369     // Propagate the volatile qualifier to the LValue, if present in E.
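    // (For example, `const_cast<volatile int &>(x)` adds volatile through a
    // CK_NoOp cast.)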
5370     if (E->changesVolatileQualification())
5371       LV.getQuals() = E->getType().getQualifiers();
5372     if (LV.isSimple()) {
5373       Address V = LV.getAddress();
5374       if (V.isValid()) {
5375         llvm::Type *T = ConvertTypeForMem(E->getType());
5376         if (V.getElementType() != T)
5377           LV.setAddress(V.withElementType(T));
5378       }
5379     }
5380     return LV;
5381   }
5382 
5383   case CK_UncheckedDerivedToBase:
5384   case CK_DerivedToBase: {
5385     const auto *DerivedClassTy =
5386         E->getSubExpr()->getType()->castAs<RecordType>();
5387     auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
5388 
5389     LValue LV = EmitLValue(E->getSubExpr());
5390     Address This = LV.getAddress();
5391 
5392     // Perform the derived-to-base conversion
5393     Address Base = GetAddressOfBaseClass(
5394         This, DerivedClassDecl, E->path_begin(), E->path_end(),
5395         /*NullCheckValue=*/false, E->getExprLoc());
5396 
5397     // TODO: Support accesses to members of base classes in TBAA. For now, we
5398     // conservatively pretend that the complete object is of the base class
5399     // type.
5400     return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(),
5401                           CGM.getTBAAInfoForSubobject(LV, E->getType()));
5402   }
5403   case CK_ToUnion:
5404     return EmitAggExprToLValue(E);
5405   case CK_BaseToDerived: {
5406     const auto *DerivedClassTy = E->getType()->castAs<RecordType>();
5407     auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
5408 
5409     LValue LV = EmitLValue(E->getSubExpr());
5410 
5411     // Perform the base-to-derived conversion
5412     Address Derived = GetAddressOfDerivedClass(
5413         LV.getAddress(), DerivedClassDecl, E->path_begin(), E->path_end(),
5414         /*NullCheckValue=*/false);
5415 
5416     // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
5417     // performed and the object is not of the derived type.
5418     if (sanitizePerformTypeCheck())
5419       EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(), Derived,
5420                     E->getType());
5421 
5422     if (SanOpts.has(SanitizerKind::CFIDerivedCast))
5423       EmitVTablePtrCheckForCast(E->getType(), Derived,
5424                                 /*MayBeNull=*/false, CFITCK_DerivedCast,
5425                                 E->getBeginLoc());
5426 
5427     return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(),
5428                           CGM.getTBAAInfoForSubobject(LV, E->getType()));
5429   }
5430   case CK_LValueBitCast: {
5431     // This must be a reinterpret_cast (or c-style equivalent).
5432     const auto *CE = cast<ExplicitCastExpr>(E);
5433 
5434     CGM.EmitExplicitCastExprType(CE, this);
5435     LValue LV = EmitLValue(E->getSubExpr());
5436     Address V = LV.getAddress().withElementType(
5437         ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType()));
5438 
5439     if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
5440       EmitVTablePtrCheckForCast(E->getType(), V,
5441                                 /*MayBeNull=*/false, CFITCK_UnrelatedCast,
5442                                 E->getBeginLoc());
5443 
5444     return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
5445                           CGM.getTBAAInfoForSubobject(LV, E->getType()));
5446   }
5447   case CK_AddressSpaceConversion: {
5448     LValue LV = EmitLValue(E->getSubExpr());
5449     QualType DestTy = getContext().getPointerType(E->getType());
5450     llvm::Value *V = getTargetHooks().performAddrSpaceCast(
5451         *this, LV.getPointer(*this),
5452         E->getSubExpr()->getType().getAddressSpace(),
5453         E->getType().getAddressSpace(), ConvertType(DestTy));
5454     return MakeAddrLValue(Address(V, ConvertTypeForMem(E->getType()),
5455                                   LV.getAddress().getAlignment()),
5456                           E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
5457   }
5458   case CK_ObjCObjectLValueCast: {
5459     LValue LV = EmitLValue(E->getSubExpr());
5460     Address V = LV.getAddress().withElementType(ConvertType(E->getType()));
5461     return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
5462                           CGM.getTBAAInfoForSubobject(LV, E->getType()));
5463   }
5464   case CK_ZeroToOCLOpaqueType:
5465     llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
5466 
5467   case CK_VectorSplat: {
5468     // LValue results of vector splats are only supported in HLSL.
5469     if (!getLangOpts().HLSL)
5470       return EmitUnsupportedLValue(E, "unexpected cast lvalue");
5471     return EmitLValue(E->getSubExpr());
5472   }
5473   }
5474 
5475   llvm_unreachable("Unhandled lvalue cast kind?");
5476 }
5477 
5478 LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
5479   assert(OpaqueValueMappingData::shouldBindAsLValue(e));
5480   return getOrCreateOpaqueLValueMapping(e);
5481 }
5482 
5483 std::pair<LValue, LValue>
5484 CodeGenFunction::EmitHLSLOutArgLValues(const HLSLOutArgExpr *E, QualType Ty) {
5485   // Emit the casted temporary through an opaque value.
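  // HLSL `out` / `inout` arguments have copy-in/copy-out semantics: e.g. for
  // `void fn(inout int v)`, the argument is copied into a temporary (only
  // `inout` performs the copy-in below), and the temporary is written back
  // to the original l-value after the call.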
5486   LValue BaseLV = EmitLValue(E->getArgLValue());
5487   OpaqueValueMappingData::bind(*this, E->getOpaqueArgLValue(), BaseLV);
5488 
5489   QualType ExprTy = E->getType();
5490   Address OutTemp = CreateIRTemp(ExprTy);
5491   LValue TempLV = MakeAddrLValue(OutTemp, ExprTy);
5492 
5493   if (E->isInOut())
5494     EmitInitializationToLValue(E->getCastedTemporary()->getSourceExpr(),
5495                                TempLV);
5496 
5497   OpaqueValueMappingData::bind(*this, E->getCastedTemporary(), TempLV);
5498   return std::make_pair(BaseLV, TempLV);
5499 }
5500 
5501 LValue CodeGenFunction::EmitHLSLOutArgExpr(const HLSLOutArgExpr *E,
5502                                            CallArgList &Args, QualType Ty) {
5503 
5504   auto [BaseLV, TempLV] = EmitHLSLOutArgLValues(E, Ty);
5505 
5506   llvm::Value *Addr = TempLV.getAddress().getBasePointer();
5507   llvm::Type *ElTy = ConvertTypeForMem(TempLV.getType());
5508 
5509   llvm::TypeSize Sz = CGM.getDataLayout().getTypeAllocSize(ElTy);
5510 
5511   llvm::Value *LifetimeSize = EmitLifetimeStart(Sz, Addr);
5512 
5513   Address TmpAddr(Addr, ElTy, TempLV.getAlignment());
5514   Args.addWriteback(BaseLV, TmpAddr, nullptr, E->getWritebackCast(),
5515                     LifetimeSize);
5516   Args.add(RValue::get(TmpAddr, *this), Ty);
5517   return TempLV;
5518 }
5519 
5520 LValue
5521 CodeGenFunction::getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e) {
5522   assert(OpaqueValueMapping::shouldBindAsLValue(e));
5523 
5524   llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
5525       it = OpaqueLValues.find(e);
5526 
5527   if (it != OpaqueLValues.end())
5528     return it->second;
5529 
5530   assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
5531   return EmitLValue(e->getSourceExpr());
5532 }
5533 
5534 RValue
5535 CodeGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) {
5536   assert(!OpaqueValueMapping::shouldBindAsLValue(e));
5537 
5538   llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
5539       it = OpaqueRValues.find(e);
5540 
5541   if (it != OpaqueRValues.end())
5542     return it->second;
5543 
5544   assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
5545   return EmitAnyExpr(e->getSourceExpr());
5546 }
5547 
5548 RValue CodeGenFunction::EmitRValueForField(LValue LV,
5549                                            const FieldDecl *FD,
5550                                            SourceLocation Loc) {
5551   QualType FT = FD->getType();
5552   LValue FieldLV = EmitLValueForField(LV, FD);
5553   switch (getEvaluationKind(FT)) {
5554   case TEK_Complex:
5555     return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
5556   case TEK_Aggregate:
5557     return FieldLV.asAggregateRValue();
5558   case TEK_Scalar:
5559     // This routine is used to load fields one-by-one to perform a copy, so
5560     // don't load reference fields.
5561     if (FD->getType()->isReferenceType())
5562       return RValue::get(FieldLV.getPointer(*this));
5563     // Bitfields can't be loaded with a primitive load, so go through
5564     // EmitLoadOfLValue for them; otherwise call EmitLoadOfScalar.
5565     if (FieldLV.isBitField())
5566       return EmitLoadOfLValue(FieldLV, Loc);
5567     return RValue::get(EmitLoadOfScalar(FieldLV, Loc));
5568   }
5569   llvm_unreachable("bad evaluation kind");
5570 }
5571 
5572 //===--------------------------------------------------------------------===//
5573 //                             Expression Emission
5574 //===--------------------------------------------------------------------===//
5575 
5576 RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
5577                                      ReturnValueSlot ReturnValue,
5578                                      llvm::CallBase **CallOrInvoke) {
5579   llvm::CallBase *CallOrInvokeStorage;
5580   if (!CallOrInvoke) {
5581     CallOrInvoke = &CallOrInvokeStorage;
5582   }
5583 
5584   auto AddCoroElideSafeOnExit = llvm::make_scope_exit([&] {
5585     if (E->isCoroElideSafe()) {
5586       auto *I = *CallOrInvoke;
5587       if (I)
5588         I->addFnAttr(llvm::Attribute::CoroElideSafe);
5589     }
5590   });
5591 
5592   // Builtins never have block type.
5593   if (E->getCallee()->getType()->isBlockPointerType())
5594     return EmitBlockCallExpr(E, ReturnValue, CallOrInvoke);
5595 
5596   if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
5597     return EmitCXXMemberCallExpr(CE, ReturnValue, CallOrInvoke);
5598 
5599   if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
5600     return EmitCUDAKernelCallExpr(CE, ReturnValue, CallOrInvoke);
5601 
5602   // A CXXOperatorCallExpr is created even for explicit object methods, but
5603   // these should be treated like static function calls.
5604   if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
5605     if (const auto *MD =
5606             dyn_cast_if_present<CXXMethodDecl>(CE->getCalleeDecl());
5607         MD && MD->isImplicitObjectMemberFunction())
5608       return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue, CallOrInvoke);
5609 
5610   CGCallee callee = EmitCallee(E->getCallee());
5611 
5612   if (callee.isBuiltin()) {
5613     return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(),
5614                            E, ReturnValue);
5615   }
5616 
5617   if (callee.isPseudoDestructor()) {
5618     return EmitCXXPseudoDestructorExpr(callee.getPseudoDestructorExpr());
5619   }
5620 
5621   return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue,
5622                   /*Chain=*/nullptr, CallOrInvoke);
5623 }
5624 
5625 /// Emit a CallExpr without considering whether it might be a CallExpr subclass.
5626 RValue CodeGenFunction::EmitSimpleCallExpr(const CallExpr *E,
5627                                            ReturnValueSlot ReturnValue,
5628                                            llvm::CallBase **CallOrInvoke) {
5629   CGCallee Callee = EmitCallee(E->getCallee());
5630   return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue,
5631                   /*Chain=*/nullptr, CallOrInvoke);
5632 }
5633 
5634 // Detect the unusual situation where an inline version is shadowed by a
5635 // non-inline version. In that case we should pick the external one
5636 // everywhere. That's GCC behavior too.
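// For example (a sketch of the glibc-style pattern):
//   extern inline __attribute__((gnu_inline, always_inline))
//   void *memcpy(void *d, const void *s, size_t n) { /* inline body */ }
// later shadowed by a plain `void *memcpy(void *, const void *, size_t);`
// declaration.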
5637 static bool OnlyHasInlineBuiltinDeclaration(const FunctionDecl *FD) {
5638   for (const FunctionDecl *PD = FD; PD; PD = PD->getPreviousDecl())
5639     if (!PD->isInlineBuiltinDeclaration())
5640       return false;
5641   return true;
5642 }
5643 
5644 static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD) {
5645   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
5646 
5647   if (auto builtinID = FD->getBuiltinID()) {
5648     std::string NoBuiltinFD = ("no-builtin-" + FD->getName()).str();
5649     std::string NoBuiltins = "no-builtins";
5650 
5651     StringRef Ident = CGF.CGM.getMangledName(GD);
5652     std::string FDInlineName = (Ident + ".inline").str();
5653 
5654     bool IsPredefinedLibFunction =
5655         CGF.getContext().BuiltinInfo.isPredefinedLibFunction(builtinID);
5656     bool HasAttributeNoBuiltin =
5657         CGF.CurFn->getAttributes().hasFnAttr(NoBuiltinFD) ||
5658         CGF.CurFn->getAttributes().hasFnAttr(NoBuiltins);
5659 
5660     // When directly calling an inline builtin, call it through its mangled
5661     // name to make it clear it's not the actual builtin.
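    // For example, a call to an inline builtin `memcpy` is emitted as a call
    // to "memcpy.inline" (the mangled name plus the ".inline" suffix built
    // above) rather than to "memcpy" itself.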
5662     if (CGF.CurFn->getName() != FDInlineName &&
5663         OnlyHasInlineBuiltinDeclaration(FD)) {
5664       llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
5665       llvm::Function *Fn = llvm::cast<llvm::Function>(CalleePtr);
5666       llvm::Module *M = Fn->getParent();
5667       llvm::Function *Clone = M->getFunction(FDInlineName);
5668       if (!Clone) {
5669         Clone = llvm::Function::Create(Fn->getFunctionType(),
5670                                        llvm::GlobalValue::InternalLinkage,
5671                                        Fn->getAddressSpace(), FDInlineName, M);
5672         Clone->addFnAttr(llvm::Attribute::AlwaysInline);
5673       }
5674       return CGCallee::forDirect(Clone, GD);
5675     }
5676 
5677     // Replaceable builtins provide their own implementation of a builtin. If
5678     // we are in an inline builtin implementation, avoid trivial infinite
5679     // recursion. Honor __attribute__((no_builtin("foo"))) or
5680     // __attribute__((no_builtin)) on the current function, but only when foo
5681     // is a predefined library function; otherwise we must generate the
5682     // builtin no matter what.
5683     else if (!IsPredefinedLibFunction || !HasAttributeNoBuiltin)
5684       return CGCallee::forBuiltin(builtinID, FD);
5685   }
5686 
5687   llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
5688   if (CGF.CGM.getLangOpts().CUDA && !CGF.CGM.getLangOpts().CUDAIsDevice &&
5689       FD->hasAttr<CUDAGlobalAttr>())
5690     CalleePtr = CGF.CGM.getCUDARuntime().getKernelStub(
5691         cast<llvm::GlobalValue>(CalleePtr->stripPointerCasts()));
5692 
5693   return CGCallee::forDirect(CalleePtr, GD);
5694 }
5695 
5696 CGCallee CodeGenFunction::EmitCallee(const Expr *E) {
5697   E = E->IgnoreParens();
5698 
5699   // Look through function-to-pointer decay.
5700   if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) {
5701     if (ICE->getCastKind() == CK_FunctionToPointerDecay ||
5702         ICE->getCastKind() == CK_BuiltinFnToFnPtr) {
5703       return EmitCallee(ICE->getSubExpr());
5704     }
5705 
5706   // Resolve direct calls.
5707   } else if (auto DRE = dyn_cast<DeclRefExpr>(E)) {
5708     if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
5709       return EmitDirectCallee(*this, FD);
5710     }
5711   } else if (auto ME = dyn_cast<MemberExpr>(E)) {
5712     if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) {
5713       EmitIgnoredExpr(ME->getBase());
5714       return EmitDirectCallee(*this, FD);
5715     }
5716 
5717   // Look through template substitutions.
5718   } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
5719     return EmitCallee(NTTP->getReplacement());
5720 
5721   // Treat pseudo-destructor calls differently.
5722   } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) {
5723     return CGCallee::forPseudoDestructor(PDE);
5724   }
5725 
5726   // Otherwise, we have an indirect reference.
5727   llvm::Value *calleePtr;
5728   QualType functionType;
5729   if (auto ptrType = E->getType()->getAs<PointerType>()) {
5730     calleePtr = EmitScalarExpr(E);
5731     functionType = ptrType->getPointeeType();
5732   } else {
5733     functionType = E->getType();
5734     calleePtr = EmitLValue(E, KnownNonNull).getPointer(*this);
5735   }
5736   assert(functionType->isFunctionType());
5737 
5738   GlobalDecl GD;
5739   if (const auto *VD =
5740           dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee()))
5741     GD = GlobalDecl(VD);
5742 
5743   CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
5744   CGPointerAuthInfo pointerAuth = CGM.getFunctionPointerAuthInfo(functionType);
5745   CGCallee callee(calleeInfo, calleePtr, pointerAuth);
5746   return callee;
5747 }
5748 
5749 LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
5750   // Comma expressions just emit their LHS then their RHS as an l-value.
5751   if (E->getOpcode() == BO_Comma) {
5752     EmitIgnoredExpr(E->getLHS());
5753     EnsureInsertPoint();
5754     return EmitLValue(E->getRHS());
5755   }
5756 
5757   if (E->getOpcode() == BO_PtrMemD ||
5758       E->getOpcode() == BO_PtrMemI)
5759     return EmitPointerToDataMemberBinaryExpr(E);
5760 
5761   assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
5762 
5763   // Note that in all of these cases, __block variables need the RHS
5764   // evaluated first just in case the variable gets moved by the RHS.
5765 
5766   switch (getEvaluationKind(E->getType())) {
5767   case TEK_Scalar: {
5768     switch (E->getLHS()->getType().getObjCLifetime()) {
5769     case Qualifiers::OCL_Strong:
5770       return EmitARCStoreStrong(E, /*ignored*/ false).first;
5771 
5772     case Qualifiers::OCL_Autoreleasing:
5773       return EmitARCStoreAutoreleasing(E).first;
5774 
5775     // No reason to do any of these differently.
5776     case Qualifiers::OCL_None:
5777     case Qualifiers::OCL_ExplicitNone:
5778     case Qualifiers::OCL_Weak:
5779       break;
5780     }
5781 
5782     // TODO: Can we de-duplicate this code with the corresponding code in
5783     // CGExprScalar, similar to the way EmitCompoundAssignmentLValue works?
5784     RValue RV;
5785     llvm::Value *Previous = nullptr;
5786     QualType SrcType = E->getRHS()->getType();
5787     // If the LHS is a bitfield and the RHS contains an implicit cast
5788     // expression, we want to extract the original value and potentially (if
5789     // the bitfield sanitizer is enabled) use it to check for an implicit conversion.
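    // For example, in `s.bf = x` where `bf` is a bitfield and `x` requires an
    // implicit conversion to the field type, it is the pre-conversion value
    // of `x` that the sanitizer checks.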
5790     if (E->getLHS()->refersToBitField()) {
5791       llvm::Value *RHS =
5792           EmitWithOriginalRHSBitfieldAssignment(E, &Previous, &SrcType);
5793       RV = RValue::get(RHS);
5794     } else
5795       RV = EmitAnyExpr(E->getRHS());
5796 
5797     LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);
5798 
5799     if (RV.isScalar())
5800       EmitNullabilityCheck(LV, RV.getScalarVal(), E->getExprLoc());
5801 
5802     if (LV.isBitField()) {
5803       llvm::Value *Result = nullptr;
5804       // If bitfield sanitizers are enabled we want to use the result
5805       // to check whether a truncation or sign change has occurred.
5806       if (SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
5807         EmitStoreThroughBitfieldLValue(RV, LV, &Result);
5808       else
5809         EmitStoreThroughBitfieldLValue(RV, LV);
5810 
5811       // If the expression contained an implicit conversion, make sure
5812       // to use the value before the scalar conversion.
5813       llvm::Value *Src = Previous ? Previous : RV.getScalarVal();
5814       QualType DstType = E->getLHS()->getType();
5815       EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
5816                                   LV.getBitFieldInfo(), E->getExprLoc());
5817     } else
5818       EmitStoreThroughLValue(RV, LV);
5819 
5820     if (getLangOpts().OpenMP)
5821       CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
5822                                                                 E->getLHS());
5823     return LV;
5824   }
5825 
5826   case TEK_Complex:
5827     return EmitComplexAssignmentLValue(E);
5828 
5829   case TEK_Aggregate:
5830     // If the lang opt is HLSL and the LHS is a constant array
5831     // then we are performing a copy assignment and call a special
5832     // function because EmitAggExprToLValue emits to a temporary LValue
5833     if (getLangOpts().HLSL && E->getLHS()->getType()->isConstantArrayType())
5834       return EmitHLSLArrayAssignLValue(E);
5835 
5836     return EmitAggExprToLValue(E);
5837   }
5838   llvm_unreachable("bad evaluation kind");
5839 }
5840 
5841 // This function implements trivial copy assignment for HLSL's
5842 // assignable constant arrays.
5843 LValue CodeGenFunction::EmitHLSLArrayAssignLValue(const BinaryOperator *E) {
5844   // Don't emit an LValue for the RHS because it might not be an LValue.
5845   LValue LHS = EmitLValue(E->getLHS());
5846   // In C the RHS of an assignment operator is an RValue.
5847   // EmitAggregateAssign takes an LValue for the RHS. Instead we can call
5848   // EmitInitializationToLValue to emit an RValue into an LValue.
5849   EmitInitializationToLValue(E->getRHS(), LHS);
5850   return LHS;
5851 }
5852 
5853 LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E,
5854                                            llvm::CallBase **CallOrInvoke) {
5855   RValue RV = EmitCallExpr(E, ReturnValueSlot(), CallOrInvoke);
5856 
5857   if (!RV.isScalar())
5858     return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
5859                           AlignmentSource::Decl);
5860 
5861   assert(E->getCallReturnType(getContext())->isReferenceType() &&
5862          "Can't have a scalar return unless the return type is a "
5863          "reference type!");
5864 
5865   return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
5866 }
5867 
5868 LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
5869   // FIXME: This shouldn't require another copy.
5870   return EmitAggExprToLValue(E);
5871 }
5872 
5873 LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
5874   assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
5875          && "binding l-value to type which needs a temporary");
5876   AggValueSlot Slot = CreateAggTemp(E->getType());
5877   EmitCXXConstructExpr(E, Slot);
5878   return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
5879 }
5880 
5881 LValue
5882 CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
5883   return MakeNaturalAlignRawAddrLValue(EmitCXXTypeidExpr(E), E->getType());
5884 }
5885 
5886 Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
5887   return CGM.GetAddrOfMSGuidDecl(E->getGuidDecl())
5888       .withElementType(ConvertType(E->getType()));
5889 }
5890 
5891 LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) {
5892   return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType(),
5893                         AlignmentSource::Decl);
5894 }
5895 
5896 LValue
5897 CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
5898   AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
5899   Slot.setExternallyDestructed();
5900   EmitAggExpr(E->getSubExpr(), Slot);
5901   EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddress());
5902   return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
5903 }
5904 
5905 LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
5906   RValue RV = EmitObjCMessageExpr(E);
5907 
5908   if (!RV.isScalar())
5909     return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
5910                           AlignmentSource::Decl);
5911 
5912   assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
5913          "Can't have a scalar return unless the return type is a "
5914          "reference type!");
5915 
5916   return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
5917 }
5918 
5919 LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
5920   Address V =
5921     CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector());
5922   return MakeAddrLValue(V, E->getType(), AlignmentSource::Decl);
5923 }
5924 
5925 llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
5926                                              const ObjCIvarDecl *Ivar) {
5927   return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
5928 }
5929 
5930 llvm::Value *
5931 CodeGenFunction::EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface,
5932                                              const ObjCIvarDecl *Ivar) {
5933   llvm::Value *OffsetValue = EmitIvarOffset(Interface, Ivar);
5934   QualType PointerDiffType = getContext().getPointerDiffType();
5935   return Builder.CreateZExtOrTrunc(OffsetValue,
5936                                    getTypes().ConvertType(PointerDiffType));
5937 }
5938 
5939 LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
5940                                           llvm::Value *BaseValue,
5941                                           const ObjCIvarDecl *Ivar,
5942                                           unsigned CVRQualifiers) {
5943   return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
5944                                                    Ivar, CVRQualifiers);
5945 }
5946 
5947 LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
5948   // FIXME: A lot of the code below could be shared with EmitMemberExpr.
5949   llvm::Value *BaseValue = nullptr;
5950   const Expr *BaseExpr = E->getBase();
5951   Qualifiers BaseQuals;
5952   QualType ObjectTy;
5953   if (E->isArrow()) {
5954     BaseValue = EmitScalarExpr(BaseExpr);
5955     ObjectTy = BaseExpr->getType()->getPointeeType();
5956     BaseQuals = ObjectTy.getQualifiers();
5957   } else {
5958     LValue BaseLV = EmitLValue(BaseExpr);
5959     BaseValue = BaseLV.getPointer(*this);
5960     ObjectTy = BaseExpr->getType();
5961     BaseQuals = ObjectTy.getQualifiers();
5962   }
5963 
5964   LValue LV =
5965     EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
5966                       BaseQuals.getCVRQualifiers());
5967   setObjCGCLValueClass(getContext(), E, LV);
5968   return LV;
5969 }
5970 
5971 LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
5972   // Can only get an l-value for a statement expression returning an aggregate.
5973   RValue RV = EmitAnyExprToTemp(E);
5974   return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
5975                         AlignmentSource::Decl);
5976 }
5977 
5978 RValue CodeGenFunction::EmitCall(QualType CalleeType,
5979                                  const CGCallee &OrigCallee, const CallExpr *E,
5980                                  ReturnValueSlot ReturnValue,
5981                                  llvm::Value *Chain,
5982                                  llvm::CallBase **CallOrInvoke,
5983                                  CGFunctionInfo const **ResolvedFnInfo) {
5984   // Get the actual function type. The callee type will always be a pointer to
5985   // function type or a block pointer type.
5986   assert(CalleeType->isFunctionPointerType() &&
5987          "Call must have function pointer type!");
5988 
5989   const Decl *TargetDecl =
5990       OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();
5991 
5992   assert((!isa_and_present<FunctionDecl>(TargetDecl) ||
5993           !cast<FunctionDecl>(TargetDecl)->isImmediateFunction()) &&
5994          "trying to emit a call to an immediate function");
5995 
5996   CalleeType = getContext().getCanonicalType(CalleeType);
5997 
5998   auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType();
5999 
6000   CGCallee Callee = OrigCallee;
6001 
6002   if (SanOpts.has(SanitizerKind::Function) &&
6003       (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) &&
6004       !isa<FunctionNoProtoType>(PointeeType)) {
6005     if (llvm::Constant *PrefixSig =
6006             CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
6007       SanitizerScope SanScope(this);
6008       auto *TypeHash = getUBSanFunctionTypeHash(PointeeType);
6009 
6010       llvm::Type *PrefixSigType = PrefixSig->getType();
6011       llvm::StructType *PrefixStructTy = llvm::StructType::get(
6012           CGM.getLLVMContext(), {PrefixSigType, Int32Ty}, /*isPacked=*/true);
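      // An instrumented callee is preceded in memory by this struct, i.e.
      // (roughly):
      //   { <prefix signature>, i32 <function type hash> }  <-- callee entry
      // which is why the loads below GEP from the callee pointer at index -1.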
6013 
6014       llvm::Value *CalleePtr = Callee.getFunctionPointer();
6015       if (CGM.getCodeGenOpts().PointerAuth.FunctionPointers) {
6016         // Use raw pointer since we are using the callee pointer as data here.
6017         Address Addr =
6018             Address(CalleePtr, CalleePtr->getType(),
6019                     CharUnits::fromQuantity(
6020                         CalleePtr->getPointerAlignment(CGM.getDataLayout())),
6021                     Callee.getPointerAuthInfo(), nullptr);
6022         CalleePtr = Addr.emitRawPointer(*this);
6023       }
6024 
6025       // On 32-bit Arm, the low bit of a function pointer indicates whether
6026       // it's using the Arm or Thumb instruction set. The actual first
6027       // instruction lives at the same address either way, so we must clear
6028       // that low bit before using the function address to find the prefix
6029       // structure.
6030       //
6031       // This applies to both Arm and Thumb target triples, because
6032       // either one could be used in an interworking context where it
6033       // might be passed function pointers of both types.
6034       llvm::Value *AlignedCalleePtr;
6035       if (CGM.getTriple().isARM() || CGM.getTriple().isThumb()) {
6036         llvm::Value *CalleeAddress =
6037             Builder.CreatePtrToInt(CalleePtr, IntPtrTy);
6038         llvm::Value *Mask = llvm::ConstantInt::get(IntPtrTy, ~1);
6039         llvm::Value *AlignedCalleeAddress =
6040             Builder.CreateAnd(CalleeAddress, Mask);
6041         AlignedCalleePtr =
6042             Builder.CreateIntToPtr(AlignedCalleeAddress, CalleePtr->getType());
6043       } else {
6044         AlignedCalleePtr = CalleePtr;
6045       }
6046 
6047       llvm::Value *CalleePrefixStruct = AlignedCalleePtr;
6048       llvm::Value *CalleeSigPtr =
6049           Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 0);
6050       llvm::Value *CalleeSig =
6051           Builder.CreateAlignedLoad(PrefixSigType, CalleeSigPtr, getIntAlign());
6052       llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);
6053 
6054       llvm::BasicBlock *Cont = createBasicBlock("cont");
6055       llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
6056       Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);
6057 
6058       EmitBlock(TypeCheck);
6059       llvm::Value *CalleeTypeHash = Builder.CreateAlignedLoad(
6060           Int32Ty,
6061           Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 1),
6062           getPointerAlign());
6063       llvm::Value *CalleeTypeHashMatch =
6064           Builder.CreateICmpEQ(CalleeTypeHash, TypeHash);
6065       llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
6066                                       EmitCheckTypeDescriptor(CalleeType)};
6067       EmitCheck(std::make_pair(CalleeTypeHashMatch, SanitizerKind::SO_Function),
6068                 SanitizerHandler::FunctionTypeMismatch, StaticData,
6069                 {CalleePtr});
6070 
6071       Builder.CreateBr(Cont);
6072       EmitBlock(Cont);
6073     }
6074   }
6075 
6076   const auto *FnType = cast<FunctionType>(PointeeType);
6077 
6078   // If we are checking indirect calls and this call is indirect, check that the
6079   // function pointer is a member of the bit set for the function type.
6080   if (SanOpts.has(SanitizerKind::CFIICall) &&
6081       (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
6082     SanitizerScope SanScope(this);
6083     EmitSanitizerStatReport(llvm::SanStat_CFI_ICall);
6084 
6085     llvm::Metadata *MD;
6086     if (CGM.getCodeGenOpts().SanitizeCfiICallGeneralizePointers)
6087       MD = CGM.CreateMetadataIdentifierGeneralized(QualType(FnType, 0));
6088     else
6089       MD = CGM.CreateMetadataIdentifierForType(QualType(FnType, 0));
6090 
6091     llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);
6092 
6093     llvm::Value *CalleePtr = Callee.getFunctionPointer();
6094     llvm::Value *TypeTest = Builder.CreateCall(
6095         CGM.getIntrinsic(llvm::Intrinsic::type_test), {CalleePtr, TypeId});
6096 
6097     auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
6098     llvm::Constant *StaticData[] = {
6099         llvm::ConstantInt::get(Int8Ty, CFITCK_ICall),
6100         EmitCheckSourceLocation(E->getBeginLoc()),
6101         EmitCheckTypeDescriptor(QualType(FnType, 0)),
6102     };
6103     if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
6104       EmitCfiSlowPathCheck(SanitizerKind::SO_CFIICall, TypeTest, CrossDsoTypeId,
6105                            CalleePtr, StaticData);
6106     } else {
6107       EmitCheck(std::make_pair(TypeTest, SanitizerKind::SO_CFIICall),
6108                 SanitizerHandler::CFICheckFail, StaticData,
6109                 {CalleePtr, llvm::UndefValue::get(IntPtrTy)});
6110     }
6111   }
6112 
6113   CallArgList Args;
6114   if (Chain)
6115     Args.add(RValue::get(Chain), CGM.getContext().VoidPtrTy);
6116 
6117   // C++17 requires that we evaluate arguments to a call using assignment syntax
6118   // right-to-left, and that we evaluate arguments to certain other operators
6119   // left-to-right. Note that we allow this to override the order dictated by
6120   // the calling convention on the MS ABI, which means that parameter
6121   // destruction order is not necessarily reverse construction order.
6122   // FIXME: Revisit this based on C++ committee response to unimplementability.
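  // For example, in `f() = g()` (assignment syntax), g() is evaluated before
  // f(); in `a << b`, a is evaluated before b.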
6123   EvaluationOrder Order = EvaluationOrder::Default;
6124   bool StaticOperator = false;
6125   if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
6126     if (OCE->isAssignmentOp())
6127       Order = EvaluationOrder::ForceRightToLeft;
6128     else {
6129       switch (OCE->getOperator()) {
6130       case OO_LessLess:
6131       case OO_GreaterGreater:
6132       case OO_AmpAmp:
6133       case OO_PipePipe:
6134       case OO_Comma:
6135       case OO_ArrowStar:
6136         Order = EvaluationOrder::ForceLeftToRight;
6137         break;
6138       default:
6139         break;
6140       }
6141     }
6142 
6143     if (const auto *MD =
6144             dyn_cast_if_present<CXXMethodDecl>(OCE->getCalleeDecl());
6145         MD && MD->isStatic())
6146       StaticOperator = true;
6147   }
6148 
6149   auto Arguments = E->arguments();
6150   if (StaticOperator) {
6151     // If we're calling a static operator, we need to emit the object argument
6152     // and ignore it.
6153     EmitIgnoredExpr(E->getArg(0));
6154     Arguments = drop_begin(Arguments, 1);
6155   }
6156   EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), Arguments,
6157                E->getDirectCallee(), /*ParamsToSkip=*/0, Order);
6158 
6159   const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
6160       Args, FnType, /*ChainCall=*/Chain);
6161 
6162   if (ResolvedFnInfo)
6163     *ResolvedFnInfo = &FnInfo;
6164 
6165   // A HIP function pointer contains a kernel handle when it is used in a
6166   // triple-chevron launch. The kernel stub needs to be loaded from the
6167   // kernel handle and used as the callee.
6168   if (CGM.getLangOpts().HIP && !CGM.getLangOpts().CUDAIsDevice &&
6169       isa<CUDAKernelCallExpr>(E) &&
6170       (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
6171     llvm::Value *Handle = Callee.getFunctionPointer();
6172     auto *Stub = Builder.CreateLoad(
6173         Address(Handle, Handle->getType(), CGM.getPointerAlign()));
6174     Callee.setFunctionPointer(Stub);
6175   }
6176   llvm::CallBase *LocalCallOrInvoke = nullptr;
6177   RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &LocalCallOrInvoke,
6178                          E == MustTailCall, E->getExprLoc());
6179 
6180   // Generate a function declaration DISubprogram so that it can be used
6181   // in debug info about call sites.
6182   if (CGDebugInfo *DI = getDebugInfo()) {
6183     if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
6184       FunctionArgList Args;
6185       QualType ResTy = BuildFunctionArgList(CalleeDecl, Args);
6186       DI->EmitFuncDeclForCallSite(LocalCallOrInvoke,
6187                                   DI->getFunctionType(CalleeDecl, ResTy, Args),
6188                                   CalleeDecl);
6189     }
6190   }
6191   if (CallOrInvoke)
6192     *CallOrInvoke = LocalCallOrInvoke;
6193 
6194   return Call;
6195 }
6196 
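/// Emit `base .* ptr-to-member` / `base ->* ptr-to-member`. For example,
/// given `int S::*pm = &S::x;`, `s.*pm` forms an l-value for the selected
/// member by applying the member pointer's offset to the base address.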
6197 LValue CodeGenFunction::
6198 EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
6199   Address BaseAddr = Address::invalid();
6200   if (E->getOpcode() == BO_PtrMemI) {
6201     BaseAddr = EmitPointerWithAlignment(E->getLHS());
6202   } else {
6203     BaseAddr = EmitLValue(E->getLHS()).getAddress();
6204   }
6205 
6206   llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
6207   const auto *MPT = E->getRHS()->getType()->castAs<MemberPointerType>();
6208 
6209   LValueBaseInfo BaseInfo;
6210   TBAAAccessInfo TBAAInfo;
6211   Address MemberAddr =
6212     EmitCXXMemberDataPointerAddress(E, BaseAddr, OffsetV, MPT, &BaseInfo,
6213                                     &TBAAInfo);
6214 
6215   return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), BaseInfo, TBAAInfo);
6216 }
6217 
6218 /// Given the address of a temporary variable, produce an r-value of
6219 /// its type.
6220 RValue CodeGenFunction::convertTempToRValue(Address addr,
6221                                             QualType type,
6222                                             SourceLocation loc) {
6223   LValue lvalue = MakeAddrLValue(addr, type, AlignmentSource::Decl);
6224   switch (getEvaluationKind(type)) {
6225   case TEK_Complex:
6226     return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
6227   case TEK_Aggregate:
6228     return lvalue.asAggregateRValue();
6229   case TEK_Scalar:
6230     return RValue::get(EmitLoadOfScalar(lvalue, loc));
6231   }
6232   llvm_unreachable("bad evaluation kind");
6233 }
6234 
6235 void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
6236   assert(Val->getType()->isFPOrFPVectorTy());
6237   if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
6238     return;
6239 
6240   llvm::MDBuilder MDHelper(getLLVMContext());
6241   llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);
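  // E.g. with Accuracy == 2.5 this attaches `!fpmath !{float 2.500000e+00}`,
  // permitting the backend to use a less precise implementation.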
6242 
6243   cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
6244 }
6245 
6246 void CodeGenFunction::SetSqrtFPAccuracy(llvm::Value *Val) {
6247   llvm::Type *EltTy = Val->getType()->getScalarType();
6248   if (!EltTy->isFloatTy())
6249     return;
6250 
6251   if ((getLangOpts().OpenCL &&
6252        !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
6253       (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
6254        !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
6255     // OpenCL v1.1 s7.4: minimum accuracy of single precision sqrt is 3ulp
6256     //
6257     // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
6258     // build option allows an application to specify that single precision
6259     // floating-point divide (x/y and 1/x) and sqrt used in the program
6260     // source are correctly rounded.
6261     //
6262     // TODO: CUDA has a prec-sqrt flag
6263     SetFPAccuracy(Val, 3.0f);
6264   }
6265 }
6266 
6267 void CodeGenFunction::SetDivFPAccuracy(llvm::Value *Val) {
6268   llvm::Type *EltTy = Val->getType()->getScalarType();
6269   if (!EltTy->isFloatTy())
6270     return;
6271 
6272   if ((getLangOpts().OpenCL &&
6273        !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
6274       (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
6275        !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
6276     // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
6277     //
6278     // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
6279     // build option allows an application to specify that single precision
6280     // floating-point divide (x/y and 1/x) and sqrt used in the program
6281     // source are correctly rounded.
6282     //
6283     // TODO: CUDA has a prec-div flag
6284     SetFPAccuracy(Val, 2.5f);
6285   }
6286 }
6287 
6288 namespace {
6289   struct LValueOrRValue {
6290     LValue LV;
6291     RValue RV;
6292   };
6293 }
6294 
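/// Emit a pseudo-object expression, e.g. an Objective-C property access such
/// as `obj.prop += 1`, whose semantic form is a sequence of expressions
/// (getter call, addition, setter call) linked together by OpaqueValueExprs.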
6295 static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
6296                                            const PseudoObjectExpr *E,
6297                                            bool forLValue,
6298                                            AggValueSlot slot) {
6299   SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;
6300 
6301   // Find the result expression, if any.
6302   const Expr *resultExpr = E->getResultExpr();
6303   LValueOrRValue result;
6304 
6305   for (PseudoObjectExpr::const_semantics_iterator
6306          i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
6307     const Expr *semantic = *i;
6308 
6309     // If this semantic expression is an opaque value, bind it
6310     // to the result of its source expression.
6311     if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
6312       // Skip unique OVEs.
6313       if (ov->isUnique()) {
6314         assert(ov != resultExpr &&
6315                "A unique OVE cannot be used as the result expression");
6316         continue;
6317       }
6318 
6319       // If this is the result expression, we may need to evaluate
6320       // directly into the slot.
6321       typedef CodeGenFunction::OpaqueValueMappingData OVMA;
6322       OVMA opaqueData;
6323       if (ov == resultExpr && ov->isPRValue() && !forLValue &&
6324           CodeGenFunction::hasAggregateEvaluationKind(ov->getType())) {
6325         CGF.EmitAggExpr(ov->getSourceExpr(), slot);
6326         LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(),
6327                                        AlignmentSource::Decl);
6328         opaqueData = OVMA::bind(CGF, ov, LV);
6329         result.RV = slot.asRValue();
6330 
6331       // Otherwise, emit as normal.
6332       } else {
6333         opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
6334 
6335         // If this is the result, also evaluate the result now.
6336         if (ov == resultExpr) {
6337           if (forLValue)
6338             result.LV = CGF.EmitLValue(ov);
6339           else
6340             result.RV = CGF.EmitAnyExpr(ov, slot);
6341         }
6342       }
6343 
6344       opaques.push_back(opaqueData);
6345 
6346     // Otherwise, if the expression is the result, evaluate it
6347     // and remember the result.
6348     } else if (semantic == resultExpr) {
6349       if (forLValue)
6350         result.LV = CGF.EmitLValue(semantic);
6351       else
6352         result.RV = CGF.EmitAnyExpr(semantic, slot);
6353 
6354     // Otherwise, evaluate the expression in an ignored context.
6355     } else {
6356       CGF.EmitIgnoredExpr(semantic);
6357     }
6358   }
6359 
6360   // Unbind all the opaques now.
6361   for (unsigned i = 0, e = opaques.size(); i != e; ++i)
6362     opaques[i].unbind(CGF);
6363 
6364   return result;
6365 }
6366 
6367 RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
6368                                                AggValueSlot slot) {
6369   return emitPseudoObjectExpr(*this, E, false, slot).RV;
6370 }
6371 
6372 LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
6373   return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
6374 }
6375