1 //===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This contains code to emit Expr nodes as LLVM code.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CGCUDARuntime.h"
14 #include "CGCXXABI.h"
15 #include "CGCall.h"
16 #include "CGCleanup.h"
17 #include "CGDebugInfo.h"
18 #include "CGObjCRuntime.h"
19 #include "CGOpenMPRuntime.h"
20 #include "CGRecordLayout.h"
21 #include "CodeGenFunction.h"
22 #include "CodeGenModule.h"
23 #include "ConstantEmitter.h"
24 #include "TargetInfo.h"
25 #include "clang/AST/ASTContext.h"
26 #include "clang/AST/Attr.h"
27 #include "clang/AST/DeclObjC.h"
28 #include "clang/AST/NSAPI.h"
29 #include "clang/Basic/Builtins.h"
30 #include "clang/Basic/CodeGenOptions.h"
31 #include "clang/Basic/SourceManager.h"
32 #include "llvm/ADT/Hashing.h"
33 #include "llvm/ADT/STLExtras.h"
34 #include "llvm/ADT/StringExtras.h"
35 #include "llvm/IR/DataLayout.h"
36 #include "llvm/IR/Intrinsics.h"
37 #include "llvm/IR/IntrinsicsWebAssembly.h"
38 #include "llvm/IR/LLVMContext.h"
39 #include "llvm/IR/MDBuilder.h"
40 #include "llvm/IR/MatrixBuilder.h"
41 #include "llvm/Passes/OptimizationLevel.h"
42 #include "llvm/Support/ConvertUTF.h"
43 #include "llvm/Support/MathExtras.h"
44 #include "llvm/Support/Path.h"
45 #include "llvm/Support/SaveAndRestore.h"
46 #include "llvm/Support/xxhash.h"
47 #include "llvm/Transforms/Utils/SanitizerStats.h"
48 
49 #include <optional>
50 #include <string>
51 
52 using namespace clang;
53 using namespace CodeGen;
54 
55 // Experiment to make sanitizers easier to debug
56 static llvm::cl::opt<bool> ClSanitizeDebugDeoptimization(
57     "ubsan-unique-traps", llvm::cl::Optional,
58     llvm::cl::desc("Deoptimize traps for UBSAN so there is 1 trap per check"),
59     llvm::cl::init(false));
60 
61 //===--------------------------------------------------------------------===//
62 //                        Miscellaneous Helper Methods
63 //===--------------------------------------------------------------------===//
64 
65 /// CreateTempAlloca - This creates a alloca and inserts it into the entry
66 /// block.
67 Address CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty,
68                                                      CharUnits Align,
69                                                      const Twine &Name,
70                                                      llvm::Value *ArraySize) {
71   auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
72   Alloca->setAlignment(Align.getAsAlign());
73   return Address(Alloca, Ty, Align, KnownNonNull);
74 }
75 
76 /// CreateTempAlloca - This creates a alloca and inserts it into the entry
77 /// block. The alloca is casted to default address space if necessary.
78 Address CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
79                                           const Twine &Name,
80                                           llvm::Value *ArraySize,
81                                           Address *AllocaAddr) {
82   auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
83   if (AllocaAddr)
84     *AllocaAddr = Alloca;
85   llvm::Value *V = Alloca.getPointer();
86   // Alloca always returns a pointer in alloca address space, which may
87   // be different from the type defined by the language. For example,
88   // in C++ the auto variables are in the default address space. Therefore
89   // cast alloca to the default address space when necessary.
90   if (getASTAllocaAddressSpace() != LangAS::Default) {
91     auto DestAddrSpace = getContext().getTargetAddressSpace(LangAS::Default);
92     llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
93     // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
94     // otherwise alloca is inserted at the current insertion point of the
95     // builder.
96     if (!ArraySize)
97       Builder.SetInsertPoint(getPostAllocaInsertPoint());
98     V = getTargetHooks().performAddrSpaceCast(
99         *this, V, getASTAllocaAddressSpace(), LangAS::Default,
100         Ty->getPointerTo(DestAddrSpace), /*non-null*/ true);
101   }
102 
103   return Address(V, Ty, Align, KnownNonNull);
104 }
105 
106 /// CreateTempAlloca - This creates an alloca and inserts it into the entry
107 /// block if \p ArraySize is nullptr, otherwise inserts it at the current
108 /// insertion point of the builder.
109 llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
110                                                     const Twine &Name,
111                                                     llvm::Value *ArraySize) {
112   if (ArraySize)
113     return Builder.CreateAlloca(Ty, ArraySize, Name);
114   return new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
115                               ArraySize, Name, AllocaInsertPt);
116 }
117 
118 /// CreateDefaultAlignTempAlloca - This creates an alloca with the
119 /// default alignment of the corresponding LLVM type, which is *not*
120 /// guaranteed to be related in any way to the expected alignment of
121 /// an AST type that might have been lowered to Ty.
122 Address CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
123                                                       const Twine &Name) {
124   CharUnits Align =
125       CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlign(Ty));
126   return CreateTempAlloca(Ty, Align, Name);
127 }
128 
129 Address CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
130   CharUnits Align = getContext().getTypeAlignInChars(Ty);
131   return CreateTempAlloca(ConvertType(Ty), Align, Name);
132 }
133 
134 Address CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name,
135                                        Address *Alloca) {
136   // FIXME: Should we prefer the preferred type alignment here?
137   return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
138 }
139 
140 Address CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
141                                        const Twine &Name, Address *Alloca) {
142   Address Result = CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
143                                     /*ArraySize=*/nullptr, Alloca);
144 
145   if (Ty->isConstantMatrixType()) {
146     auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType());
147     auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
148                                                 ArrayTy->getNumElements());
149 
150     Result = Address(Result.getPointer(), VectorTy, Result.getAlignment(),
151                      KnownNonNull);
152   }
153   return Result;
154 }
155 
156 Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty, CharUnits Align,
157                                                   const Twine &Name) {
158   return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
159 }
160 
161 Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
162                                                   const Twine &Name) {
163   return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
164                                   Name);
165 }
166 
167 /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
168 /// expression and compare the result against zero, returning an Int1Ty value.
169 llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
170   PGO.setCurrentStmt(E);
171   if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
172     llvm::Value *MemPtr = EmitScalarExpr(E);
173     return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
174   }
175 
176   QualType BoolTy = getContext().BoolTy;
177   SourceLocation Loc = E->getExprLoc();
178   CGFPOptionsRAII FPOptsRAII(*this, E);
179   if (!E->getType()->isAnyComplexType())
180     return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);
181 
182   return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy,
183                                        Loc);
184 }
185 
186 /// EmitIgnoredExpr - Emit code to compute the specified expression,
187 /// ignoring the result.
188 void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
189   if (E->isPRValue())
190     return (void)EmitAnyExpr(E, AggValueSlot::ignored(), true);
191 
192   // if this is a bitfield-resulting conditional operator, we can special case
193   // emit this. The normal 'EmitLValue' version of this is particularly
194   // difficult to codegen for, since creating a single "LValue" for two
195   // different sized arguments here is not particularly doable.
196   if (const auto *CondOp = dyn_cast<AbstractConditionalOperator>(
197           E->IgnoreParenNoopCasts(getContext()))) {
198     if (CondOp->getObjectKind() == OK_BitField)
199       return EmitIgnoredConditionalOperator(CondOp);
200   }
201 
202   // Just emit it as an l-value and drop the result.
203   EmitLValue(E);
204 }
205 
206 /// EmitAnyExpr - Emit code to compute the specified expression which
207 /// can have any type.  The result is returned as an RValue struct.
208 /// If this is an aggregate expression, AggSlot indicates where the
209 /// result should be returned.
210 RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
211                                     AggValueSlot aggSlot,
212                                     bool ignoreResult) {
213   switch (getEvaluationKind(E->getType())) {
214   case TEK_Scalar:
215     return RValue::get(EmitScalarExpr(E, ignoreResult));
216   case TEK_Complex:
217     return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
218   case TEK_Aggregate:
219     if (!ignoreResult && aggSlot.isIgnored())
220       aggSlot = CreateAggTemp(E->getType(), "agg-temp");
221     EmitAggExpr(E, aggSlot);
222     return aggSlot.asRValue();
223   }
224   llvm_unreachable("bad evaluation kind");
225 }
226 
227 /// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
228 /// always be accessible even if no aggregate location is provided.
229 RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
230   AggValueSlot AggSlot = AggValueSlot::ignored();
231 
232   if (hasAggregateEvaluationKind(E->getType()))
233     AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
234   return EmitAnyExpr(E, AggSlot);
235 }
236 
237 /// EmitAnyExprToMem - Evaluate an expression into a given memory
238 /// location.
239 void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
240                                        Address Location,
241                                        Qualifiers Quals,
242                                        bool IsInit) {
243   // FIXME: This function should take an LValue as an argument.
244   switch (getEvaluationKind(E->getType())) {
245   case TEK_Complex:
246     EmitComplexExprIntoLValue(E, MakeAddrLValue(Location, E->getType()),
247                               /*isInit*/ false);
248     return;
249 
250   case TEK_Aggregate: {
251     EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
252                                          AggValueSlot::IsDestructed_t(IsInit),
253                                          AggValueSlot::DoesNotNeedGCBarriers,
254                                          AggValueSlot::IsAliased_t(!IsInit),
255                                          AggValueSlot::MayOverlap));
256     return;
257   }
258 
259   case TEK_Scalar: {
260     RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
261     LValue LV = MakeAddrLValue(Location, E->getType());
262     EmitStoreThroughLValue(RV, LV);
263     return;
264   }
265   }
266   llvm_unreachable("bad evaluation kind");
267 }
268 
269 static void
270 pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
271                      const Expr *E, Address ReferenceTemporary) {
272   // Objective-C++ ARC:
273   //   If we are binding a reference to a temporary that has ownership, we
274   //   need to perform retain/release operations on the temporary.
275   //
276   // FIXME: This should be looking at E, not M.
277   if (auto Lifetime = M->getType().getObjCLifetime()) {
278     switch (Lifetime) {
279     case Qualifiers::OCL_None:
280     case Qualifiers::OCL_ExplicitNone:
281       // Carry on to normal cleanup handling.
282       break;
283 
284     case Qualifiers::OCL_Autoreleasing:
285       // Nothing to do; cleaned up by an autorelease pool.
286       return;
287 
288     case Qualifiers::OCL_Strong:
289     case Qualifiers::OCL_Weak:
290       switch (StorageDuration Duration = M->getStorageDuration()) {
291       case SD_Static:
292         // Note: we intentionally do not register a cleanup to release
293         // the object on program termination.
294         return;
295 
296       case SD_Thread:
297         // FIXME: We should probably register a cleanup in this case.
298         return;
299 
300       case SD_Automatic:
301       case SD_FullExpression:
302         CodeGenFunction::Destroyer *Destroy;
303         CleanupKind CleanupKind;
304         if (Lifetime == Qualifiers::OCL_Strong) {
305           const ValueDecl *VD = M->getExtendingDecl();
306           bool Precise =
307               VD && isa<VarDecl>(VD) && VD->hasAttr<ObjCPreciseLifetimeAttr>();
308           CleanupKind = CGF.getARCCleanupKind();
309           Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise
310                             : &CodeGenFunction::destroyARCStrongImprecise;
311         } else {
312           // __weak objects always get EH cleanups; otherwise, exceptions
313           // could cause really nasty crashes instead of mere leaks.
314           CleanupKind = NormalAndEHCleanup;
315           Destroy = &CodeGenFunction::destroyARCWeak;
316         }
317         if (Duration == SD_FullExpression)
318           CGF.pushDestroy(CleanupKind, ReferenceTemporary,
319                           M->getType(), *Destroy,
320                           CleanupKind & EHCleanup);
321         else
322           CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
323                                           M->getType(),
324                                           *Destroy, CleanupKind & EHCleanup);
325         return;
326 
327       case SD_Dynamic:
328         llvm_unreachable("temporary cannot have dynamic storage duration");
329       }
330       llvm_unreachable("unknown storage duration");
331     }
332   }
333 
334   CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
335   if (const RecordType *RT =
336           E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
337     // Get the destructor for the reference temporary.
338     auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
339     if (!ClassDecl->hasTrivialDestructor())
340       ReferenceTemporaryDtor = ClassDecl->getDestructor();
341   }
342 
343   if (!ReferenceTemporaryDtor)
344     return;
345 
346   // Call the destructor for the temporary.
347   switch (M->getStorageDuration()) {
348   case SD_Static:
349   case SD_Thread: {
350     llvm::FunctionCallee CleanupFn;
351     llvm::Constant *CleanupArg;
352     if (E->getType()->isArrayType()) {
353       CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper(
354           ReferenceTemporary, E->getType(),
355           CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions,
356           dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
357       CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
358     } else {
359       CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
360           GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
361       CleanupArg = cast<llvm::Constant>(ReferenceTemporary.getPointer());
362     }
363     CGF.CGM.getCXXABI().registerGlobalDtor(
364         CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
365     break;
366   }
367 
368   case SD_FullExpression:
369     CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(),
370                     CodeGenFunction::destroyCXXObject,
371                     CGF.getLangOpts().Exceptions);
372     break;
373 
374   case SD_Automatic:
375     CGF.pushLifetimeExtendedDestroy(NormalAndEHCleanup,
376                                     ReferenceTemporary, E->getType(),
377                                     CodeGenFunction::destroyCXXObject,
378                                     CGF.getLangOpts().Exceptions);
379     break;
380 
381   case SD_Dynamic:
382     llvm_unreachable("temporary cannot have dynamic storage duration");
383   }
384 }
385 
386 static Address createReferenceTemporary(CodeGenFunction &CGF,
387                                         const MaterializeTemporaryExpr *M,
388                                         const Expr *Inner,
389                                         Address *Alloca = nullptr) {
390   auto &TCG = CGF.getTargetHooks();
391   switch (M->getStorageDuration()) {
392   case SD_FullExpression:
393   case SD_Automatic: {
394     // If we have a constant temporary array or record try to promote it into a
395     // constant global under the same rules a normal constant would've been
396     // promoted. This is easier on the optimizer and generally emits fewer
397     // instructions.
398     QualType Ty = Inner->getType();
399     if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
400         (Ty->isArrayType() || Ty->isRecordType()) &&
401         Ty.isConstantStorage(CGF.getContext(), true, false))
402       if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
403         auto AS = CGF.CGM.GetGlobalConstantAddressSpace();
404         auto *GV = new llvm::GlobalVariable(
405             CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
406             llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
407             llvm::GlobalValue::NotThreadLocal,
408             CGF.getContext().getTargetAddressSpace(AS));
409         CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
410         GV->setAlignment(alignment.getAsAlign());
411         llvm::Constant *C = GV;
412         if (AS != LangAS::Default)
413           C = TCG.performAddrSpaceCast(
414               CGF.CGM, GV, AS, LangAS::Default,
415               GV->getValueType()->getPointerTo(
416                   CGF.getContext().getTargetAddressSpace(LangAS::Default)));
417         // FIXME: Should we put the new global into a COMDAT?
418         return Address(C, GV->getValueType(), alignment);
419       }
420     return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
421   }
422   case SD_Thread:
423   case SD_Static:
424     return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);
425 
426   case SD_Dynamic:
427     llvm_unreachable("temporary can't have dynamic storage duration");
428   }
429   llvm_unreachable("unknown storage duration");
430 }
431 
432 /// Helper method to check if the underlying ABI is AAPCS
433 static bool isAAPCS(const TargetInfo &TargetInfo) {
434   return TargetInfo.getABI().starts_with("aapcs");
435 }
436 
437 LValue CodeGenFunction::
438 EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
439   const Expr *E = M->getSubExpr();
440 
441   assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
442           !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
443          "Reference should never be pseudo-strong!");
444 
445   // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
446   // as that will cause the lifetime adjustment to be lost for ARC
447   auto ownership = M->getType().getObjCLifetime();
448   if (ownership != Qualifiers::OCL_None &&
449       ownership != Qualifiers::OCL_ExplicitNone) {
450     Address Object = createReferenceTemporary(*this, M, E);
451     if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
452       llvm::Type *Ty = ConvertTypeForMem(E->getType());
453       Object = Object.withElementType(Ty);
454 
455       // createReferenceTemporary will promote the temporary to a global with a
456       // constant initializer if it can.  It can only do this to a value of
457       // ARC-manageable type if the value is global and therefore "immune" to
458       // ref-counting operations.  Therefore we have no need to emit either a
459       // dynamic initialization or a cleanup and we can just return the address
460       // of the temporary.
461       if (Var->hasInitializer())
462         return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
463 
464       Var->setInitializer(CGM.EmitNullConstant(E->getType()));
465     }
466     LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
467                                        AlignmentSource::Decl);
468 
469     switch (getEvaluationKind(E->getType())) {
470     default: llvm_unreachable("expected scalar or aggregate expression");
471     case TEK_Scalar:
472       EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
473       break;
474     case TEK_Aggregate: {
475       EmitAggExpr(E, AggValueSlot::forAddr(Object,
476                                            E->getType().getQualifiers(),
477                                            AggValueSlot::IsDestructed,
478                                            AggValueSlot::DoesNotNeedGCBarriers,
479                                            AggValueSlot::IsNotAliased,
480                                            AggValueSlot::DoesNotOverlap));
481       break;
482     }
483     }
484 
485     pushTemporaryCleanup(*this, M, E, Object);
486     return RefTempDst;
487   }
488 
489   SmallVector<const Expr *, 2> CommaLHSs;
490   SmallVector<SubobjectAdjustment, 2> Adjustments;
491   E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
492 
493   for (const auto &Ignored : CommaLHSs)
494     EmitIgnoredExpr(Ignored);
495 
496   if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
497     if (opaque->getType()->isRecordType()) {
498       assert(Adjustments.empty());
499       return EmitOpaqueValueLValue(opaque);
500     }
501   }
502 
503   // Create and initialize the reference temporary.
504   Address Alloca = Address::invalid();
505   Address Object = createReferenceTemporary(*this, M, E, &Alloca);
506   if (auto *Var = dyn_cast<llvm::GlobalVariable>(
507           Object.getPointer()->stripPointerCasts())) {
508     llvm::Type *TemporaryType = ConvertTypeForMem(E->getType());
509     Object = Object.withElementType(TemporaryType);
510     // If the temporary is a global and has a constant initializer or is a
511     // constant temporary that we promoted to a global, we may have already
512     // initialized it.
513     if (!Var->hasInitializer()) {
514       Var->setInitializer(CGM.EmitNullConstant(E->getType()));
515       EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
516     }
517   } else {
518     switch (M->getStorageDuration()) {
519     case SD_Automatic:
520       if (auto *Size = EmitLifetimeStart(
521               CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
522               Alloca.getPointer())) {
523         pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
524                                                   Alloca, Size);
525       }
526       break;
527 
528     case SD_FullExpression: {
529       if (!ShouldEmitLifetimeMarkers)
530         break;
531 
532       // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
533       // marker. Instead, start the lifetime of a conditional temporary earlier
534       // so that it's unconditional. Don't do this with sanitizers which need
535       // more precise lifetime marks. However when inside an "await.suspend"
536       // block, we should always avoid conditional cleanup because it creates
537       // boolean marker that lives across await_suspend, which can destroy coro
538       // frame.
539       ConditionalEvaluation *OldConditional = nullptr;
540       CGBuilderTy::InsertPoint OldIP;
541       if (isInConditionalBranch() && !E->getType().isDestructedType() &&
542           ((!SanOpts.has(SanitizerKind::HWAddress) &&
543             !SanOpts.has(SanitizerKind::Memory) &&
544             !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) ||
545            inSuspendBlock())) {
546         OldConditional = OutermostConditional;
547         OutermostConditional = nullptr;
548 
549         OldIP = Builder.saveIP();
550         llvm::BasicBlock *Block = OldConditional->getStartingBlock();
551         Builder.restoreIP(CGBuilderTy::InsertPoint(
552             Block, llvm::BasicBlock::iterator(Block->back())));
553       }
554 
555       if (auto *Size = EmitLifetimeStart(
556               CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
557               Alloca.getPointer())) {
558         pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
559                                              Size);
560       }
561 
562       if (OldConditional) {
563         OutermostConditional = OldConditional;
564         Builder.restoreIP(OldIP);
565       }
566       break;
567     }
568 
569     default:
570       break;
571     }
572     EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
573   }
574   pushTemporaryCleanup(*this, M, E, Object);
575 
576   // Perform derived-to-base casts and/or field accesses, to get from the
577   // temporary object we created (and, potentially, for which we extended
578   // the lifetime) to the subobject we're binding the reference to.
579   for (SubobjectAdjustment &Adjustment : llvm::reverse(Adjustments)) {
580     switch (Adjustment.Kind) {
581     case SubobjectAdjustment::DerivedToBaseAdjustment:
582       Object =
583           GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
584                                 Adjustment.DerivedToBase.BasePath->path_begin(),
585                                 Adjustment.DerivedToBase.BasePath->path_end(),
586                                 /*NullCheckValue=*/ false, E->getExprLoc());
587       break;
588 
589     case SubobjectAdjustment::FieldAdjustment: {
590       LValue LV = MakeAddrLValue(Object, E->getType(), AlignmentSource::Decl);
591       LV = EmitLValueForField(LV, Adjustment.Field);
592       assert(LV.isSimple() &&
593              "materialized temporary field is not a simple lvalue");
594       Object = LV.getAddress(*this);
595       break;
596     }
597 
598     case SubobjectAdjustment::MemberPointerAdjustment: {
599       llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
600       Object = EmitCXXMemberDataPointerAddress(E, Object, Ptr,
601                                                Adjustment.Ptr.MPT);
602       break;
603     }
604     }
605   }
606 
607   return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
608 }
609 
610 RValue
611 CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) {
612   // Emit the expression as an lvalue.
613   LValue LV = EmitLValue(E);
614   assert(LV.isSimple());
615   llvm::Value *Value = LV.getPointer(*this);
616 
617   if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) {
618     // C++11 [dcl.ref]p5 (as amended by core issue 453):
619     //   If a glvalue to which a reference is directly bound designates neither
620     //   an existing object or function of an appropriate type nor a region of
621     //   storage of suitable size and alignment to contain an object of the
622     //   reference's type, the behavior is undefined.
623     QualType Ty = E->getType();
624     EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
625   }
626 
627   return RValue::get(Value);
628 }
629 
630 
631 /// getAccessedFieldNo - Given an encoded value and a result number, return the
632 /// input field number being accessed.
633 unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
634                                              const llvm::Constant *Elts) {
635   return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
636       ->getZExtValue();
637 }
638 
639 /// Emit the hash_16_bytes function from include/llvm/ADT/Hashing.h.
640 static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low,
641                                     llvm::Value *High) {
642   llvm::Value *KMul = Builder.getInt64(0x9ddfea08eb382d69ULL);
643   llvm::Value *K47 = Builder.getInt64(47);
644   llvm::Value *A0 = Builder.CreateMul(Builder.CreateXor(Low, High), KMul);
645   llvm::Value *A1 = Builder.CreateXor(Builder.CreateLShr(A0, K47), A0);
646   llvm::Value *B0 = Builder.CreateMul(Builder.CreateXor(High, A1), KMul);
647   llvm::Value *B1 = Builder.CreateXor(Builder.CreateLShr(B0, K47), B0);
648   return Builder.CreateMul(B1, KMul);
649 }
650 
651 bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK) {
652   return TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
653          TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation;
654 }
655 
656 bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) {
657   CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
658   return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
659          (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
660           TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference ||
661           TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation);
662 }
663 
664 bool CodeGenFunction::sanitizePerformTypeCheck() const {
665   return SanOpts.has(SanitizerKind::Null) ||
666          SanOpts.has(SanitizerKind::Alignment) ||
667          SanOpts.has(SanitizerKind::ObjectSize) ||
668          SanOpts.has(SanitizerKind::Vptr);
669 }
670 
671 void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
672                                     llvm::Value *Ptr, QualType Ty,
673                                     CharUnits Alignment,
674                                     SanitizerSet SkippedChecks,
675                                     llvm::Value *ArraySize) {
676   if (!sanitizePerformTypeCheck())
677     return;
678 
679   // Don't check pointers outside the default address space. The null check
680   // isn't correct, the object-size check isn't supported by LLVM, and we can't
681   // communicate the addresses to the runtime handler for the vptr check.
682   if (Ptr->getType()->getPointerAddressSpace())
683     return;
684 
685   // Don't check pointers to volatile data. The behavior here is implementation-
686   // defined.
687   if (Ty.isVolatileQualified())
688     return;
689 
690   SanitizerScope SanScope(this);
691 
692   SmallVector<std::pair<llvm::Value *, SanitizerMask>, 3> Checks;
693   llvm::BasicBlock *Done = nullptr;
694 
695   // Quickly determine whether we have a pointer to an alloca. It's possible
696   // to skip null checks, and some alignment checks, for these pointers. This
697   // can reduce compile-time significantly.
698   auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts());
699 
700   llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());
701   llvm::Value *IsNonNull = nullptr;
702   bool IsGuaranteedNonNull =
703       SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;
704   bool AllowNullPointers = isNullPointerAllowed(TCK);
705   if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
706       !IsGuaranteedNonNull) {
707     // The glvalue must not be an empty glvalue.
708     IsNonNull = Builder.CreateIsNotNull(Ptr);
709 
710     // The IR builder can constant-fold the null check if the pointer points to
711     // a constant.
712     IsGuaranteedNonNull = IsNonNull == True;
713 
714     // Skip the null check if the pointer is known to be non-null.
715     if (!IsGuaranteedNonNull) {
716       if (AllowNullPointers) {
717         // When performing pointer casts, it's OK if the value is null.
718         // Skip the remaining checks in that case.
719         Done = createBasicBlock("null");
720         llvm::BasicBlock *Rest = createBasicBlock("not.null");
721         Builder.CreateCondBr(IsNonNull, Rest, Done);
722         EmitBlock(Rest);
723       } else {
724         Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::Null));
725       }
726     }
727   }
728 
729   if (SanOpts.has(SanitizerKind::ObjectSize) &&
730       !SkippedChecks.has(SanitizerKind::ObjectSize) &&
731       !Ty->isIncompleteType()) {
732     uint64_t TySize = CGM.getMinimumObjectSize(Ty).getQuantity();
733     llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
734     if (ArraySize)
735       Size = Builder.CreateMul(Size, ArraySize);
736 
737     // Degenerate case: new X[0] does not need an objectsize check.
738     llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);
739     if (!ConstantSize || !ConstantSize->isNullValue()) {
740       // The glvalue must refer to a large enough storage region.
741       // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
742       //        to check this.
743       // FIXME: Get object address space
744       llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
745       llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
746       llvm::Value *Min = Builder.getFalse();
747       llvm::Value *NullIsUnknown = Builder.getFalse();
748       llvm::Value *Dynamic = Builder.getFalse();
749       llvm::Value *LargeEnough = Builder.CreateICmpUGE(
750           Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic}), Size);
751       Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize));
752     }
753   }
754 
755   llvm::MaybeAlign AlignVal;
756   llvm::Value *PtrAsInt = nullptr;
757 
758   if (SanOpts.has(SanitizerKind::Alignment) &&
759       !SkippedChecks.has(SanitizerKind::Alignment)) {
760     AlignVal = Alignment.getAsMaybeAlign();
761     if (!Ty->isIncompleteType() && !AlignVal)
762       AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
763                                              /*ForPointeeType=*/true)
764                      .getAsMaybeAlign();
765 
766     // The glvalue must be suitably aligned.
767     if (AlignVal && *AlignVal > llvm::Align(1) &&
768         (!PtrToAlloca || PtrToAlloca->getAlign() < *AlignVal)) {
769       PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
770       llvm::Value *Align = Builder.CreateAnd(
771           PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal->value() - 1));
772       llvm::Value *Aligned =
773           Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
774       if (Aligned != True)
775         Checks.push_back(std::make_pair(Aligned, SanitizerKind::Alignment));
776     }
777   }
778 
779   if (Checks.size() > 0) {
780     llvm::Constant *StaticData[] = {
781         EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(Ty),
782         llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2(*AlignVal) : 1),
783         llvm::ConstantInt::get(Int8Ty, TCK)};
784     EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData,
785               PtrAsInt ? PtrAsInt : Ptr);
786   }
787 
788   // If possible, check that the vptr indicates that there is a subobject of
789   // type Ty at offset zero within this object.
790   //
791   // C++11 [basic.life]p5,6:
792   //   [For storage which does not refer to an object within its lifetime]
793   //   The program has undefined behavior if:
794   //    -- the [pointer or glvalue] is used to access a non-static data member
795   //       or call a non-static member function
796   if (SanOpts.has(SanitizerKind::Vptr) &&
797       !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
798     // Ensure that the pointer is non-null before loading it. If there is no
799     // compile-time guarantee, reuse the run-time null check or emit a new one.
800     if (!IsGuaranteedNonNull) {
801       if (!IsNonNull)
802         IsNonNull = Builder.CreateIsNotNull(Ptr);
803       if (!Done)
804         Done = createBasicBlock("vptr.null");
805       llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");
806       Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);
807       EmitBlock(VptrNotNull);
808     }
809 
810     // Compute a hash of the mangled name of the type.
811     //
812     // FIXME: This is not guaranteed to be deterministic! Move to a
813     //        fingerprinting mechanism once LLVM provides one. For the time
814     //        being the implementation happens to be deterministic.
815     SmallString<64> MangledName;
816     llvm::raw_svector_ostream Out(MangledName);
817     CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
818                                                      Out);
819 
820     // Contained in NoSanitizeList based on the mangled type.
821     if (!CGM.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr,
822                                                            Out.str())) {
823       llvm::hash_code TypeHash = hash_value(Out.str());
824 
825       // Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
826       llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash);
827       Address VPtrAddr(Ptr, IntPtrTy, getPointerAlign());
828       llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr);
829       llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty);
830 
831       llvm::Value *Hash = emitHash16Bytes(Builder, Low, High);
832       Hash = Builder.CreateTrunc(Hash, IntPtrTy);
833 
834       // Look the hash up in our cache.
835       const int CacheSize = 128;
836       llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
837       llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
838                                                      "__ubsan_vptr_type_cache");
839       llvm::Value *Slot = Builder.CreateAnd(Hash,
840                                             llvm::ConstantInt::get(IntPtrTy,
841                                                                    CacheSize-1));
842       llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
843       llvm::Value *CacheVal = Builder.CreateAlignedLoad(
844           IntPtrTy, Builder.CreateInBoundsGEP(HashTable, Cache, Indices),
845           getPointerAlign());
846 
847       // If the hash isn't in the cache, call a runtime handler to perform the
848       // hard work of checking whether the vptr is for an object of the right
849       // type. This will either fill in the cache and return, or produce a
850       // diagnostic.
851       llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
852       llvm::Constant *StaticData[] = {
853         EmitCheckSourceLocation(Loc),
854         EmitCheckTypeDescriptor(Ty),
855         CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
856         llvm::ConstantInt::get(Int8Ty, TCK)
857       };
858       llvm::Value *DynamicData[] = { Ptr, Hash };
859       EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr),
860                 SanitizerHandler::DynamicTypeCacheMiss, StaticData,
861                 DynamicData);
862     }
863   }
864 
865   if (Done) {
866     Builder.CreateBr(Done);
867     EmitBlock(Done);
868   }
869 }
870 
871 llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E,
872                                                    QualType EltTy) {
873   ASTContext &C = getContext();
874   uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
875   if (!EltSize)
876     return nullptr;
877 
878   auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
879   if (!ArrayDeclRef)
880     return nullptr;
881 
882   auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
883   if (!ParamDecl)
884     return nullptr;
885 
886   auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
887   if (!POSAttr)
888     return nullptr;
889 
890   // Don't load the size if it's a lower bound.
891   int POSType = POSAttr->getType();
892   if (POSType != 0 && POSType != 1)
893     return nullptr;
894 
895   // Find the implicit size parameter.
896   auto PassedSizeIt = SizeArguments.find(ParamDecl);
897   if (PassedSizeIt == SizeArguments.end())
898     return nullptr;
899 
900   const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
901   assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
902   Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
903   llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
904                                               C.getSizeType(), E->getExprLoc());
905   llvm::Value *SizeOfElement =
906       llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
907   return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
908 }
909 
910 /// If Base is known to point to the start of an array, return the length of
911 /// that array. Return 0 if the length cannot be determined.
912 static llvm::Value *getArrayIndexingBound(CodeGenFunction &CGF,
913                                           const Expr *Base,
914                                           QualType &IndexedType,
915                                           LangOptions::StrictFlexArraysLevelKind
916                                           StrictFlexArraysLevel) {
917   // For the vector indexing extension, the bound is the number of elements.
918   if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
919     IndexedType = Base->getType();
920     return CGF.Builder.getInt32(VT->getNumElements());
921   }
922 
923   Base = Base->IgnoreParens();
924 
925   if (const auto *CE = dyn_cast<CastExpr>(Base)) {
926     if (CE->getCastKind() == CK_ArrayToPointerDecay &&
927         !CE->getSubExpr()->isFlexibleArrayMemberLike(CGF.getContext(),
928                                                      StrictFlexArraysLevel)) {
929       CodeGenFunction::SanitizerScope SanScope(&CGF);
930 
931       IndexedType = CE->getSubExpr()->getType();
932       const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
933       if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
934         return CGF.Builder.getInt(CAT->getSize());
935 
936       if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
937         return CGF.getVLASize(VAT).NumElts;
938       // Ignore pass_object_size here. It's not applicable on decayed pointers.
939     }
940 
941     if (const ValueDecl *VD = CGF.FindCountedByField(Base)) {
942       IndexedType = Base->getType();
943       const Expr *E = CGF.BuildCountedByFieldExpr(Base, VD);
944       return CGF.EmitAnyExprToTemp(E).getScalarVal();
945     }
946   }
947 
948   CodeGenFunction::SanitizerScope SanScope(&CGF);
949 
950   QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
951   if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
952     IndexedType = Base->getType();
953     return POS;
954   }
955 
956   return nullptr;
957 }
958 
959 const Expr *
960 CodeGenFunction::BuildCountedByFieldExpr(const Expr *Base,
961                                          const ValueDecl *CountedByVD) {
962   // Find the outer struct expr (i.e. p in p->a.b.c.d).
963   Expr *CountedByExpr = const_cast<Expr *>(Base)->IgnoreParenImpCasts();
964 
965   // Work our way up the expression until we reach the DeclRefExpr.
966   while (!isa<DeclRefExpr>(CountedByExpr))
967     if (const auto *ME = dyn_cast<MemberExpr>(CountedByExpr))
968       CountedByExpr = ME->getBase()->IgnoreParenImpCasts();
969 
970   // Add back an implicit cast to create the required pr-value.
971   CountedByExpr = ImplicitCastExpr::Create(
972       getContext(), CountedByExpr->getType(), CK_LValueToRValue, CountedByExpr,
973       nullptr, VK_PRValue, FPOptionsOverride());
974 
975   if (const auto *IFD = dyn_cast<IndirectFieldDecl>(CountedByVD)) {
976     // The counted_by field is inside an anonymous struct / union. The
977     // IndirectFieldDecl has the correct order of FieldDecls to build this
978     // easily. (Yay!)
979     for (NamedDecl *ND : IFD->chain()) {
980       auto *VD = cast<ValueDecl>(ND);
981       CountedByExpr =
982           MemberExpr::CreateImplicit(getContext(), CountedByExpr,
983                                      CountedByExpr->getType()->isPointerType(),
984                                      VD, VD->getType(), VK_LValue, OK_Ordinary);
985     }
986   } else {
987     CountedByExpr = MemberExpr::CreateImplicit(
988         getContext(), const_cast<Expr *>(CountedByExpr),
989         CountedByExpr->getType()->isPointerType(),
990         const_cast<ValueDecl *>(CountedByVD), CountedByVD->getType(), VK_LValue,
991         OK_Ordinary);
992   }
993 
994   return CountedByExpr;
995 }
996 
997 const ValueDecl *
998 CodeGenFunction::FindFlexibleArrayMemberField(ASTContext &Ctx,
999                                               const RecordDecl *RD) {
1000   const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
1001       getLangOpts().getStrictFlexArraysLevel();
1002 
1003   for (const Decl *D : RD->decls()) {
1004     if (const auto *VD = dyn_cast<ValueDecl>(D);
1005         VD && Decl::isFlexibleArrayMemberLike(
1006                   Ctx, VD, VD->getType(), StrictFlexArraysLevel,
1007                   /*IgnoreTemplateOrMacroSubstitution=*/true))
1008       return VD;
1009 
1010     if (const auto *Record = dyn_cast<RecordDecl>(D))
1011       if (const ValueDecl *VD = FindFlexibleArrayMemberField(Ctx, Record))
1012         return VD;
1013   }
1014 
1015   return nullptr;
1016 }
1017 
1018 const ValueDecl *CodeGenFunction::FindCountedByField(const Expr *Base) {
1019   ASTContext &Ctx = getContext();
1020   const RecordDecl *OuterRD = nullptr;
1021   const FieldDecl *FD = nullptr;
1022 
1023   Base = Base->IgnoreParenImpCasts();
1024 
1025   // Get the outer-most lexical RecordDecl.
1026   if (const auto *DRE = dyn_cast<DeclRefExpr>(Base)) {
1027     QualType Ty = DRE->getDecl()->getType();
1028     if (Ty->isPointerType())
1029       Ty = Ty->getPointeeType();
1030 
1031     if (const auto *RD = Ty->getAsRecordDecl())
1032       OuterRD = RD->getOuterLexicalRecordContext();
1033   } else if (const auto *ME = dyn_cast<MemberExpr>(Base)) {
1034     if (const ValueDecl *MD = ME->getMemberDecl()) {
1035       OuterRD = MD->getDeclContext()->getOuterLexicalRecordContext();
1036 
1037       const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
1038           getLangOpts().getStrictFlexArraysLevel();
1039       if (Decl::isFlexibleArrayMemberLike(
1040               Ctx, MD, MD->getType(), StrictFlexArraysLevel,
1041               /*IgnoreTemplateOrMacroSubstitution=*/true))
1042         // Base is referencing the FAM itself.
1043         FD = dyn_cast<FieldDecl>(MD);
1044     }
1045   }
1046 
1047   if (!OuterRD)
1048     return nullptr;
1049 
1050   if (!FD) {
1051     const ValueDecl *VD = FindFlexibleArrayMemberField(Ctx, OuterRD);
1052     FD = dyn_cast_if_present<FieldDecl>(VD);
1053     if (!FD)
1054       return nullptr;
1055   }
1056 
1057   const auto *CBA = FD->getAttr<CountedByAttr>();
1058   if (!CBA)
1059     return nullptr;
1060 
1061   DeclarationName DName(CBA->getCountedByField());
1062   DeclContext::lookup_result Lookup = OuterRD->lookup(DName);
1063 
1064   if (Lookup.empty())
1065     return nullptr;
1066 
1067   return dyn_cast<ValueDecl>(Lookup.front());
1068 }
1069 
1070 void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
1071                                       llvm::Value *Index, QualType IndexType,
1072                                       bool Accessed) {
1073   assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
1074          "should not be called unless adding bounds checks");
1075   const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
1076     getLangOpts().getStrictFlexArraysLevel();
1077 
1078   QualType IndexedType;
1079   llvm::Value *Bound =
1080       getArrayIndexingBound(*this, Base, IndexedType, StrictFlexArraysLevel);
1081   if (!Bound)
1082     return;
1083 
1084   SanitizerScope SanScope(this);
1085 
1086   bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
1087   llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
1088   llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);
1089 
1090   llvm::Constant *StaticData[] = {
1091     EmitCheckSourceLocation(E->getExprLoc()),
1092     EmitCheckTypeDescriptor(IndexedType),
1093     EmitCheckTypeDescriptor(IndexType)
1094   };
1095   llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
1096                                 : Builder.CreateICmpULE(IndexVal, BoundVal);
1097   EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds),
1098             SanitizerHandler::OutOfBounds, StaticData, Index);
1099 }
1100 
1101 
1102 CodeGenFunction::ComplexPairTy CodeGenFunction::
1103 EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
1104                          bool isInc, bool isPre) {
1105   ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc());
1106 
1107   llvm::Value *NextVal;
1108   if (isa<llvm::IntegerType>(InVal.first->getType())) {
1109     uint64_t AmountVal = isInc ? 1 : -1;
1110     NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
1111 
1112     // Add the inc/dec to the real part.
1113     NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
1114   } else {
1115     QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
1116     llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
1117     if (!isInc)
1118       FVal.changeSign();
1119     NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
1120 
1121     // Add the inc/dec to the real part.
1122     NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
1123   }
1124 
1125   ComplexPairTy IncVal(NextVal, InVal.second);
1126 
1127   // Store the updated result through the lvalue.
1128   EmitStoreOfComplex(IncVal, LV, /*init*/ false);
1129   if (getLangOpts().OpenMP)
1130     CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
1131                                                               E->getSubExpr());
1132 
1133   // If this is a postinc, return the value read from memory, otherwise use the
1134   // updated value.
1135   return isPre ? IncVal : InVal;
1136 }
1137 
1138 void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E,
1139                                              CodeGenFunction *CGF) {
1140   // Bind VLAs in the cast type.
1141   if (CGF && E->getType()->isVariablyModifiedType())
1142     CGF->EmitVariablyModifiedType(E->getType());
1143 
1144   if (CGDebugInfo *DI = getModuleDebugInfo())
1145     DI->EmitExplicitCastType(E->getType());
1146 }
1147 
1148 //===----------------------------------------------------------------------===//
1149 //                         LValue Expression Emission
1150 //===----------------------------------------------------------------------===//
1151 
1152 static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo,
1153                                         TBAAAccessInfo *TBAAInfo,
1154                                         KnownNonNull_t IsKnownNonNull,
1155                                         CodeGenFunction &CGF) {
1156   // We allow this with ObjC object pointers because of fragile ABIs.
1157   assert(E->getType()->isPointerType() ||
1158          E->getType()->isObjCObjectPointerType());
1159   E = E->IgnoreParens();
1160 
1161   // Casts:
1162   if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
1163     if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
1164       CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
1165 
1166     switch (CE->getCastKind()) {
1167     // Non-converting casts (but not C's implicit conversion from void*).
1168     case CK_BitCast:
1169     case CK_NoOp:
1170     case CK_AddressSpaceConversion:
1171       if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
1172         if (PtrTy->getPointeeType()->isVoidType())
1173           break;
1174 
1175         LValueBaseInfo InnerBaseInfo;
1176         TBAAAccessInfo InnerTBAAInfo;
1177         Address Addr = CGF.EmitPointerWithAlignment(
1178             CE->getSubExpr(), &InnerBaseInfo, &InnerTBAAInfo, IsKnownNonNull);
1179         if (BaseInfo) *BaseInfo = InnerBaseInfo;
1180         if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;
1181 
1182         if (isa<ExplicitCastExpr>(CE)) {
1183           LValueBaseInfo TargetTypeBaseInfo;
1184           TBAAAccessInfo TargetTypeTBAAInfo;
1185           CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment(
1186               E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);
1187           if (TBAAInfo)
1188             *TBAAInfo =
1189                 CGF.CGM.mergeTBAAInfoForCast(*TBAAInfo, TargetTypeTBAAInfo);
1190           // If the source l-value is opaque, honor the alignment of the
1191           // casted-to type.
1192           if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
1193             if (BaseInfo)
1194               BaseInfo->mergeForCast(TargetTypeBaseInfo);
1195             Addr = Address(Addr.getPointer(), Addr.getElementType(), Align,
1196                            IsKnownNonNull);
1197           }
1198         }
1199 
1200         if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
1201             CE->getCastKind() == CK_BitCast) {
1202           if (auto PT = E->getType()->getAs<PointerType>())
1203             CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr,
1204                                           /*MayBeNull=*/true,
1205                                           CodeGenFunction::CFITCK_UnrelatedCast,
1206                                           CE->getBeginLoc());
1207         }
1208 
1209         llvm::Type *ElemTy =
1210             CGF.ConvertTypeForMem(E->getType()->getPointeeType());
1211         Addr = Addr.withElementType(ElemTy);
1212         if (CE->getCastKind() == CK_AddressSpaceConversion)
1213           Addr = CGF.Builder.CreateAddrSpaceCast(Addr,
1214                                                  CGF.ConvertType(E->getType()));
1215         return Addr;
1216       }
1217       break;
1218 
1219     // Array-to-pointer decay.
1220     case CK_ArrayToPointerDecay:
1221       return CGF.EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);
1222 
1223     // Derived-to-base conversions.
1224     case CK_UncheckedDerivedToBase:
1225     case CK_DerivedToBase: {
1226       // TODO: Support accesses to members of base classes in TBAA. For now, we
1227       // conservatively pretend that the complete object is of the base class
1228       // type.
1229       if (TBAAInfo)
1230         *TBAAInfo = CGF.CGM.getTBAAAccessInfo(E->getType());
1231       Address Addr = CGF.EmitPointerWithAlignment(
1232           CE->getSubExpr(), BaseInfo, nullptr,
1233           (KnownNonNull_t)(IsKnownNonNull ||
1234                            CE->getCastKind() == CK_UncheckedDerivedToBase));
1235       auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
1236       return CGF.GetAddressOfBaseClass(
1237           Addr, Derived, CE->path_begin(), CE->path_end(),
1238           CGF.ShouldNullCheckClassCastValue(CE), CE->getExprLoc());
1239     }
1240 
1241     // TODO: Is there any reason to treat base-to-derived conversions
1242     // specially?
1243     default:
1244       break;
1245     }
1246   }
1247 
1248   // Unary &.
1249   if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
1250     if (UO->getOpcode() == UO_AddrOf) {
1251       LValue LV = CGF.EmitLValue(UO->getSubExpr(), IsKnownNonNull);
1252       if (BaseInfo) *BaseInfo = LV.getBaseInfo();
1253       if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
1254       return LV.getAddress(CGF);
1255     }
1256   }
1257 
1258   // std::addressof and variants.
1259   if (auto *Call = dyn_cast<CallExpr>(E)) {
1260     switch (Call->getBuiltinCallee()) {
1261     default:
1262       break;
1263     case Builtin::BIaddressof:
1264     case Builtin::BI__addressof:
1265     case Builtin::BI__builtin_addressof: {
1266       LValue LV = CGF.EmitLValue(Call->getArg(0), IsKnownNonNull);
1267       if (BaseInfo) *BaseInfo = LV.getBaseInfo();
1268       if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
1269       return LV.getAddress(CGF);
1270     }
1271     }
1272   }
1273 
1274   // TODO: conditional operators, comma.
1275 
1276   // Otherwise, use the alignment of the type.
1277   CharUnits Align =
1278       CGF.CGM.getNaturalPointeeTypeAlignment(E->getType(), BaseInfo, TBAAInfo);
1279   llvm::Type *ElemTy = CGF.ConvertTypeForMem(E->getType()->getPointeeType());
1280   return Address(CGF.EmitScalarExpr(E), ElemTy, Align, IsKnownNonNull);
1281 }
1282 
1283 /// EmitPointerWithAlignment - Given an expression of pointer type, try to
1284 /// derive a more accurate bound on the alignment of the pointer.
1285 Address CodeGenFunction::EmitPointerWithAlignment(
1286     const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo,
1287     KnownNonNull_t IsKnownNonNull) {
1288   Address Addr =
1289       ::EmitPointerWithAlignment(E, BaseInfo, TBAAInfo, IsKnownNonNull, *this);
1290   if (IsKnownNonNull && !Addr.isKnownNonNull())
1291     Addr.setKnownNonNull();
1292   return Addr;
1293 }
1294 
1295 llvm::Value *CodeGenFunction::EmitNonNullRValueCheck(RValue RV, QualType T) {
1296   llvm::Value *V = RV.getScalarVal();
1297   if (auto MPT = T->getAs<MemberPointerType>())
1298     return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, V, MPT);
1299   return Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
1300 }
1301 
1302 RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
1303   if (Ty->isVoidType())
1304     return RValue::get(nullptr);
1305 
1306   switch (getEvaluationKind(Ty)) {
1307   case TEK_Complex: {
1308     llvm::Type *EltTy =
1309       ConvertType(Ty->castAs<ComplexType>()->getElementType());
1310     llvm::Value *U = llvm::UndefValue::get(EltTy);
1311     return RValue::getComplex(std::make_pair(U, U));
1312   }
1313 
1314   // If this is a use of an undefined aggregate type, the aggregate must have an
1315   // identifiable address.  Just because the contents of the value are undefined
1316   // doesn't mean that the address can't be taken and compared.
1317   case TEK_Aggregate: {
1318     Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
1319     return RValue::getAggregate(DestPtr);
1320   }
1321 
1322   case TEK_Scalar:
1323     return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
1324   }
1325   llvm_unreachable("bad evaluation kind");
1326 }
1327 
1328 RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
1329                                               const char *Name) {
1330   ErrorUnsupported(E, Name);
1331   return GetUndefRValue(E->getType());
1332 }
1333 
1334 LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
1335                                               const char *Name) {
1336   ErrorUnsupported(E, Name);
1337   llvm::Type *ElTy = ConvertType(E->getType());
1338   llvm::Type *Ty = UnqualPtrTy;
1339   return MakeAddrLValue(
1340       Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
1341 }
1342 
1343 bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
1344   const Expr *Base = Obj;
1345   while (!isa<CXXThisExpr>(Base)) {
1346     // The result of a dynamic_cast can be null.
1347     if (isa<CXXDynamicCastExpr>(Base))
1348       return false;
1349 
1350     if (const auto *CE = dyn_cast<CastExpr>(Base)) {
1351       Base = CE->getSubExpr();
1352     } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
1353       Base = PE->getSubExpr();
1354     } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
1355       if (UO->getOpcode() == UO_Extension)
1356         Base = UO->getSubExpr();
1357       else
1358         return false;
1359     } else {
1360       return false;
1361     }
1362   }
1363   return true;
1364 }
1365 
1366 LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
1367   LValue LV;
1368   if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
1369     LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
1370   else
1371     LV = EmitLValue(E);
1372   if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
1373     SanitizerSet SkippedChecks;
1374     if (const auto *ME = dyn_cast<MemberExpr>(E)) {
1375       bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase());
1376       if (IsBaseCXXThis)
1377         SkippedChecks.set(SanitizerKind::Alignment, true);
1378       if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
1379         SkippedChecks.set(SanitizerKind::Null, true);
1380     }
1381     EmitTypeCheck(TCK, E->getExprLoc(), LV.getPointer(*this), E->getType(),
1382                   LV.getAlignment(), SkippedChecks);
1383   }
1384   return LV;
1385 }
1386 
1387 /// EmitLValue - Emit code to compute a designator that specifies the location
1388 /// of the expression.
1389 ///
1390 /// This can return one of two things: a simple address or a bitfield reference.
1391 /// In either case, the LLVM Value* in the LValue structure is guaranteed to be
1392 /// an LLVM pointer type.
1393 ///
1394 /// If this returns a bitfield reference, nothing about the pointee type of the
1395 /// LLVM value is known: For example, it may not be a pointer to an integer.
1396 ///
1397 /// If this returns a normal address, and if the lvalue's C type is fixed size,
1398 /// this method guarantees that the returned pointer type will point to an LLVM
1399 /// type of the same size of the lvalue's type.  If the lvalue has a variable
1400 /// length type, this is not possible.
1401 ///
1402 LValue CodeGenFunction::EmitLValue(const Expr *E,
1403                                    KnownNonNull_t IsKnownNonNull) {
1404   LValue LV = EmitLValueHelper(E, IsKnownNonNull);
1405   if (IsKnownNonNull && !LV.isKnownNonNull())
1406     LV.setKnownNonNull();
1407   return LV;
1408 }
1409 
1410 LValue CodeGenFunction::EmitLValueHelper(const Expr *E,
1411                                          KnownNonNull_t IsKnownNonNull) {
1412   ApplyDebugLocation DL(*this, E);
1413   switch (E->getStmtClass()) {
1414   default: return EmitUnsupportedLValue(E, "l-value expression");
1415 
1416   case Expr::ObjCPropertyRefExprClass:
1417     llvm_unreachable("cannot emit a property reference directly");
1418 
1419   case Expr::ObjCSelectorExprClass:
1420     return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
1421   case Expr::ObjCIsaExprClass:
1422     return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
1423   case Expr::BinaryOperatorClass:
1424     return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
1425   case Expr::CompoundAssignOperatorClass: {
1426     QualType Ty = E->getType();
1427     if (const AtomicType *AT = Ty->getAs<AtomicType>())
1428       Ty = AT->getValueType();
1429     if (!Ty->isAnyComplexType())
1430       return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
1431     return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
1432   }
1433   case Expr::CallExprClass:
1434   case Expr::CXXMemberCallExprClass:
1435   case Expr::CXXOperatorCallExprClass:
1436   case Expr::UserDefinedLiteralClass:
1437     return EmitCallExprLValue(cast<CallExpr>(E));
1438   case Expr::CXXRewrittenBinaryOperatorClass:
1439     return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm(),
1440                       IsKnownNonNull);
1441   case Expr::VAArgExprClass:
1442     return EmitVAArgExprLValue(cast<VAArgExpr>(E));
1443   case Expr::DeclRefExprClass:
1444     return EmitDeclRefLValue(cast<DeclRefExpr>(E));
1445   case Expr::ConstantExprClass: {
1446     const ConstantExpr *CE = cast<ConstantExpr>(E);
1447     if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
1448       QualType RetType = cast<CallExpr>(CE->getSubExpr()->IgnoreImplicit())
1449                              ->getCallReturnType(getContext())
1450                              ->getPointeeType();
1451       return MakeNaturalAlignAddrLValue(Result, RetType);
1452     }
1453     return EmitLValue(cast<ConstantExpr>(E)->getSubExpr(), IsKnownNonNull);
1454   }
1455   case Expr::ParenExprClass:
1456     return EmitLValue(cast<ParenExpr>(E)->getSubExpr(), IsKnownNonNull);
1457   case Expr::GenericSelectionExprClass:
1458     return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr(),
1459                       IsKnownNonNull);
1460   case Expr::PredefinedExprClass:
1461     return EmitPredefinedLValue(cast<PredefinedExpr>(E));
1462   case Expr::StringLiteralClass:
1463     return EmitStringLiteralLValue(cast<StringLiteral>(E));
1464   case Expr::ObjCEncodeExprClass:
1465     return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
1466   case Expr::PseudoObjectExprClass:
1467     return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
1468   case Expr::InitListExprClass:
1469     return EmitInitListLValue(cast<InitListExpr>(E));
1470   case Expr::CXXTemporaryObjectExprClass:
1471   case Expr::CXXConstructExprClass:
1472     return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
1473   case Expr::CXXBindTemporaryExprClass:
1474     return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
1475   case Expr::CXXUuidofExprClass:
1476     return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
1477   case Expr::LambdaExprClass:
1478     return EmitAggExprToLValue(E);
1479 
1480   case Expr::ExprWithCleanupsClass: {
1481     const auto *cleanups = cast<ExprWithCleanups>(E);
1482     RunCleanupsScope Scope(*this);
1483     LValue LV = EmitLValue(cleanups->getSubExpr(), IsKnownNonNull);
1484     if (LV.isSimple()) {
1485       // Defend against branches out of gnu statement expressions surrounded by
1486       // cleanups.
1487       Address Addr = LV.getAddress(*this);
1488       llvm::Value *V = Addr.getPointer();
1489       Scope.ForceCleanup({&V});
1490       return LValue::MakeAddr(Addr.withPointer(V, Addr.isKnownNonNull()),
1491                               LV.getType(), getContext(), LV.getBaseInfo(),
1492                               LV.getTBAAInfo());
1493     }
1494     // FIXME: Is it possible to create an ExprWithCleanups that produces a
1495     // bitfield lvalue or some other non-simple lvalue?
1496     return LV;
1497   }
1498 
1499   case Expr::CXXDefaultArgExprClass: {
1500     auto *DAE = cast<CXXDefaultArgExpr>(E);
1501     CXXDefaultArgExprScope Scope(*this, DAE);
1502     return EmitLValue(DAE->getExpr(), IsKnownNonNull);
1503   }
1504   case Expr::CXXDefaultInitExprClass: {
1505     auto *DIE = cast<CXXDefaultInitExpr>(E);
1506     CXXDefaultInitExprScope Scope(*this, DIE);
1507     return EmitLValue(DIE->getExpr(), IsKnownNonNull);
1508   }
1509   case Expr::CXXTypeidExprClass:
1510     return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));
1511 
1512   case Expr::ObjCMessageExprClass:
1513     return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
1514   case Expr::ObjCIvarRefExprClass:
1515     return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
1516   case Expr::StmtExprClass:
1517     return EmitStmtExprLValue(cast<StmtExpr>(E));
1518   case Expr::UnaryOperatorClass:
1519     return EmitUnaryOpLValue(cast<UnaryOperator>(E));
1520   case Expr::ArraySubscriptExprClass:
1521     return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
1522   case Expr::MatrixSubscriptExprClass:
1523     return EmitMatrixSubscriptExpr(cast<MatrixSubscriptExpr>(E));
1524   case Expr::OMPArraySectionExprClass:
1525     return EmitOMPArraySectionExpr(cast<OMPArraySectionExpr>(E));
1526   case Expr::ExtVectorElementExprClass:
1527     return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
1528   case Expr::CXXThisExprClass:
1529     return MakeAddrLValue(LoadCXXThisAddress(), E->getType());
1530   case Expr::MemberExprClass:
1531     return EmitMemberExpr(cast<MemberExpr>(E));
1532   case Expr::CompoundLiteralExprClass:
1533     return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
1534   case Expr::ConditionalOperatorClass:
1535     return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
1536   case Expr::BinaryConditionalOperatorClass:
1537     return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
1538   case Expr::ChooseExprClass:
1539     return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(), IsKnownNonNull);
1540   case Expr::OpaqueValueExprClass:
1541     return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
1542   case Expr::SubstNonTypeTemplateParmExprClass:
1543     return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(),
1544                       IsKnownNonNull);
1545   case Expr::ImplicitCastExprClass:
1546   case Expr::CStyleCastExprClass:
1547   case Expr::CXXFunctionalCastExprClass:
1548   case Expr::CXXStaticCastExprClass:
1549   case Expr::CXXDynamicCastExprClass:
1550   case Expr::CXXReinterpretCastExprClass:
1551   case Expr::CXXConstCastExprClass:
1552   case Expr::CXXAddrspaceCastExprClass:
1553   case Expr::ObjCBridgedCastExprClass:
1554     return EmitCastLValue(cast<CastExpr>(E));
1555 
1556   case Expr::MaterializeTemporaryExprClass:
1557     return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
1558 
1559   case Expr::CoawaitExprClass:
1560     return EmitCoawaitLValue(cast<CoawaitExpr>(E));
1561   case Expr::CoyieldExprClass:
1562     return EmitCoyieldLValue(cast<CoyieldExpr>(E));
1563   }
1564 }
1565 
1566 /// Given an object of the given canonical type, can we safely copy a
1567 /// value out of it based on its initializer?
1568 static bool isConstantEmittableObjectType(QualType type) {
1569   assert(type.isCanonical());
1570   assert(!type->isReferenceType());
1571 
1572   // Must be const-qualified but non-volatile.
1573   Qualifiers qs = type.getLocalQualifiers();
1574   if (!qs.hasConst() || qs.hasVolatile()) return false;
1575 
1576   // Otherwise, all object types satisfy this except C++ classes with
1577   // mutable subobjects or non-trivial copy/destroy behavior.
1578   if (const auto *RT = dyn_cast<RecordType>(type))
1579     if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1580       if (RD->hasMutableFields() || !RD->isTrivial())
1581         return false;
1582 
1583   return true;
1584 }
1585 
1586 /// Can we constant-emit a load of a reference to a variable of the
1587 /// given type?  This is different from predicates like
1588 /// Decl::mightBeUsableInConstantExpressions because we do want it to apply
1589 /// in situations that don't necessarily satisfy the language's rules
1590 /// for this (e.g. C++'s ODR-use rules).  For example, we want to able
1591 /// to do this with const float variables even if those variables
1592 /// aren't marked 'constexpr'.
1593 enum ConstantEmissionKind {
1594   CEK_None,
1595   CEK_AsReferenceOnly,
1596   CEK_AsValueOrReference,
1597   CEK_AsValueOnly
1598 };
1599 static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
1600   type = type.getCanonicalType();
1601   if (const auto *ref = dyn_cast<ReferenceType>(type)) {
1602     if (isConstantEmittableObjectType(ref->getPointeeType()))
1603       return CEK_AsValueOrReference;
1604     return CEK_AsReferenceOnly;
1605   }
1606   if (isConstantEmittableObjectType(type))
1607     return CEK_AsValueOnly;
1608   return CEK_None;
1609 }
1610 
1611 /// Try to emit a reference to the given value without producing it as
1612 /// an l-value.  This is just an optimization, but it avoids us needing
1613 /// to emit global copies of variables if they're named without triggering
1614 /// a formal use in a context where we can't emit a direct reference to them,
1615 /// for instance if a block or lambda or a member of a local class uses a
1616 /// const int variable or constexpr variable from an enclosing function.
1617 CodeGenFunction::ConstantEmission
1618 CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
1619   ValueDecl *value = refExpr->getDecl();
1620 
1621   // The value needs to be an enum constant or a constant variable.
1622   ConstantEmissionKind CEK;
1623   if (isa<ParmVarDecl>(value)) {
1624     CEK = CEK_None;
1625   } else if (auto *var = dyn_cast<VarDecl>(value)) {
1626     CEK = checkVarTypeForConstantEmission(var->getType());
1627   } else if (isa<EnumConstantDecl>(value)) {
1628     CEK = CEK_AsValueOnly;
1629   } else {
1630     CEK = CEK_None;
1631   }
1632   if (CEK == CEK_None) return ConstantEmission();
1633 
1634   Expr::EvalResult result;
1635   bool resultIsReference;
1636   QualType resultType;
1637 
1638   // It's best to evaluate all the way as an r-value if that's permitted.
1639   if (CEK != CEK_AsReferenceOnly &&
1640       refExpr->EvaluateAsRValue(result, getContext())) {
1641     resultIsReference = false;
1642     resultType = refExpr->getType();
1643 
1644   // Otherwise, try to evaluate as an l-value.
1645   } else if (CEK != CEK_AsValueOnly &&
1646              refExpr->EvaluateAsLValue(result, getContext())) {
1647     resultIsReference = true;
1648     resultType = value->getType();
1649 
1650   // Failure.
1651   } else {
1652     return ConstantEmission();
1653   }
1654 
1655   // In any case, if the initializer has side-effects, abandon ship.
1656   if (result.HasSideEffects)
1657     return ConstantEmission();
1658 
1659   // In CUDA/HIP device compilation, a lambda may capture a reference variable
1660   // referencing a global host variable by copy. In this case the lambda should
1661   // make a copy of the value of the global host variable. The DRE of the
1662   // captured reference variable cannot be emitted as load from the host
1663   // global variable as compile time constant, since the host variable is not
1664   // accessible on device. The DRE of the captured reference variable has to be
1665   // loaded from captures.
1666   if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() &&
1667       refExpr->refersToEnclosingVariableOrCapture()) {
1668     auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl);
1669     if (MD && MD->getParent()->isLambda() &&
1670         MD->getOverloadedOperator() == OO_Call) {
1671       const APValue::LValueBase &base = result.Val.getLValueBase();
1672       if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) {
1673         if (const VarDecl *VD = dyn_cast<const VarDecl>(D)) {
1674           if (!VD->hasAttr<CUDADeviceAttr>()) {
1675             return ConstantEmission();
1676           }
1677         }
1678       }
1679     }
1680   }
1681 
1682   // Emit as a constant.
1683   auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(),
1684                                                result.Val, resultType);
1685 
1686   // Make sure we emit a debug reference to the global variable.
1687   // This should probably fire even for
1688   if (isa<VarDecl>(value)) {
1689     if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
1690       EmitDeclRefExprDbgValue(refExpr, result.Val);
1691   } else {
1692     assert(isa<EnumConstantDecl>(value));
1693     EmitDeclRefExprDbgValue(refExpr, result.Val);
1694   }
1695 
1696   // If we emitted a reference constant, we need to dereference that.
1697   if (resultIsReference)
1698     return ConstantEmission::forReference(C);
1699 
1700   return ConstantEmission::forValue(C);
1701 }
1702 
1703 static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF,
1704                                                         const MemberExpr *ME) {
1705   if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
1706     // Try to emit static variable member expressions as DREs.
1707     return DeclRefExpr::Create(
1708         CGF.getContext(), NestedNameSpecifierLoc(), SourceLocation(), VD,
1709         /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
1710         ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse());
1711   }
1712   return nullptr;
1713 }
1714 
1715 CodeGenFunction::ConstantEmission
1716 CodeGenFunction::tryEmitAsConstant(const MemberExpr *ME) {
1717   if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, ME))
1718     return tryEmitAsConstant(DRE);
1719   return ConstantEmission();
1720 }
1721 
1722 llvm::Value *CodeGenFunction::emitScalarConstant(
1723     const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
1724   assert(Constant && "not a constant");
1725   if (Constant.isReference())
1726     return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
1727                             E->getExprLoc())
1728         .getScalarVal();
1729   return Constant.getValue();
1730 }
1731 
1732 llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
1733                                                SourceLocation Loc) {
1734   return EmitLoadOfScalar(lvalue.getAddress(*this), lvalue.isVolatile(),
1735                           lvalue.getType(), Loc, lvalue.getBaseInfo(),
1736                           lvalue.getTBAAInfo(), lvalue.isNontemporal());
1737 }
1738 
1739 static bool hasBooleanRepresentation(QualType Ty) {
1740   if (Ty->isBooleanType())
1741     return true;
1742 
1743   if (const EnumType *ET = Ty->getAs<EnumType>())
1744     return ET->getDecl()->getIntegerType()->isBooleanType();
1745 
1746   if (const AtomicType *AT = Ty->getAs<AtomicType>())
1747     return hasBooleanRepresentation(AT->getValueType());
1748 
1749   return false;
1750 }
1751 
1752 static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
1753                             llvm::APInt &Min, llvm::APInt &End,
1754                             bool StrictEnums, bool IsBool) {
1755   const EnumType *ET = Ty->getAs<EnumType>();
1756   bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
1757                                 ET && !ET->getDecl()->isFixed();
1758   if (!IsBool && !IsRegularCPlusPlusEnum)
1759     return false;
1760 
1761   if (IsBool) {
1762     Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
1763     End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
1764   } else {
1765     const EnumDecl *ED = ET->getDecl();
1766     ED->getValueRange(End, Min);
1767   }
1768   return true;
1769 }
1770 
1771 llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
1772   llvm::APInt Min, End;
1773   if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums,
1774                        hasBooleanRepresentation(Ty)))
1775     return nullptr;
1776 
1777   llvm::MDBuilder MDHelper(getLLVMContext());
1778   return MDHelper.createRange(Min, End);
1779 }
1780 
1781 bool CodeGenFunction::EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
1782                                            SourceLocation Loc) {
1783   bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool);
1784   bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum);
1785   if (!HasBoolCheck && !HasEnumCheck)
1786     return false;
1787 
1788   bool IsBool = hasBooleanRepresentation(Ty) ||
1789                 NSAPI(CGM.getContext()).isObjCBOOLType(Ty);
1790   bool NeedsBoolCheck = HasBoolCheck && IsBool;
1791   bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>();
1792   if (!NeedsBoolCheck && !NeedsEnumCheck)
1793     return false;
1794 
1795   // Single-bit booleans don't need to be checked. Special-case this to avoid
1796   // a bit width mismatch when handling bitfield values. This is handled by
1797   // EmitFromMemory for the non-bitfield case.
1798   if (IsBool &&
1799       cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1)
1800     return false;
1801 
1802   llvm::APInt Min, End;
1803   if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool))
1804     return true;
1805 
1806   auto &Ctx = getLLVMContext();
1807   SanitizerScope SanScope(this);
1808   llvm::Value *Check;
1809   --End;
1810   if (!Min) {
1811     Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End));
1812   } else {
1813     llvm::Value *Upper =
1814         Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End));
1815     llvm::Value *Lower =
1816         Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min));
1817     Check = Builder.CreateAnd(Upper, Lower);
1818   }
1819   llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
1820                                   EmitCheckTypeDescriptor(Ty)};
1821   SanitizerMask Kind =
1822       NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool;
1823   EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue,
1824             StaticArgs, EmitCheckValue(Value));
1825   return true;
1826 }
1827 
1828 llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
1829                                                QualType Ty,
1830                                                SourceLocation Loc,
1831                                                LValueBaseInfo BaseInfo,
1832                                                TBAAAccessInfo TBAAInfo,
1833                                                bool isNontemporal) {
1834   if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getPointer()))
1835     if (GV->isThreadLocal())
1836       Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
1837                               NotKnownNonNull);
1838 
1839   if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
1840     // Boolean vectors use `iN` as storage type.
1841     if (ClangVecTy->isExtVectorBoolType()) {
1842       llvm::Type *ValTy = ConvertType(Ty);
1843       unsigned ValNumElems =
1844           cast<llvm::FixedVectorType>(ValTy)->getNumElements();
1845       // Load the `iP` storage object (P is the padded vector size).
1846       auto *RawIntV = Builder.CreateLoad(Addr, Volatile, "load_bits");
1847       const auto *RawIntTy = RawIntV->getType();
1848       assert(RawIntTy->isIntegerTy() && "compressed iN storage for bitvectors");
1849       // Bitcast iP --> <P x i1>.
1850       auto *PaddedVecTy = llvm::FixedVectorType::get(
1851           Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
1852       llvm::Value *V = Builder.CreateBitCast(RawIntV, PaddedVecTy);
1853       // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
1854       V = emitBoolVecConversion(V, ValNumElems, "extractvec");
1855 
1856       return EmitFromMemory(V, Ty);
1857     }
1858 
1859     // Handle vectors of size 3 like size 4 for better performance.
1860     const llvm::Type *EltTy = Addr.getElementType();
1861     const auto *VTy = cast<llvm::FixedVectorType>(EltTy);
1862 
1863     if (!CGM.getCodeGenOpts().PreserveVec3Type && VTy->getNumElements() == 3) {
1864 
1865       llvm::VectorType *vec4Ty =
1866           llvm::FixedVectorType::get(VTy->getElementType(), 4);
1867       Address Cast = Addr.withElementType(vec4Ty);
1868       // Now load value.
1869       llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");
1870 
1871       // Shuffle vector to get vec3.
1872       V = Builder.CreateShuffleVector(V, ArrayRef<int>{0, 1, 2}, "extractVec");
1873       return EmitFromMemory(V, Ty);
1874     }
1875   }
1876 
1877   // Atomic operations have to be done on integral types.
1878   LValue AtomicLValue =
1879       LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
1880   if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
1881     return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
1882   }
1883 
1884   llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
1885   if (isNontemporal) {
1886     llvm::MDNode *Node = llvm::MDNode::get(
1887         Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
1888     Load->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
1889   }
1890 
1891   CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);
1892 
1893   if (EmitScalarRangeCheck(Load, Ty, Loc)) {
1894     // In order to prevent the optimizer from throwing away the check, don't
1895     // attach range metadata to the load.
1896   } else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
1897     if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty)) {
1898       Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
1899       Load->setMetadata(llvm::LLVMContext::MD_noundef,
1900                         llvm::MDNode::get(getLLVMContext(), std::nullopt));
1901     }
1902 
1903   return EmitFromMemory(Load, Ty);
1904 }
1905 
1906 llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
1907   // Bool has a different representation in memory than in registers.
1908   if (hasBooleanRepresentation(Ty)) {
1909     // This should really always be an i1, but sometimes it's already
1910     // an i8, and it's awkward to track those cases down.
1911     if (Value->getType()->isIntegerTy(1))
1912       return Builder.CreateZExt(Value, ConvertTypeForMem(Ty), "frombool");
1913     assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
1914            "wrong value rep of bool");
1915   }
1916 
1917   return Value;
1918 }
1919 
1920 llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
1921   // Bool has a different representation in memory than in registers.
1922   if (hasBooleanRepresentation(Ty)) {
1923     assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
1924            "wrong value rep of bool");
1925     return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
1926   }
1927   if (Ty->isExtVectorBoolType()) {
1928     const auto *RawIntTy = Value->getType();
1929     // Bitcast iP --> <P x i1>.
1930     auto *PaddedVecTy = llvm::FixedVectorType::get(
1931         Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
1932     auto *V = Builder.CreateBitCast(Value, PaddedVecTy);
1933     // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
1934     llvm::Type *ValTy = ConvertType(Ty);
1935     unsigned ValNumElems = cast<llvm::FixedVectorType>(ValTy)->getNumElements();
1936     return emitBoolVecConversion(V, ValNumElems, "extractvec");
1937   }
1938 
1939   return Value;
1940 }
1941 
1942 // Convert the pointer of \p Addr to a pointer to a vector (the value type of
1943 // MatrixType), if it points to a array (the memory type of MatrixType).
1944 static Address MaybeConvertMatrixAddress(Address Addr, CodeGenFunction &CGF,
1945                                          bool IsVector = true) {
1946   auto *ArrayTy = dyn_cast<llvm::ArrayType>(Addr.getElementType());
1947   if (ArrayTy && IsVector) {
1948     auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
1949                                                 ArrayTy->getNumElements());
1950 
1951     return Addr.withElementType(VectorTy);
1952   }
1953   auto *VectorTy = dyn_cast<llvm::VectorType>(Addr.getElementType());
1954   if (VectorTy && !IsVector) {
1955     auto *ArrayTy = llvm::ArrayType::get(
1956         VectorTy->getElementType(),
1957         cast<llvm::FixedVectorType>(VectorTy)->getNumElements());
1958 
1959     return Addr.withElementType(ArrayTy);
1960   }
1961 
1962   return Addr;
1963 }
1964 
1965 // Emit a store of a matrix LValue. This may require casting the original
1966 // pointer to memory address (ArrayType) to a pointer to the value type
1967 // (VectorType).
1968 static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
1969                                     bool isInit, CodeGenFunction &CGF) {
1970   Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(CGF), CGF,
1971                                            value->getType()->isVectorTy());
1972   CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
1973                         lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
1974                         lvalue.isNontemporal());
1975 }
1976 
1977 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
1978                                         bool Volatile, QualType Ty,
1979                                         LValueBaseInfo BaseInfo,
1980                                         TBAAAccessInfo TBAAInfo,
1981                                         bool isInit, bool isNontemporal) {
1982   if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getPointer()))
1983     if (GV->isThreadLocal())
1984       Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
1985                               NotKnownNonNull);
1986 
1987   llvm::Type *SrcTy = Value->getType();
1988   if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
1989     auto *VecTy = dyn_cast<llvm::FixedVectorType>(SrcTy);
1990     if (VecTy && ClangVecTy->isExtVectorBoolType()) {
1991       auto *MemIntTy = cast<llvm::IntegerType>(Addr.getElementType());
1992       // Expand to the memory bit width.
1993       unsigned MemNumElems = MemIntTy->getPrimitiveSizeInBits();
1994       // <N x i1> --> <P x i1>.
1995       Value = emitBoolVecConversion(Value, MemNumElems, "insertvec");
1996       // <P x i1> --> iP.
1997       Value = Builder.CreateBitCast(Value, MemIntTy);
1998     } else if (!CGM.getCodeGenOpts().PreserveVec3Type) {
1999       // Handle vec3 special.
2000       if (VecTy && cast<llvm::FixedVectorType>(VecTy)->getNumElements() == 3) {
2001         // Our source is a vec3, do a shuffle vector to make it a vec4.
2002         Value = Builder.CreateShuffleVector(Value, ArrayRef<int>{0, 1, 2, -1},
2003                                             "extractVec");
2004         SrcTy = llvm::FixedVectorType::get(VecTy->getElementType(), 4);
2005       }
2006       if (Addr.getElementType() != SrcTy) {
2007         Addr = Addr.withElementType(SrcTy);
2008       }
2009     }
2010   }
2011 
2012   Value = EmitToMemory(Value, Ty);
2013 
2014   LValue AtomicLValue =
2015       LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
2016   if (Ty->isAtomicType() ||
2017       (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) {
2018     EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit);
2019     return;
2020   }
2021 
2022   llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
2023   if (isNontemporal) {
2024     llvm::MDNode *Node =
2025         llvm::MDNode::get(Store->getContext(),
2026                           llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
2027     Store->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
2028   }
2029 
2030   CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
2031 }
2032 
2033 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
2034                                         bool isInit) {
2035   if (lvalue.getType()->isConstantMatrixType()) {
2036     EmitStoreOfMatrixScalar(value, lvalue, isInit, *this);
2037     return;
2038   }
2039 
2040   EmitStoreOfScalar(value, lvalue.getAddress(*this), lvalue.isVolatile(),
2041                     lvalue.getType(), lvalue.getBaseInfo(),
2042                     lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
2043 }
2044 
2045 // Emit a load of a LValue of matrix type. This may require casting the pointer
2046 // to memory address (ArrayType) to a pointer to the value type (VectorType).
2047 static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc,
2048                                      CodeGenFunction &CGF) {
2049   assert(LV.getType()->isConstantMatrixType());
2050   Address Addr = MaybeConvertMatrixAddress(LV.getAddress(CGF), CGF);
2051   LV.setAddress(Addr);
2052   return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
2053 }
2054 
2055 /// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
2056 /// method emits the address of the lvalue, then loads the result as an rvalue,
2057 /// returning the rvalue.
2058 RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
2059   if (LV.isObjCWeak()) {
2060     // load of a __weak object.
2061     Address AddrWeakObj = LV.getAddress(*this);
2062     return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
2063                                                              AddrWeakObj));
2064   }
2065   if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
2066     // In MRC mode, we do a load+autorelease.
2067     if (!getLangOpts().ObjCAutoRefCount) {
2068       return RValue::get(EmitARCLoadWeak(LV.getAddress(*this)));
2069     }
2070 
2071     // In ARC mode, we load retained and then consume the value.
2072     llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress(*this));
2073     Object = EmitObjCConsumeObject(LV.getType(), Object);
2074     return RValue::get(Object);
2075   }
2076 
2077   if (LV.isSimple()) {
2078     assert(!LV.getType()->isFunctionType());
2079 
2080     if (LV.getType()->isConstantMatrixType())
2081       return EmitLoadOfMatrixLValue(LV, Loc, *this);
2082 
2083     // Everything needs a load.
2084     return RValue::get(EmitLoadOfScalar(LV, Loc));
2085   }
2086 
2087   if (LV.isVectorElt()) {
2088     llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
2089                                               LV.isVolatileQualified());
2090     return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
2091                                                     "vecext"));
2092   }
2093 
2094   // If this is a reference to a subset of the elements of a vector, either
2095   // shuffle the input or extract/insert them as appropriate.
2096   if (LV.isExtVectorElt()) {
2097     return EmitLoadOfExtVectorElementLValue(LV);
2098   }
2099 
2100   // Global Register variables always invoke intrinsics
2101   if (LV.isGlobalReg())
2102     return EmitLoadOfGlobalRegLValue(LV);
2103 
2104   if (LV.isMatrixElt()) {
2105     llvm::Value *Idx = LV.getMatrixIdx();
2106     if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2107       const auto *const MatTy = LV.getType()->castAs<ConstantMatrixType>();
2108       llvm::MatrixBuilder MB(Builder);
2109       MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2110     }
2111     llvm::LoadInst *Load =
2112         Builder.CreateLoad(LV.getMatrixAddress(), LV.isVolatileQualified());
2113     return RValue::get(Builder.CreateExtractElement(Load, Idx, "matrixext"));
2114   }
2115 
2116   assert(LV.isBitField() && "Unknown LValue type!");
2117   return EmitLoadOfBitfieldLValue(LV, Loc);
2118 }
2119 
2120 RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
2121                                                  SourceLocation Loc) {
2122   const CGBitFieldInfo &Info = LV.getBitFieldInfo();
2123 
2124   // Get the output type.
2125   llvm::Type *ResLTy = ConvertType(LV.getType());
2126 
2127   Address Ptr = LV.getBitFieldAddress();
2128   llvm::Value *Val =
2129       Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
2130 
2131   bool UseVolatile = LV.isVolatileQualified() &&
2132                      Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2133   const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2134   const unsigned StorageSize =
2135       UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2136   if (Info.IsSigned) {
2137     assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
2138     unsigned HighBits = StorageSize - Offset - Info.Size;
2139     if (HighBits)
2140       Val = Builder.CreateShl(Val, HighBits, "bf.shl");
2141     if (Offset + HighBits)
2142       Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr");
2143   } else {
2144     if (Offset)
2145       Val = Builder.CreateLShr(Val, Offset, "bf.lshr");
2146     if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
2147       Val = Builder.CreateAnd(
2148           Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear");
2149   }
2150   Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
2151   EmitScalarRangeCheck(Val, LV.getType(), Loc);
2152   return RValue::get(Val);
2153 }
2154 
2155 // If this is a reference to a subset of the elements of a vector, create an
2156 // appropriate shufflevector.
2157 RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
2158   llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
2159                                         LV.isVolatileQualified());
2160 
2161   // HLSL allows treating scalars as one-element vectors. Converting the scalar
2162   // IR value to a vector here allows the rest of codegen to behave as normal.
2163   if (getLangOpts().HLSL && !Vec->getType()->isVectorTy()) {
2164     llvm::Type *DstTy = llvm::FixedVectorType::get(Vec->getType(), 1);
2165     llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
2166     Vec = Builder.CreateInsertElement(DstTy, Vec, Zero, "cast.splat");
2167   }
2168 
2169   const llvm::Constant *Elts = LV.getExtVectorElts();
2170 
2171   // If the result of the expression is a non-vector type, we must be extracting
2172   // a single element.  Just codegen as an extractelement.
2173   const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
2174   if (!ExprVT) {
2175     unsigned InIdx = getAccessedFieldNo(0, Elts);
2176     llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2177     return RValue::get(Builder.CreateExtractElement(Vec, Elt));
2178   }
2179 
2180   // Always use shuffle vector to try to retain the original program structure
2181   unsigned NumResultElts = ExprVT->getNumElements();
2182 
2183   SmallVector<int, 4> Mask;
2184   for (unsigned i = 0; i != NumResultElts; ++i)
2185     Mask.push_back(getAccessedFieldNo(i, Elts));
2186 
2187   Vec = Builder.CreateShuffleVector(Vec, Mask);
2188   return RValue::get(Vec);
2189 }
2190 
2191 /// Generates lvalue for partial ext_vector access.
2192 Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
2193   Address VectorAddress = LV.getExtVectorAddress();
2194   QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
2195   llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);
2196 
2197   Address CastToPointerElement = VectorAddress.withElementType(VectorElementTy);
2198 
2199   const llvm::Constant *Elts = LV.getExtVectorElts();
2200   unsigned ix = getAccessedFieldNo(0, Elts);
2201 
2202   Address VectorBasePtrPlusIx =
2203     Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
2204                                    "vector.elt");
2205 
2206   return VectorBasePtrPlusIx;
2207 }
2208 
2209 /// Load of global gamed gegisters are always calls to intrinsics.
2210 RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) {
2211   assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
2212          "Bad type for register variable");
2213   llvm::MDNode *RegName = cast<llvm::MDNode>(
2214       cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());
2215 
2216   // We accept integer and pointer types only
2217   llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
2218   llvm::Type *Ty = OrigTy;
2219   if (OrigTy->isPointerTy())
2220     Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2221   llvm::Type *Types[] = { Ty };
2222 
2223   llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
2224   llvm::Value *Call = Builder.CreateCall(
2225       F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
2226   if (OrigTy->isPointerTy())
2227     Call = Builder.CreateIntToPtr(Call, OrigTy);
2228   return RValue::get(Call);
2229 }
2230 
2231 /// EmitStoreThroughLValue - Store the specified rvalue into the specified
2232 /// lvalue, where both are guaranteed to the have the same type, and that type
2233 /// is 'Ty'.
2234 void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
2235                                              bool isInit) {
2236   if (!Dst.isSimple()) {
2237     if (Dst.isVectorElt()) {
2238       // Read/modify/write the vector, inserting the new element.
2239       llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
2240                                             Dst.isVolatileQualified());
2241       auto *IRStoreTy = dyn_cast<llvm::IntegerType>(Vec->getType());
2242       if (IRStoreTy) {
2243         auto *IRVecTy = llvm::FixedVectorType::get(
2244             Builder.getInt1Ty(), IRStoreTy->getPrimitiveSizeInBits());
2245         Vec = Builder.CreateBitCast(Vec, IRVecTy);
2246         // iN --> <N x i1>.
2247       }
2248       Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
2249                                         Dst.getVectorIdx(), "vecins");
2250       if (IRStoreTy) {
2251         // <N x i1> --> <iN>.
2252         Vec = Builder.CreateBitCast(Vec, IRStoreTy);
2253       }
2254       Builder.CreateStore(Vec, Dst.getVectorAddress(),
2255                           Dst.isVolatileQualified());
2256       return;
2257     }
2258 
2259     // If this is an update of extended vector elements, insert them as
2260     // appropriate.
2261     if (Dst.isExtVectorElt())
2262       return EmitStoreThroughExtVectorComponentLValue(Src, Dst);
2263 
2264     if (Dst.isGlobalReg())
2265       return EmitStoreThroughGlobalRegLValue(Src, Dst);
2266 
2267     if (Dst.isMatrixElt()) {
2268       llvm::Value *Idx = Dst.getMatrixIdx();
2269       if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2270         const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>();
2271         llvm::MatrixBuilder MB(Builder);
2272         MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2273       }
2274       llvm::Instruction *Load = Builder.CreateLoad(Dst.getMatrixAddress());
2275       llvm::Value *Vec =
2276           Builder.CreateInsertElement(Load, Src.getScalarVal(), Idx, "matins");
2277       Builder.CreateStore(Vec, Dst.getMatrixAddress(),
2278                           Dst.isVolatileQualified());
2279       return;
2280     }
2281 
2282     assert(Dst.isBitField() && "Unknown LValue type");
2283     return EmitStoreThroughBitfieldLValue(Src, Dst);
2284   }
2285 
2286   // There's special magic for assigning into an ARC-qualified l-value.
2287   if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
2288     switch (Lifetime) {
2289     case Qualifiers::OCL_None:
2290       llvm_unreachable("present but none");
2291 
2292     case Qualifiers::OCL_ExplicitNone:
2293       // nothing special
2294       break;
2295 
2296     case Qualifiers::OCL_Strong:
2297       if (isInit) {
2298         Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal()));
2299         break;
2300       }
2301       EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
2302       return;
2303 
2304     case Qualifiers::OCL_Weak:
2305       if (isInit)
2306         // Initialize and then skip the primitive store.
2307         EmitARCInitWeak(Dst.getAddress(*this), Src.getScalarVal());
2308       else
2309         EmitARCStoreWeak(Dst.getAddress(*this), Src.getScalarVal(),
2310                          /*ignore*/ true);
2311       return;
2312 
2313     case Qualifiers::OCL_Autoreleasing:
2314       Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
2315                                                      Src.getScalarVal()));
2316       // fall into the normal path
2317       break;
2318     }
2319   }
2320 
2321   if (Dst.isObjCWeak() && !Dst.isNonGC()) {
2322     // load of a __weak object.
2323     Address LvalueDst = Dst.getAddress(*this);
2324     llvm::Value *src = Src.getScalarVal();
2325      CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
2326     return;
2327   }
2328 
2329   if (Dst.isObjCStrong() && !Dst.isNonGC()) {
2330     // load of a __strong object.
2331     Address LvalueDst = Dst.getAddress(*this);
2332     llvm::Value *src = Src.getScalarVal();
2333     if (Dst.isObjCIvar()) {
2334       assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
2335       llvm::Type *ResultType = IntPtrTy;
2336       Address dst = EmitPointerWithAlignment(Dst.getBaseIvarExp());
2337       llvm::Value *RHS = dst.getPointer();
2338       RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
2339       llvm::Value *LHS =
2340         Builder.CreatePtrToInt(LvalueDst.getPointer(), ResultType,
2341                                "sub.ptr.lhs.cast");
2342       llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
2343       CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
2344                                               BytesBetween);
2345     } else if (Dst.isGlobalObjCRef()) {
2346       CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
2347                                                 Dst.isThreadLocalRef());
2348     }
2349     else
2350       CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
2351     return;
2352   }
2353 
2354   assert(Src.isScalar() && "Can't emit an agg store with this method");
2355   EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
2356 }
2357 
2358 void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
2359                                                      llvm::Value **Result) {
2360   const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
2361   llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
2362   Address Ptr = Dst.getBitFieldAddress();
2363 
2364   // Get the source value, truncated to the width of the bit-field.
2365   llvm::Value *SrcVal = Src.getScalarVal();
2366 
2367   // Cast the source to the storage type and shift it into place.
2368   SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
2369                                  /*isSigned=*/false);
2370   llvm::Value *MaskedVal = SrcVal;
2371 
2372   const bool UseVolatile =
2373       CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
2374       Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2375   const unsigned StorageSize =
2376       UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2377   const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2378   // See if there are other bits in the bitfield's storage we'll need to load
2379   // and mask together with source before storing.
2380   if (StorageSize != Info.Size) {
2381     assert(StorageSize > Info.Size && "Invalid bitfield size.");
2382     llvm::Value *Val =
2383         Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
2384 
2385     // Mask the source value as needed.
2386     if (!hasBooleanRepresentation(Dst.getType()))
2387       SrcVal = Builder.CreateAnd(
2388           SrcVal, llvm::APInt::getLowBitsSet(StorageSize, Info.Size),
2389           "bf.value");
2390     MaskedVal = SrcVal;
2391     if (Offset)
2392       SrcVal = Builder.CreateShl(SrcVal, Offset, "bf.shl");
2393 
2394     // Mask out the original value.
2395     Val = Builder.CreateAnd(
2396         Val, ~llvm::APInt::getBitsSet(StorageSize, Offset, Offset + Info.Size),
2397         "bf.clear");
2398 
2399     // Or together the unchanged values and the source value.
2400     SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
2401   } else {
2402     assert(Offset == 0);
2403     // According to the AACPS:
2404     // When a volatile bit-field is written, and its container does not overlap
2405     // with any non-bit-field member, its container must be read exactly once
2406     // and written exactly once using the access width appropriate to the type
2407     // of the container. The two accesses are not atomic.
2408     if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
2409         CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
2410       Builder.CreateLoad(Ptr, true, "bf.load");
2411   }
2412 
2413   // Write the new value back out.
2414   Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());
2415 
2416   // Return the new value of the bit-field, if requested.
2417   if (Result) {
2418     llvm::Value *ResultVal = MaskedVal;
2419 
2420     // Sign extend the value if needed.
2421     if (Info.IsSigned) {
2422       assert(Info.Size <= StorageSize);
2423       unsigned HighBits = StorageSize - Info.Size;
2424       if (HighBits) {
2425         ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
2426         ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
2427       }
2428     }
2429 
2430     ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
2431                                       "bf.result.cast");
2432     *Result = EmitFromMemory(ResultVal, Dst.getType());
2433   }
2434 }
2435 
2436 void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
2437                                                                LValue Dst) {
2438   // HLSL allows storing to scalar values through ExtVector component LValues.
2439   // To support this we need to handle the case where the destination address is
2440   // a scalar.
2441   Address DstAddr = Dst.getExtVectorAddress();
2442   if (!DstAddr.getElementType()->isVectorTy()) {
2443     assert(!Dst.getType()->isVectorType() &&
2444            "this should only occur for non-vector l-values");
2445     Builder.CreateStore(Src.getScalarVal(), DstAddr, Dst.isVolatileQualified());
2446     return;
2447   }
2448 
2449   // This access turns into a read/modify/write of the vector.  Load the input
2450   // value now.
2451   llvm::Value *Vec = Builder.CreateLoad(DstAddr, Dst.isVolatileQualified());
2452   const llvm::Constant *Elts = Dst.getExtVectorElts();
2453 
2454   llvm::Value *SrcVal = Src.getScalarVal();
2455 
2456   if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
2457     unsigned NumSrcElts = VTy->getNumElements();
2458     unsigned NumDstElts =
2459         cast<llvm::FixedVectorType>(Vec->getType())->getNumElements();
2460     if (NumDstElts == NumSrcElts) {
2461       // Use shuffle vector is the src and destination are the same number of
2462       // elements and restore the vector mask since it is on the side it will be
2463       // stored.
2464       SmallVector<int, 4> Mask(NumDstElts);
2465       for (unsigned i = 0; i != NumSrcElts; ++i)
2466         Mask[getAccessedFieldNo(i, Elts)] = i;
2467 
2468       Vec = Builder.CreateShuffleVector(SrcVal, Mask);
2469     } else if (NumDstElts > NumSrcElts) {
2470       // Extended the source vector to the same length and then shuffle it
2471       // into the destination.
2472       // FIXME: since we're shuffling with undef, can we just use the indices
2473       //        into that?  This could be simpler.
2474       SmallVector<int, 4> ExtMask;
2475       for (unsigned i = 0; i != NumSrcElts; ++i)
2476         ExtMask.push_back(i);
2477       ExtMask.resize(NumDstElts, -1);
2478       llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(SrcVal, ExtMask);
2479       // build identity
2480       SmallVector<int, 4> Mask;
2481       for (unsigned i = 0; i != NumDstElts; ++i)
2482         Mask.push_back(i);
2483 
2484       // When the vector size is odd and .odd or .hi is used, the last element
2485       // of the Elts constant array will be one past the size of the vector.
2486       // Ignore the last element here, if it is greater than the mask size.
2487       if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
2488         NumSrcElts--;
2489 
2490       // modify when what gets shuffled in
2491       for (unsigned i = 0; i != NumSrcElts; ++i)
2492         Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts;
2493       Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask);
2494     } else {
2495       // We should never shorten the vector
2496       llvm_unreachable("unexpected shorten vector length");
2497     }
2498   } else {
2499     // If the Src is a scalar (not a vector), and the target is a vector it must
2500     // be updating one element.
2501     unsigned InIdx = getAccessedFieldNo(0, Elts);
2502     llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2503     Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
2504   }
2505 
2506   Builder.CreateStore(Vec, Dst.getExtVectorAddress(),
2507                       Dst.isVolatileQualified());
2508 }
2509 
2510 /// Store of global named registers are always calls to intrinsics.
2511 void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) {
2512   assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
2513          "Bad type for register variable");
2514   llvm::MDNode *RegName = cast<llvm::MDNode>(
2515       cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
2516   assert(RegName && "Register LValue is not metadata");
2517 
2518   // We accept integer and pointer types only
2519   llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
2520   llvm::Type *Ty = OrigTy;
2521   if (OrigTy->isPointerTy())
2522     Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2523   llvm::Type *Types[] = { Ty };
2524 
2525   llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
2526   llvm::Value *Value = Src.getScalarVal();
2527   if (OrigTy->isPointerTy())
2528     Value = Builder.CreatePtrToInt(Value, Ty);
2529   Builder.CreateCall(
2530       F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
2531 }
2532 
2533 // setObjCGCLValueClass - sets class of the lvalue for the purpose of
2534 // generating write-barries API. It is currently a global, ivar,
2535 // or neither.
2536 static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
2537                                  LValue &LV,
2538                                  bool IsMemberAccess=false) {
2539   if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
2540     return;
2541 
2542   if (isa<ObjCIvarRefExpr>(E)) {
2543     QualType ExpTy = E->getType();
2544     if (IsMemberAccess && ExpTy->isPointerType()) {
2545       // If ivar is a structure pointer, assigning to field of
2546       // this struct follows gcc's behavior and makes it a non-ivar
2547       // writer-barrier conservatively.
2548       ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2549       if (ExpTy->isRecordType()) {
2550         LV.setObjCIvar(false);
2551         return;
2552       }
2553     }
2554     LV.setObjCIvar(true);
2555     auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
2556     LV.setBaseIvarExp(Exp->getBase());
2557     LV.setObjCArray(E->getType()->isArrayType());
2558     return;
2559   }
2560 
2561   if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
2562     if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
2563       if (VD->hasGlobalStorage()) {
2564         LV.setGlobalObjCRef(true);
2565         LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
2566       }
2567     }
2568     LV.setObjCArray(E->getType()->isArrayType());
2569     return;
2570   }
2571 
2572   if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
2573     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2574     return;
2575   }
2576 
2577   if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
2578     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2579     if (LV.isObjCIvar()) {
2580       // If cast is to a structure pointer, follow gcc's behavior and make it
2581       // a non-ivar write-barrier.
2582       QualType ExpTy = E->getType();
2583       if (ExpTy->isPointerType())
2584         ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2585       if (ExpTy->isRecordType())
2586         LV.setObjCIvar(false);
2587     }
2588     return;
2589   }
2590 
2591   if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
2592     setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
2593     return;
2594   }
2595 
2596   if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
2597     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2598     return;
2599   }
2600 
2601   if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
2602     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2603     return;
2604   }
2605 
2606   if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
2607     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2608     return;
2609   }
2610 
2611   if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
2612     setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
2613     if (LV.isObjCIvar() && !LV.isObjCArray())
2614       // Using array syntax to assigning to what an ivar points to is not
2615       // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
2616       LV.setObjCIvar(false);
2617     else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
2618       // Using array syntax to assigning to what global points to is not
2619       // same as assigning to the global itself. {id *G;} G[i] = 0;
2620       LV.setGlobalObjCRef(false);
2621     return;
2622   }
2623 
2624   if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
2625     setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
2626     // We don't know if member is an 'ivar', but this flag is looked at
2627     // only in the context of LV.isObjCIvar().
2628     LV.setObjCArray(E->getType()->isArrayType());
2629     return;
2630   }
2631 }
2632 
2633 static LValue EmitThreadPrivateVarDeclLValue(
2634     CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
2635     llvm::Type *RealVarTy, SourceLocation Loc) {
2636   if (CGF.CGM.getLangOpts().OpenMPIRBuilder)
2637     Addr = CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
2638         CGF, VD, Addr, Loc);
2639   else
2640     Addr =
2641         CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);
2642 
2643   Addr = Addr.withElementType(RealVarTy);
2644   return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2645 }
2646 
2647 static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF,
2648                                            const VarDecl *VD, QualType T) {
2649   std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
2650       OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2651   // Return an invalid address if variable is MT_To (or MT_Enter starting with
2652   // OpenMP 5.2) and unified memory is not enabled. For all other cases: MT_Link
2653   // and MT_To (or MT_Enter) with unified memory, return a valid address.
2654   if (!Res || ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
2655                 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
2656                !CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory()))
2657     return Address::invalid();
2658   assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
2659           ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
2660             *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
2661            CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) &&
2662          "Expected link clause OR to clause with unified memory enabled.");
2663   QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
2664   Address Addr = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
2665   return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
2666 }
2667 
2668 Address
2669 CodeGenFunction::EmitLoadOfReference(LValue RefLVal,
2670                                      LValueBaseInfo *PointeeBaseInfo,
2671                                      TBAAAccessInfo *PointeeTBAAInfo) {
2672   llvm::LoadInst *Load =
2673       Builder.CreateLoad(RefLVal.getAddress(*this), RefLVal.isVolatile());
2674   CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo());
2675 
2676   QualType PointeeType = RefLVal.getType()->getPointeeType();
2677   CharUnits Align = CGM.getNaturalTypeAlignment(
2678       PointeeType, PointeeBaseInfo, PointeeTBAAInfo,
2679       /* forPointeeType= */ true);
2680   return Address(Load, ConvertTypeForMem(PointeeType), Align);
2681 }
2682 
2683 LValue CodeGenFunction::EmitLoadOfReferenceLValue(LValue RefLVal) {
2684   LValueBaseInfo PointeeBaseInfo;
2685   TBAAAccessInfo PointeeTBAAInfo;
2686   Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo,
2687                                             &PointeeTBAAInfo);
2688   return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(),
2689                         PointeeBaseInfo, PointeeTBAAInfo);
2690 }
2691 
2692 Address CodeGenFunction::EmitLoadOfPointer(Address Ptr,
2693                                            const PointerType *PtrTy,
2694                                            LValueBaseInfo *BaseInfo,
2695                                            TBAAAccessInfo *TBAAInfo) {
2696   llvm::Value *Addr = Builder.CreateLoad(Ptr);
2697   return Address(Addr, ConvertTypeForMem(PtrTy->getPointeeType()),
2698                  CGM.getNaturalTypeAlignment(PtrTy->getPointeeType(), BaseInfo,
2699                                              TBAAInfo,
2700                                              /*forPointeeType=*/true));
2701 }
2702 
2703 LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr,
2704                                                 const PointerType *PtrTy) {
2705   LValueBaseInfo BaseInfo;
2706   TBAAAccessInfo TBAAInfo;
2707   Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo);
2708   return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo);
2709 }
2710 
2711 static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
2712                                       const Expr *E, const VarDecl *VD) {
2713   QualType T = E->getType();
2714 
2715   // If it's thread_local, emit a call to its wrapper function instead.
2716   if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2717       CGF.CGM.getCXXABI().usesThreadWrapperFunction(VD))
2718     return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
2719   // Check if the variable is marked as declare target with link clause in
2720   // device codegen.
2721   if (CGF.getLangOpts().OpenMPIsTargetDevice) {
2722     Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T);
2723     if (Addr.isValid())
2724       return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2725   }
2726 
2727   llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
2728 
2729   if (VD->getTLSKind() != VarDecl::TLS_None)
2730     V = CGF.Builder.CreateThreadLocalAddress(V);
2731 
2732   llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
2733   CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
2734   Address Addr(V, RealVarTy, Alignment);
2735   // Emit reference to the private copy of the variable if it is an OpenMP
2736   // threadprivate variable.
2737   if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
2738       VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
2739     return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
2740                                           E->getExprLoc());
2741   }
2742   LValue LV = VD->getType()->isReferenceType() ?
2743       CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
2744                                     AlignmentSource::Decl) :
2745       CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2746   setObjCGCLValueClass(CGF.getContext(), E, LV);
2747   return LV;
2748 }
2749 
2750 static llvm::Constant *EmitFunctionDeclPointer(CodeGenModule &CGM,
2751                                                GlobalDecl GD) {
2752   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
2753   if (FD->hasAttr<WeakRefAttr>()) {
2754     ConstantAddress aliasee = CGM.GetWeakRefReference(FD);
2755     return aliasee.getPointer();
2756   }
2757 
2758   llvm::Constant *V = CGM.GetAddrOfFunction(GD);
2759   return V;
2760 }
2761 
2762 static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E,
2763                                      GlobalDecl GD) {
2764   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
2765   llvm::Value *V = EmitFunctionDeclPointer(CGF.CGM, GD);
2766   CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
2767   return CGF.MakeAddrLValue(V, E->getType(), Alignment,
2768                             AlignmentSource::Decl);
2769 }
2770 
2771 static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD,
2772                                       llvm::Value *ThisValue) {
2773 
2774   return CGF.EmitLValueForLambdaField(FD, ThisValue);
2775 }
2776 
2777 /// Named Registers are named metadata pointing to the register name
2778 /// which will be read from/written to as an argument to the intrinsic
2779 /// @llvm.read/write_register.
2780 /// So far, only the name is being passed down, but other options such as
2781 /// register type, allocation type or even optimization options could be
2782 /// passed down via the metadata node.
2783 static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) {
2784   SmallString<64> Name("llvm.named.register.");
2785   AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
2786   assert(Asm->getLabel().size() < 64-Name.size() &&
2787       "Register name too big");
2788   Name.append(Asm->getLabel());
2789   llvm::NamedMDNode *M =
2790     CGM.getModule().getOrInsertNamedMetadata(Name);
2791   if (M->getNumOperands() == 0) {
2792     llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(),
2793                                               Asm->getLabel());
2794     llvm::Metadata *Ops[] = {Str};
2795     M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
2796   }
2797 
2798   CharUnits Alignment = CGM.getContext().getDeclAlign(VD);
2799 
2800   llvm::Value *Ptr =
2801     llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
2802   return LValue::MakeGlobalReg(Ptr, Alignment, VD->getType());
2803 }
2804 
2805 /// Determine whether we can emit a reference to \p VD from the current
2806 /// context, despite not necessarily having seen an odr-use of the variable in
2807 /// this context.
2808 static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF,
2809                                                const DeclRefExpr *E,
2810                                                const VarDecl *VD) {
2811   // For a variable declared in an enclosing scope, do not emit a spurious
2812   // reference even if we have a capture, as that will emit an unwarranted
2813   // reference to our capture state, and will likely generate worse code than
2814   // emitting a local copy.
2815   if (E->refersToEnclosingVariableOrCapture())
2816     return false;
2817 
2818   // For a local declaration declared in this function, we can always reference
2819   // it even if we don't have an odr-use.
2820   if (VD->hasLocalStorage()) {
2821     return VD->getDeclContext() ==
2822            dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl);
2823   }
2824 
2825   // For a global declaration, we can emit a reference to it if we know
2826   // for sure that we are able to emit a definition of it.
2827   VD = VD->getDefinition(CGF.getContext());
2828   if (!VD)
2829     return false;
2830 
2831   // Don't emit a spurious reference if it might be to a variable that only
2832   // exists on a different device / target.
2833   // FIXME: This is unnecessarily broad. Check whether this would actually be a
2834   // cross-target reference.
2835   if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA ||
2836       CGF.getLangOpts().OpenCL) {
2837     return false;
2838   }
2839 
2840   // We can emit a spurious reference only if the linkage implies that we'll
2841   // be emitting a non-interposable symbol that will be retained until link
2842   // time.
2843   switch (CGF.CGM.getLLVMLinkageVarDefinition(VD)) {
2844   case llvm::GlobalValue::ExternalLinkage:
2845   case llvm::GlobalValue::LinkOnceODRLinkage:
2846   case llvm::GlobalValue::WeakODRLinkage:
2847   case llvm::GlobalValue::InternalLinkage:
2848   case llvm::GlobalValue::PrivateLinkage:
2849     return true;
2850   default:
2851     return false;
2852   }
2853 }
2854 
2855 LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
2856   const NamedDecl *ND = E->getDecl();
2857   QualType T = E->getType();
2858 
2859   assert(E->isNonOdrUse() != NOUR_Unevaluated &&
2860          "should not emit an unevaluated operand");
2861 
2862   if (const auto *VD = dyn_cast<VarDecl>(ND)) {
2863     // Global Named registers access via intrinsics only
2864     if (VD->getStorageClass() == SC_Register &&
2865         VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
2866       return EmitGlobalNamedRegister(VD, CGM);
2867 
2868     // If this DeclRefExpr does not constitute an odr-use of the variable,
2869     // we're not permitted to emit a reference to it in general, and it might
2870     // not be captured if capture would be necessary for a use. Emit the
2871     // constant value directly instead.
2872     if (E->isNonOdrUse() == NOUR_Constant &&
2873         (VD->getType()->isReferenceType() ||
2874          !canEmitSpuriousReferenceToVariable(*this, E, VD))) {
2875       VD->getAnyInitializer(VD);
2876       llvm::Constant *Val = ConstantEmitter(*this).emitAbstract(
2877           E->getLocation(), *VD->evaluateValue(), VD->getType());
2878       assert(Val && "failed to emit constant expression");
2879 
2880       Address Addr = Address::invalid();
2881       if (!VD->getType()->isReferenceType()) {
2882         // Spill the constant value to a global.
2883         Addr = CGM.createUnnamedGlobalFrom(*VD, Val,
2884                                            getContext().getDeclAlign(VD));
2885         llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType());
2886         auto *PTy = llvm::PointerType::get(
2887             VarTy, getTypes().getTargetAddressSpace(VD->getType()));
2888         Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy, VarTy);
2889       } else {
2890         // Should we be using the alignment of the constant pointer we emitted?
2891         CharUnits Alignment =
2892             CGM.getNaturalTypeAlignment(E->getType(),
2893                                         /* BaseInfo= */ nullptr,
2894                                         /* TBAAInfo= */ nullptr,
2895                                         /* forPointeeType= */ true);
2896         Addr = Address(Val, ConvertTypeForMem(E->getType()), Alignment);
2897       }
2898       return MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2899     }
2900 
2901     // FIXME: Handle other kinds of non-odr-use DeclRefExprs.
2902 
2903     // Check for captured variables.
2904     if (E->refersToEnclosingVariableOrCapture()) {
2905       VD = VD->getCanonicalDecl();
2906       if (auto *FD = LambdaCaptureFields.lookup(VD))
2907         return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
2908       if (CapturedStmtInfo) {
2909         auto I = LocalDeclMap.find(VD);
2910         if (I != LocalDeclMap.end()) {
2911           LValue CapLVal;
2912           if (VD->getType()->isReferenceType())
2913             CapLVal = EmitLoadOfReferenceLValue(I->second, VD->getType(),
2914                                                 AlignmentSource::Decl);
2915           else
2916             CapLVal = MakeAddrLValue(I->second, T);
2917           // Mark lvalue as nontemporal if the variable is marked as nontemporal
2918           // in simd context.
2919           if (getLangOpts().OpenMP &&
2920               CGM.getOpenMPRuntime().isNontemporalDecl(VD))
2921             CapLVal.setNontemporal(/*Value=*/true);
2922           return CapLVal;
2923         }
2924         LValue CapLVal =
2925             EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
2926                                     CapturedStmtInfo->getContextValue());
2927         Address LValueAddress = CapLVal.getAddress(*this);
2928         CapLVal = MakeAddrLValue(
2929             Address(LValueAddress.getPointer(), LValueAddress.getElementType(),
2930                     getContext().getDeclAlign(VD)),
2931             CapLVal.getType(), LValueBaseInfo(AlignmentSource::Decl),
2932             CapLVal.getTBAAInfo());
2933         // Mark lvalue as nontemporal if the variable is marked as nontemporal
2934         // in simd context.
2935         if (getLangOpts().OpenMP &&
2936             CGM.getOpenMPRuntime().isNontemporalDecl(VD))
2937           CapLVal.setNontemporal(/*Value=*/true);
2938         return CapLVal;
2939       }
2940 
2941       assert(isa<BlockDecl>(CurCodeDecl));
2942       Address addr = GetAddrOfBlockDecl(VD);
2943       return MakeAddrLValue(addr, T, AlignmentSource::Decl);
2944     }
2945   }
2946 
2947   // FIXME: We should be able to assert this for FunctionDecls as well!
2948   // FIXME: We should be able to assert this for all DeclRefExprs, not just
2949   // those with a valid source location.
2950   assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() ||
2951           !E->getLocation().isValid()) &&
2952          "Should not use decl without marking it used!");
2953 
2954   if (ND->hasAttr<WeakRefAttr>()) {
2955     const auto *VD = cast<ValueDecl>(ND);
2956     ConstantAddress Aliasee = CGM.GetWeakRefReference(VD);
2957     return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl);
2958   }
2959 
2960   if (const auto *VD = dyn_cast<VarDecl>(ND)) {
2961     // Check if this is a global variable.
2962     if (VD->hasLinkage() || VD->isStaticDataMember())
2963       return EmitGlobalVarDeclLValue(*this, E, VD);
2964 
2965     Address addr = Address::invalid();
2966 
2967     // The variable should generally be present in the local decl map.
2968     auto iter = LocalDeclMap.find(VD);
2969     if (iter != LocalDeclMap.end()) {
2970       addr = iter->second;
2971 
2972     // Otherwise, it might be static local we haven't emitted yet for
2973     // some reason; most likely, because it's in an outer function.
2974     } else if (VD->isStaticLocal()) {
2975       llvm::Constant *var = CGM.getOrCreateStaticVarDecl(
2976           *VD, CGM.getLLVMLinkageVarDefinition(VD));
2977       addr = Address(
2978           var, ConvertTypeForMem(VD->getType()), getContext().getDeclAlign(VD));
2979 
2980     // No other cases for now.
2981     } else {
2982       llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
2983     }
2984 
2985     // Handle threadlocal function locals.
2986     if (VD->getTLSKind() != VarDecl::TLS_None)
2987       addr = addr.withPointer(
2988           Builder.CreateThreadLocalAddress(addr.getPointer()), NotKnownNonNull);
2989 
2990     // Check for OpenMP threadprivate variables.
2991     if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
2992         VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
2993       return EmitThreadPrivateVarDeclLValue(
2994           *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()),
2995           E->getExprLoc());
2996     }
2997 
2998     // Drill into block byref variables.
2999     bool isBlockByref = VD->isEscapingByref();
3000     if (isBlockByref) {
3001       addr = emitBlockByrefAddress(addr, VD);
3002     }
3003 
3004     // Drill into reference types.
3005     LValue LV = VD->getType()->isReferenceType() ?
3006         EmitLoadOfReferenceLValue(addr, VD->getType(), AlignmentSource::Decl) :
3007         MakeAddrLValue(addr, T, AlignmentSource::Decl);
3008 
3009     bool isLocalStorage = VD->hasLocalStorage();
3010 
3011     bool NonGCable = isLocalStorage &&
3012                      !VD->getType()->isReferenceType() &&
3013                      !isBlockByref;
3014     if (NonGCable) {
3015       LV.getQuals().removeObjCGCAttr();
3016       LV.setNonGC(true);
3017     }
3018 
3019     bool isImpreciseLifetime =
3020       (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
3021     if (isImpreciseLifetime)
3022       LV.setARCPreciseLifetime(ARCImpreciseLifetime);
3023     setObjCGCLValueClass(getContext(), E, LV);
3024     return LV;
3025   }
3026 
3027   if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
3028     LValue LV = EmitFunctionDeclLValue(*this, E, FD);
3029 
3030     // Emit debuginfo for the function declaration if the target wants to.
3031     if (getContext().getTargetInfo().allowDebugInfoForExternalRef()) {
3032       if (CGDebugInfo *DI = CGM.getModuleDebugInfo()) {
3033         auto *Fn =
3034             cast<llvm::Function>(LV.getPointer(*this)->stripPointerCasts());
3035         if (!Fn->getSubprogram())
3036           DI->EmitFunctionDecl(FD, FD->getLocation(), T, Fn);
3037       }
3038     }
3039 
3040     return LV;
3041   }
3042 
3043   // FIXME: While we're emitting a binding from an enclosing scope, all other
3044   // DeclRefExprs we see should be implicitly treated as if they also refer to
3045   // an enclosing scope.
3046   if (const auto *BD = dyn_cast<BindingDecl>(ND)) {
3047     if (E->refersToEnclosingVariableOrCapture()) {
3048       auto *FD = LambdaCaptureFields.lookup(BD);
3049       return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3050     }
3051     return EmitLValue(BD->getBinding());
3052   }
3053 
3054   // We can form DeclRefExprs naming GUID declarations when reconstituting
3055   // non-type template parameters into expressions.
3056   if (const auto *GD = dyn_cast<MSGuidDecl>(ND))
3057     return MakeAddrLValue(CGM.GetAddrOfMSGuidDecl(GD), T,
3058                           AlignmentSource::Decl);
3059 
3060   if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) {
3061     auto ATPO = CGM.GetAddrOfTemplateParamObject(TPO);
3062     auto AS = getLangASFromTargetAS(ATPO.getAddressSpace());
3063 
3064     if (AS != T.getAddressSpace()) {
3065       auto TargetAS = getContext().getTargetAddressSpace(T.getAddressSpace());
3066       auto PtrTy = ATPO.getElementType()->getPointerTo(TargetAS);
3067       auto ASC = getTargetHooks().performAddrSpaceCast(
3068           CGM, ATPO.getPointer(), AS, T.getAddressSpace(), PtrTy);
3069       ATPO = ConstantAddress(ASC, ATPO.getElementType(), ATPO.getAlignment());
3070     }
3071 
3072     return MakeAddrLValue(ATPO, T, AlignmentSource::Decl);
3073   }
3074 
3075   llvm_unreachable("Unhandled DeclRefExpr");
3076 }
3077 
3078 LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
3079   // __extension__ doesn't affect lvalue-ness.
3080   if (E->getOpcode() == UO_Extension)
3081     return EmitLValue(E->getSubExpr());
3082 
3083   QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
3084   switch (E->getOpcode()) {
3085   default: llvm_unreachable("Unknown unary operator lvalue!");
3086   case UO_Deref: {
3087     QualType T = E->getSubExpr()->getType()->getPointeeType();
3088     assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
3089 
3090     LValueBaseInfo BaseInfo;
3091     TBAAAccessInfo TBAAInfo;
3092     Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &BaseInfo,
3093                                             &TBAAInfo);
3094     LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
3095     LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());
3096 
3097     // We should not generate __weak write barrier on indirect reference
3098     // of a pointer to object; as in void foo (__weak id *param); *param = 0;
3099     // But, we continue to generate __strong write barrier on indirect write
3100     // into a pointer to object.
3101     if (getLangOpts().ObjC &&
3102         getLangOpts().getGC() != LangOptions::NonGC &&
3103         LV.isObjCWeak())
3104       LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
3105     return LV;
3106   }
3107   case UO_Real:
3108   case UO_Imag: {
3109     LValue LV = EmitLValue(E->getSubExpr());
3110     assert(LV.isSimple() && "real/imag on non-ordinary l-value");
3111 
3112     // __real is valid on scalars.  This is a faster way of testing that.
3113     // __imag can only produce an rvalue on scalars.
3114     if (E->getOpcode() == UO_Real &&
3115         !LV.getAddress(*this).getElementType()->isStructTy()) {
3116       assert(E->getSubExpr()->getType()->isArithmeticType());
3117       return LV;
3118     }
3119 
3120     QualType T = ExprTy->castAs<ComplexType>()->getElementType();
3121 
3122     Address Component =
3123         (E->getOpcode() == UO_Real
3124              ? emitAddrOfRealComponent(LV.getAddress(*this), LV.getType())
3125              : emitAddrOfImagComponent(LV.getAddress(*this), LV.getType()));
3126     LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(),
3127                                    CGM.getTBAAInfoForSubobject(LV, T));
3128     ElemLV.getQuals().addQualifiers(LV.getQuals());
3129     return ElemLV;
3130   }
3131   case UO_PreInc:
3132   case UO_PreDec: {
3133     LValue LV = EmitLValue(E->getSubExpr());
3134     bool isInc = E->getOpcode() == UO_PreInc;
3135 
3136     if (E->getType()->isAnyComplexType())
3137       EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
3138     else
3139       EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
3140     return LV;
3141   }
3142   }
3143 }
3144 
3145 LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
3146   return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
3147                         E->getType(), AlignmentSource::Decl);
3148 }
3149 
3150 LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
3151   return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
3152                         E->getType(), AlignmentSource::Decl);
3153 }
3154 
3155 LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
3156   auto SL = E->getFunctionName();
3157   assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
3158   StringRef FnName = CurFn->getName();
3159   if (FnName.starts_with("\01"))
3160     FnName = FnName.substr(1);
3161   StringRef NameItems[] = {
3162       PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName};
3163   std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
3164   if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
3165     std::string Name = std::string(SL->getString());
3166     if (!Name.empty()) {
3167       unsigned Discriminator =
3168           CGM.getCXXABI().getMangleContext().getBlockId(BD, true);
3169       if (Discriminator)
3170         Name += "_" + Twine(Discriminator + 1).str();
3171       auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str());
3172       return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
3173     } else {
3174       auto C =
3175           CGM.GetAddrOfConstantCString(std::string(FnName), GVName.c_str());
3176       return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
3177     }
3178   }
3179   auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
3180   return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
3181 }
3182 
3183 /// Emit a type description suitable for use by a runtime sanitizer library. The
3184 /// format of a type descriptor is
3185 ///
3186 /// \code
3187 ///   { i16 TypeKind, i16 TypeInfo }
3188 /// \endcode
3189 ///
3190 /// followed by an array of i8 containing the type name. TypeKind is 0 for an
3191 /// integer, 1 for a floating point value, and -1 for anything else.
3192 llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
3193   // Only emit each type's descriptor once.
3194   if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T))
3195     return C;
3196 
3197   uint16_t TypeKind = -1;
3198   uint16_t TypeInfo = 0;
3199 
3200   if (T->isIntegerType()) {
3201     TypeKind = 0;
3202     TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
3203                (T->isSignedIntegerType() ? 1 : 0);
3204   } else if (T->isFloatingType()) {
3205     TypeKind = 1;
3206     TypeInfo = getContext().getTypeSize(T);
3207   }
3208 
3209   // Format the type name as if for a diagnostic, including quotes and
3210   // optionally an 'aka'.
3211   SmallString<32> Buffer;
3212   CGM.getDiags().ConvertArgToString(
3213       DiagnosticsEngine::ak_qualtype, (intptr_t)T.getAsOpaquePtr(), StringRef(),
3214       StringRef(), std::nullopt, Buffer, std::nullopt);
3215 
3216   llvm::Constant *Components[] = {
3217     Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
3218     llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
3219   };
3220   llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);
3221 
3222   auto *GV = new llvm::GlobalVariable(
3223       CGM.getModule(), Descriptor->getType(),
3224       /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor);
3225   GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3226   CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV);
3227 
3228   // Remember the descriptor for this type.
3229   CGM.setTypeDescriptorInMap(T, GV);
3230 
3231   return GV;
3232 }
3233 
3234 llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
3235   llvm::Type *TargetTy = IntPtrTy;
3236 
3237   if (V->getType() == TargetTy)
3238     return V;
3239 
3240   // Floating-point types which fit into intptr_t are bitcast to integers
3241   // and then passed directly (after zero-extension, if necessary).
3242   if (V->getType()->isFloatingPointTy()) {
3243     unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedValue();
3244     if (Bits <= TargetTy->getIntegerBitWidth())
3245       V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
3246                                                          Bits));
3247   }
3248 
3249   // Integers which fit in intptr_t are zero-extended and passed directly.
3250   if (V->getType()->isIntegerTy() &&
3251       V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
3252     return Builder.CreateZExt(V, TargetTy);
3253 
3254   // Pointers are passed directly, everything else is passed by address.
3255   if (!V->getType()->isPointerTy()) {
3256     Address Ptr = CreateDefaultAlignTempAlloca(V->getType());
3257     Builder.CreateStore(V, Ptr);
3258     V = Ptr.getPointer();
3259   }
3260   return Builder.CreatePtrToInt(V, TargetTy);
3261 }
3262 
3263 /// Emit a representation of a SourceLocation for passing to a handler
3264 /// in a sanitizer runtime library. The format for this data is:
3265 /// \code
3266 ///   struct SourceLocation {
3267 ///     const char *Filename;
3268 ///     int32_t Line, Column;
3269 ///   };
3270 /// \endcode
3271 /// For an invalid SourceLocation, the Filename pointer is null.
3272 llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
3273   llvm::Constant *Filename;
3274   int Line, Column;
3275 
3276   PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
3277   if (PLoc.isValid()) {
3278     StringRef FilenameString = PLoc.getFilename();
3279 
3280     int PathComponentsToStrip =
3281         CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip;
3282     if (PathComponentsToStrip < 0) {
3283       assert(PathComponentsToStrip != INT_MIN);
3284       int PathComponentsToKeep = -PathComponentsToStrip;
3285       auto I = llvm::sys::path::rbegin(FilenameString);
3286       auto E = llvm::sys::path::rend(FilenameString);
3287       while (I != E && --PathComponentsToKeep)
3288         ++I;
3289 
3290       FilenameString = FilenameString.substr(I - E);
3291     } else if (PathComponentsToStrip > 0) {
3292       auto I = llvm::sys::path::begin(FilenameString);
3293       auto E = llvm::sys::path::end(FilenameString);
3294       while (I != E && PathComponentsToStrip--)
3295         ++I;
3296 
3297       if (I != E)
3298         FilenameString =
3299             FilenameString.substr(I - llvm::sys::path::begin(FilenameString));
3300       else
3301         FilenameString = llvm::sys::path::filename(FilenameString);
3302     }
3303 
3304     auto FilenameGV =
3305         CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src");
3306     CGM.getSanitizerMetadata()->disableSanitizerForGlobal(
3307         cast<llvm::GlobalVariable>(
3308             FilenameGV.getPointer()->stripPointerCasts()));
3309     Filename = FilenameGV.getPointer();
3310     Line = PLoc.getLine();
3311     Column = PLoc.getColumn();
3312   } else {
3313     Filename = llvm::Constant::getNullValue(Int8PtrTy);
3314     Line = Column = 0;
3315   }
3316 
3317   llvm::Constant *Data[] = {Filename, Builder.getInt32(Line),
3318                             Builder.getInt32(Column)};
3319 
3320   return llvm::ConstantStruct::getAnon(Data);
3321 }
3322 
3323 namespace {
3324 /// Specify under what conditions this check can be recovered
3325 enum class CheckRecoverableKind {
3326   /// Always terminate program execution if this check fails.
3327   Unrecoverable,
3328   /// Check supports recovering, runtime has both fatal (noreturn) and
3329   /// non-fatal handlers for this check.
3330   Recoverable,
3331   /// Runtime conditionally aborts, always need to support recovery.
3332   AlwaysRecoverable
3333 };
3334 }
3335 
3336 static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) {
3337   assert(Kind.countPopulation() == 1);
3338   if (Kind == SanitizerKind::Vptr)
3339     return CheckRecoverableKind::AlwaysRecoverable;
3340   else if (Kind == SanitizerKind::Return || Kind == SanitizerKind::Unreachable)
3341     return CheckRecoverableKind::Unrecoverable;
3342   else
3343     return CheckRecoverableKind::Recoverable;
3344 }
3345 
3346 namespace {
3347 struct SanitizerHandlerInfo {
3348   char const *const Name;
3349   unsigned Version;
3350 };
3351 }
3352 
3353 const SanitizerHandlerInfo SanitizerHandlers[] = {
3354 #define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version},
3355     LIST_SANITIZER_CHECKS
3356 #undef SANITIZER_CHECK
3357 };
3358 
3359 static void emitCheckHandlerCall(CodeGenFunction &CGF,
3360                                  llvm::FunctionType *FnType,
3361                                  ArrayRef<llvm::Value *> FnArgs,
3362                                  SanitizerHandler CheckHandler,
3363                                  CheckRecoverableKind RecoverKind, bool IsFatal,
3364                                  llvm::BasicBlock *ContBB) {
3365   assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
3366   std::optional<ApplyDebugLocation> DL;
3367   if (!CGF.Builder.getCurrentDebugLocation()) {
3368     // Ensure that the call has at least an artificial debug location.
3369     DL.emplace(CGF, SourceLocation());
3370   }
3371   bool NeedsAbortSuffix =
3372       IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
3373   bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime;
3374   const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler];
3375   const StringRef CheckName = CheckInfo.Name;
3376   std::string FnName = "__ubsan_handle_" + CheckName.str();
3377   if (CheckInfo.Version && !MinimalRuntime)
3378     FnName += "_v" + llvm::utostr(CheckInfo.Version);
3379   if (MinimalRuntime)
3380     FnName += "_minimal";
3381   if (NeedsAbortSuffix)
3382     FnName += "_abort";
3383   bool MayReturn =
3384       !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;
3385 
3386   llvm::AttrBuilder B(CGF.getLLVMContext());
3387   if (!MayReturn) {
3388     B.addAttribute(llvm::Attribute::NoReturn)
3389         .addAttribute(llvm::Attribute::NoUnwind);
3390   }
3391   B.addUWTableAttr(llvm::UWTableKind::Default);
3392 
3393   llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(
3394       FnType, FnName,
3395       llvm::AttributeList::get(CGF.getLLVMContext(),
3396                                llvm::AttributeList::FunctionIndex, B),
3397       /*Local=*/true);
3398   llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
3399   if (!MayReturn) {
3400     HandlerCall->setDoesNotReturn();
3401     CGF.Builder.CreateUnreachable();
3402   } else {
3403     CGF.Builder.CreateBr(ContBB);
3404   }
3405 }
3406 
3407 void CodeGenFunction::EmitCheck(
3408     ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
3409     SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,
3410     ArrayRef<llvm::Value *> DynamicArgs) {
3411   assert(IsSanitizerScope);
3412   assert(Checked.size() > 0);
3413   assert(CheckHandler >= 0 &&
3414          size_t(CheckHandler) < std::size(SanitizerHandlers));
3415   const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;
3416 
3417   llvm::Value *FatalCond = nullptr;
3418   llvm::Value *RecoverableCond = nullptr;
3419   llvm::Value *TrapCond = nullptr;
3420   for (int i = 0, n = Checked.size(); i < n; ++i) {
3421     llvm::Value *Check = Checked[i].first;
3422     // -fsanitize-trap= overrides -fsanitize-recover=.
3423     llvm::Value *&Cond =
3424         CGM.getCodeGenOpts().SanitizeTrap.has(Checked[i].second)
3425             ? TrapCond
3426             : CGM.getCodeGenOpts().SanitizeRecover.has(Checked[i].second)
3427                   ? RecoverableCond
3428                   : FatalCond;
3429     Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check;
3430   }
3431 
3432   if (TrapCond)
3433     EmitTrapCheck(TrapCond, CheckHandler);
3434   if (!FatalCond && !RecoverableCond)
3435     return;
3436 
3437   llvm::Value *JointCond;
3438   if (FatalCond && RecoverableCond)
3439     JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);
3440   else
3441     JointCond = FatalCond ? FatalCond : RecoverableCond;
3442   assert(JointCond);
3443 
3444   CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);
3445   assert(SanOpts.has(Checked[0].second));
3446 #ifndef NDEBUG
3447   for (int i = 1, n = Checked.size(); i < n; ++i) {
3448     assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
3449            "All recoverable kinds in a single check must be same!");
3450     assert(SanOpts.has(Checked[i].second));
3451   }
3452 #endif
3453 
3454   llvm::BasicBlock *Cont = createBasicBlock("cont");
3455   llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
3456   llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
3457   // Give hint that we very much don't expect to execute the handler
3458   // Value chosen to match UR_NONTAKEN_WEIGHT, see BranchProbabilityInfo.cpp
3459   llvm::MDBuilder MDHelper(getLLVMContext());
3460   llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
3461   Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
3462   EmitBlock(Handlers);
3463 
3464   // Handler functions take an i8* pointing to the (handler-specific) static
3465   // information block, followed by a sequence of intptr_t arguments
3466   // representing operand values.
3467   SmallVector<llvm::Value *, 4> Args;
3468   SmallVector<llvm::Type *, 4> ArgTypes;
3469   if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) {
3470     Args.reserve(DynamicArgs.size() + 1);
3471     ArgTypes.reserve(DynamicArgs.size() + 1);
3472 
3473     // Emit handler arguments and create handler function type.
3474     if (!StaticArgs.empty()) {
3475       llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3476       auto *InfoPtr = new llvm::GlobalVariable(
3477           CGM.getModule(), Info->getType(), false,
3478           llvm::GlobalVariable::PrivateLinkage, Info, "", nullptr,
3479           llvm::GlobalVariable::NotThreadLocal,
3480           CGM.getDataLayout().getDefaultGlobalsAddressSpace());
3481       InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3482       CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
3483       Args.push_back(InfoPtr);
3484       ArgTypes.push_back(Args.back()->getType());
3485     }
3486 
3487     for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) {
3488       Args.push_back(EmitCheckValue(DynamicArgs[i]));
3489       ArgTypes.push_back(IntPtrTy);
3490     }
3491   }
3492 
3493   llvm::FunctionType *FnType =
3494     llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
3495 
3496   if (!FatalCond || !RecoverableCond) {
3497     // Simple case: we need to generate a single handler call, either
3498     // fatal, or non-fatal.
3499     emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind,
3500                          (FatalCond != nullptr), Cont);
3501   } else {
3502     // Emit two handler calls: first one for set of unrecoverable checks,
3503     // another one for recoverable.
3504     llvm::BasicBlock *NonFatalHandlerBB =
3505         createBasicBlock("non_fatal." + CheckName);
3506     llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
3507     Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
3508     EmitBlock(FatalHandlerBB);
3509     emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true,
3510                          NonFatalHandlerBB);
3511     EmitBlock(NonFatalHandlerBB);
3512     emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false,
3513                          Cont);
3514   }
3515 
3516   EmitBlock(Cont);
3517 }
3518 
3519 void CodeGenFunction::EmitCfiSlowPathCheck(
3520     SanitizerMask Kind, llvm::Value *Cond, llvm::ConstantInt *TypeId,
3521     llvm::Value *Ptr, ArrayRef<llvm::Constant *> StaticArgs) {
3522   llvm::BasicBlock *Cont = createBasicBlock("cfi.cont");
3523 
3524   llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath");
3525   llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB);
3526 
3527   llvm::MDBuilder MDHelper(getLLVMContext());
3528   llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
3529   BI->setMetadata(llvm::LLVMContext::MD_prof, Node);
3530 
3531   EmitBlock(CheckBB);
3532 
3533   bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Kind);
3534 
3535   llvm::CallInst *CheckCall;
3536   llvm::FunctionCallee SlowPathFn;
3537   if (WithDiag) {
3538     llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3539     auto *InfoPtr =
3540         new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
3541                                  llvm::GlobalVariable::PrivateLinkage, Info);
3542     InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3543     CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
3544 
3545     SlowPathFn = CGM.getModule().getOrInsertFunction(
3546         "__cfi_slowpath_diag",
3547         llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy},
3548                                 false));
3549     CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr, InfoPtr});
3550   } else {
3551     SlowPathFn = CGM.getModule().getOrInsertFunction(
3552         "__cfi_slowpath",
3553         llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false));
3554     CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});
3555   }
3556 
3557   CGM.setDSOLocal(
3558       cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts()));
3559   CheckCall->setDoesNotThrow();
3560 
3561   EmitBlock(Cont);
3562 }
3563 
3564 // Emit a stub for __cfi_check function so that the linker knows about this
3565 // symbol in LTO mode.
3566 void CodeGenFunction::EmitCfiCheckStub() {
3567   llvm::Module *M = &CGM.getModule();
3568   auto &Ctx = M->getContext();
3569   llvm::Function *F = llvm::Function::Create(
3570       llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy}, false),
3571       llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
3572   F->setAlignment(llvm::Align(4096));
3573   CGM.setDSOLocal(F);
3574   llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
3575   // CrossDSOCFI pass is not executed if there is no executable code.
3576   SmallVector<llvm::Value*> Args{F->getArg(2), F->getArg(1)};
3577   llvm::CallInst::Create(M->getFunction("__cfi_check_fail"), Args, "", BB);
3578   llvm::ReturnInst::Create(Ctx, nullptr, BB);
3579 }
3580 
3581 // This function is basically a switch over the CFI failure kind, which is
3582 // extracted from CFICheckFailData (1st function argument). Each case is either
3583 // llvm.trap or a call to one of the two runtime handlers, based on
3584 // -fsanitize-trap and -fsanitize-recover settings.  Default case (invalid
3585 // failure kind) traps, but this should really never happen.  CFICheckFailData
3586 // can be nullptr if the calling module has -fsanitize-trap behavior for this
3587 // check kind; in this case __cfi_check_fail traps as well.
3588 void CodeGenFunction::EmitCfiCheckFail() {
3589   SanitizerScope SanScope(this);
3590   FunctionArgList Args;
3591   ImplicitParamDecl ArgData(getContext(), getContext().VoidPtrTy,
3592                             ImplicitParamKind::Other);
3593   ImplicitParamDecl ArgAddr(getContext(), getContext().VoidPtrTy,
3594                             ImplicitParamKind::Other);
3595   Args.push_back(&ArgData);
3596   Args.push_back(&ArgAddr);
3597 
3598   const CGFunctionInfo &FI =
3599     CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, Args);
3600 
3601   llvm::Function *F = llvm::Function::Create(
3602       llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false),
3603       llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule());
3604 
3605   CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
3606   CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
3607   F->setVisibility(llvm::GlobalValue::HiddenVisibility);
3608 
3609   StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args,
3610                 SourceLocation());
3611 
3612   // This function is not affected by NoSanitizeList. This function does
3613   // not have a source location, but "src:*" would still apply. Revert any
3614   // changes to SanOpts made in StartFunction.
3615   SanOpts = CGM.getLangOpts().Sanitize;
3616 
3617   llvm::Value *Data =
3618       EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false,
3619                        CGM.getContext().VoidPtrTy, ArgData.getLocation());
3620   llvm::Value *Addr =
3621       EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false,
3622                        CGM.getContext().VoidPtrTy, ArgAddr.getLocation());
3623 
3624   // Data == nullptr means the calling module has trap behaviour for this check.
3625   llvm::Value *DataIsNotNullPtr =
3626       Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy));
3627   EmitTrapCheck(DataIsNotNullPtr, SanitizerHandler::CFICheckFail);
3628 
3629   llvm::StructType *SourceLocationTy =
3630       llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty);
3631   llvm::StructType *CfiCheckFailDataTy =
3632       llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);
3633 
3634   llvm::Value *V = Builder.CreateConstGEP2_32(
3635       CfiCheckFailDataTy,
3636       Builder.CreatePointerCast(Data, CfiCheckFailDataTy->getPointerTo(0)), 0,
3637       0);
3638 
3639   Address CheckKindAddr(V, Int8Ty, getIntAlign());
3640   llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);
3641 
3642   llvm::Value *AllVtables = llvm::MetadataAsValue::get(
3643       CGM.getLLVMContext(),
3644       llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
3645   llvm::Value *ValidVtable = Builder.CreateZExt(
3646       Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
3647                          {Addr, AllVtables}),
3648       IntPtrTy);
3649 
3650   const std::pair<int, SanitizerMask> CheckKinds[] = {
3651       {CFITCK_VCall, SanitizerKind::CFIVCall},
3652       {CFITCK_NVCall, SanitizerKind::CFINVCall},
3653       {CFITCK_DerivedCast, SanitizerKind::CFIDerivedCast},
3654       {CFITCK_UnrelatedCast, SanitizerKind::CFIUnrelatedCast},
3655       {CFITCK_ICall, SanitizerKind::CFIICall}};
3656 
3657   SmallVector<std::pair<llvm::Value *, SanitizerMask>, 5> Checks;
3658   for (auto CheckKindMaskPair : CheckKinds) {
3659     int Kind = CheckKindMaskPair.first;
3660     SanitizerMask Mask = CheckKindMaskPair.second;
3661     llvm::Value *Cond =
3662         Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind));
3663     if (CGM.getLangOpts().Sanitize.has(Mask))
3664       EmitCheck(std::make_pair(Cond, Mask), SanitizerHandler::CFICheckFail, {},
3665                 {Data, Addr, ValidVtable});
3666     else
3667       EmitTrapCheck(Cond, SanitizerHandler::CFICheckFail);
3668   }
3669 
3670   FinishFunction();
3671   // The only reference to this function will be created during LTO link.
3672   // Make sure it survives until then.
3673   CGM.addUsedGlobal(F);
3674 }
3675 
3676 void CodeGenFunction::EmitUnreachable(SourceLocation Loc) {
3677   if (SanOpts.has(SanitizerKind::Unreachable)) {
3678     SanitizerScope SanScope(this);
3679     EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
3680                              SanitizerKind::Unreachable),
3681               SanitizerHandler::BuiltinUnreachable,
3682               EmitCheckSourceLocation(Loc), std::nullopt);
3683   }
3684   Builder.CreateUnreachable();
3685 }
3686 
3687 void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
3688                                     SanitizerHandler CheckHandlerID) {
3689   llvm::BasicBlock *Cont = createBasicBlock("cont");
3690 
3691   // If we're optimizing, collapse all calls to trap down to just one per
3692   // check-type per function to save on code size.
3693   if (TrapBBs.size() <= CheckHandlerID)
3694     TrapBBs.resize(CheckHandlerID + 1);
3695 
3696   llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];
3697 
3698   if (!ClSanitizeDebugDeoptimization &&
3699       CGM.getCodeGenOpts().OptimizationLevel && TrapBB &&
3700       (!CurCodeDecl || !CurCodeDecl->hasAttr<OptimizeNoneAttr>())) {
3701     auto Call = TrapBB->begin();
3702     assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB");
3703 
3704     Call->applyMergedLocation(Call->getDebugLoc(),
3705                               Builder.getCurrentDebugLocation());
3706     Builder.CreateCondBr(Checked, Cont, TrapBB);
3707   } else {
3708     TrapBB = createBasicBlock("trap");
3709     Builder.CreateCondBr(Checked, Cont, TrapBB);
3710     EmitBlock(TrapBB);
3711 
3712     llvm::CallInst *TrapCall = Builder.CreateCall(
3713         CGM.getIntrinsic(llvm::Intrinsic::ubsantrap),
3714         llvm::ConstantInt::get(CGM.Int8Ty, ClSanitizeDebugDeoptimization
3715                                                ? TrapBB->getParent()->size()
3716                                                : CheckHandlerID));
3717 
3718     if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
3719       auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
3720                                     CGM.getCodeGenOpts().TrapFuncName);
3721       TrapCall->addFnAttr(A);
3722     }
3723     TrapCall->setDoesNotReturn();
3724     TrapCall->setDoesNotThrow();
3725     Builder.CreateUnreachable();
3726   }
3727 
3728   EmitBlock(Cont);
3729 }
3730 
3731 llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
3732   llvm::CallInst *TrapCall =
3733       Builder.CreateCall(CGM.getIntrinsic(IntrID));
3734 
3735   if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
3736     auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
3737                                   CGM.getCodeGenOpts().TrapFuncName);
3738     TrapCall->addFnAttr(A);
3739   }
3740 
3741   return TrapCall;
3742 }
3743 
3744 Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
3745                                                  LValueBaseInfo *BaseInfo,
3746                                                  TBAAAccessInfo *TBAAInfo) {
3747   assert(E->getType()->isArrayType() &&
3748          "Array to pointer decay must have array source type!");
3749 
3750   // Expressions of array type can't be bitfields or vector elements.
3751   LValue LV = EmitLValue(E);
3752   Address Addr = LV.getAddress(*this);
3753 
3754   // If the array type was an incomplete type, we need to make sure
3755   // the decay ends up being the right type.
3756   llvm::Type *NewTy = ConvertType(E->getType());
3757   Addr = Addr.withElementType(NewTy);
3758 
3759   // Note that VLA pointers are always decayed, so we don't need to do
3760   // anything here.
3761   if (!E->getType()->isVariableArrayType()) {
3762     assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
3763            "Expected pointer to array");
3764     Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
3765   }
3766 
3767   // The result of this decay conversion points to an array element within the
3768   // base lvalue. However, since TBAA currently does not support representing
3769   // accesses to elements of member arrays, we conservatively represent accesses
3770   // to the pointee object as if it had no any base lvalue specified.
3771   // TODO: Support TBAA for member arrays.
3772   QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType();
3773   if (BaseInfo) *BaseInfo = LV.getBaseInfo();
3774   if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);
3775 
3776   return Addr.withElementType(ConvertTypeForMem(EltType));
3777 }
3778 
3779 /// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
3780 /// array to pointer, return the array subexpression.
3781 static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
3782   // If this isn't just an array->pointer decay, bail out.
3783   const auto *CE = dyn_cast<CastExpr>(E);
3784   if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
3785     return nullptr;
3786 
3787   // If this is a decay from variable width array, bail out.
3788   const Expr *SubExpr = CE->getSubExpr();
3789   if (SubExpr->getType()->isVariableArrayType())
3790     return nullptr;
3791 
3792   return SubExpr;
3793 }
3794 
3795 static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
3796                                           llvm::Type *elemType,
3797                                           llvm::Value *ptr,
3798                                           ArrayRef<llvm::Value*> indices,
3799                                           bool inbounds,
3800                                           bool signedIndices,
3801                                           SourceLocation loc,
3802                                     const llvm::Twine &name = "arrayidx") {
3803   if (inbounds) {
3804     return CGF.EmitCheckedInBoundsGEP(elemType, ptr, indices, signedIndices,
3805                                       CodeGenFunction::NotSubtraction, loc,
3806                                       name);
3807   } else {
3808     return CGF.Builder.CreateGEP(elemType, ptr, indices, name);
3809   }
3810 }
3811 
3812 static CharUnits getArrayElementAlign(CharUnits arrayAlign,
3813                                       llvm::Value *idx,
3814                                       CharUnits eltSize) {
3815   // If we have a constant index, we can use the exact offset of the
3816   // element we're accessing.
3817   if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
3818     CharUnits offset = constantIdx->getZExtValue() * eltSize;
3819     return arrayAlign.alignmentAtOffset(offset);
3820 
3821   // Otherwise, use the worst-case alignment for any element.
3822   } else {
3823     return arrayAlign.alignmentOfArrayElement(eltSize);
3824   }
3825 }
3826 
3827 static QualType getFixedSizeElementType(const ASTContext &ctx,
3828                                         const VariableArrayType *vla) {
3829   QualType eltType;
3830   do {
3831     eltType = vla->getElementType();
3832   } while ((vla = ctx.getAsVariableArrayType(eltType)));
3833   return eltType;
3834 }
3835 
3836 static bool hasBPFPreserveStaticOffset(const RecordDecl *D) {
3837   return D && D->hasAttr<BPFPreserveStaticOffsetAttr>();
3838 }
3839 
3840 static bool hasBPFPreserveStaticOffset(const Expr *E) {
3841   if (!E)
3842     return false;
3843   QualType PointeeType = E->getType()->getPointeeType();
3844   if (PointeeType.isNull())
3845     return false;
3846   if (const auto *BaseDecl = PointeeType->getAsRecordDecl())
3847     return hasBPFPreserveStaticOffset(BaseDecl);
3848   return false;
3849 }
3850 
3851 // Wraps Addr with a call to llvm.preserve.static.offset intrinsic.
3852 static Address wrapWithBPFPreserveStaticOffset(CodeGenFunction &CGF,
3853                                                Address &Addr) {
3854   if (!CGF.getTarget().getTriple().isBPF())
3855     return Addr;
3856 
3857   llvm::Function *Fn =
3858       CGF.CGM.getIntrinsic(llvm::Intrinsic::preserve_static_offset);
3859   llvm::CallInst *Call = CGF.Builder.CreateCall(Fn, {Addr.getPointer()});
3860   return Address(Call, Addr.getElementType(), Addr.getAlignment());
3861 }
3862 
3863 /// Given an array base, check whether its member access belongs to a record
3864 /// with preserve_access_index attribute or not.
3865 static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {
3866   if (!ArrayBase || !CGF.getDebugInfo())
3867     return false;
3868 
3869   // Only support base as either a MemberExpr or DeclRefExpr.
3870   // DeclRefExpr to cover cases like:
3871   //    struct s { int a; int b[10]; };
3872   //    struct s *p;
3873   //    p[1].a
3874   // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr.
3875   // p->b[5] is a MemberExpr example.
3876   const Expr *E = ArrayBase->IgnoreImpCasts();
3877   if (const auto *ME = dyn_cast<MemberExpr>(E))
3878     return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
3879 
3880   if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
3881     const auto *VarDef = dyn_cast<VarDecl>(DRE->getDecl());
3882     if (!VarDef)
3883       return false;
3884 
3885     const auto *PtrT = VarDef->getType()->getAs<PointerType>();
3886     if (!PtrT)
3887       return false;
3888 
3889     const auto *PointeeT = PtrT->getPointeeType()
3890                              ->getUnqualifiedDesugaredType();
3891     if (const auto *RecT = dyn_cast<RecordType>(PointeeT))
3892       return RecT->getDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
3893     return false;
3894   }
3895 
3896   return false;
3897 }
3898 
3899 static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
3900                                      ArrayRef<llvm::Value *> indices,
3901                                      QualType eltType, bool inbounds,
3902                                      bool signedIndices, SourceLocation loc,
3903                                      QualType *arrayType = nullptr,
3904                                      const Expr *Base = nullptr,
3905                                      const llvm::Twine &name = "arrayidx") {
3906   // All the indices except that last must be zero.
3907 #ifndef NDEBUG
3908   for (auto *idx : indices.drop_back())
3909     assert(isa<llvm::ConstantInt>(idx) &&
3910            cast<llvm::ConstantInt>(idx)->isZero());
3911 #endif
3912 
3913   // Determine the element size of the statically-sized base.  This is
3914   // the thing that the indices are expressed in terms of.
3915   if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
3916     eltType = getFixedSizeElementType(CGF.getContext(), vla);
3917   }
3918 
3919   // We can use that to compute the best alignment of the element.
3920   CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
3921   CharUnits eltAlign =
3922     getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
3923 
3924   if (hasBPFPreserveStaticOffset(Base))
3925     addr = wrapWithBPFPreserveStaticOffset(CGF, addr);
3926 
3927   llvm::Value *eltPtr;
3928   auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());
3929   if (!LastIndex ||
3930       (!CGF.IsInPreservedAIRegion && !IsPreserveAIArrayBase(CGF, Base))) {
3931     eltPtr = emitArraySubscriptGEP(
3932         CGF, addr.getElementType(), addr.getPointer(), indices, inbounds,
3933         signedIndices, loc, name);
3934   } else {
3935     // Remember the original array subscript for bpf target
3936     unsigned idx = LastIndex->getZExtValue();
3937     llvm::DIType *DbgInfo = nullptr;
3938     if (arrayType)
3939       DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc);
3940     eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(addr.getElementType(),
3941                                                         addr.getPointer(),
3942                                                         indices.size() - 1,
3943                                                         idx, DbgInfo);
3944   }
3945 
3946   return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign);
3947 }
3948 
3949 LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
3950                                                bool Accessed) {
3951   // The index must always be an integer, which is not an aggregate.  Emit it
3952   // in lexical order (this complexity is, sadly, required by C++17).
3953   llvm::Value *IdxPre =
3954       (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr;
3955   bool SignedIndices = false;
3956   auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {
3957     auto *Idx = IdxPre;
3958     if (E->getLHS() != E->getIdx()) {
3959       assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
3960       Idx = EmitScalarExpr(E->getIdx());
3961     }
3962 
3963     QualType IdxTy = E->getIdx()->getType();
3964     bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
3965     SignedIndices |= IdxSigned;
3966 
3967     if (SanOpts.has(SanitizerKind::ArrayBounds))
3968       EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);
3969 
3970     // Extend or truncate the index type to 32 or 64-bits.
3971     if (Promote && Idx->getType() != IntPtrTy)
3972       Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
3973 
3974     return Idx;
3975   };
3976   IdxPre = nullptr;
3977 
3978   // If the base is a vector type, then we are forming a vector element lvalue
3979   // with this subscript.
3980   if (E->getBase()->getType()->isVectorType() &&
3981       !isa<ExtVectorElementExpr>(E->getBase())) {
3982     // Emit the vector as an lvalue to get its address.
3983     LValue LHS = EmitLValue(E->getBase());
3984     auto *Idx = EmitIdxAfterBase(/*Promote*/false);
3985     assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
3986     return LValue::MakeVectorElt(LHS.getAddress(*this), Idx,
3987                                  E->getBase()->getType(), LHS.getBaseInfo(),
3988                                  TBAAAccessInfo());
3989   }
3990 
3991   // All the other cases basically behave like simple offsetting.
3992 
3993   // Handle the extvector case we ignored above.
3994   if (isa<ExtVectorElementExpr>(E->getBase())) {
3995     LValue LV = EmitLValue(E->getBase());
3996     auto *Idx = EmitIdxAfterBase(/*Promote*/true);
3997     Address Addr = EmitExtVectorElementLValue(LV);
3998 
3999     QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
4000     Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true,
4001                                  SignedIndices, E->getExprLoc());
4002     return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(),
4003                           CGM.getTBAAInfoForSubobject(LV, EltType));
4004   }
4005 
4006   LValueBaseInfo EltBaseInfo;
4007   TBAAAccessInfo EltTBAAInfo;
4008   Address Addr = Address::invalid();
4009   if (const VariableArrayType *vla =
4010            getContext().getAsVariableArrayType(E->getType())) {
4011     // The base must be a pointer, which is not an aggregate.  Emit
4012     // it.  It needs to be emitted first in case it's what captures
4013     // the VLA bounds.
4014     Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4015     auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4016 
4017     // The element count here is the total number of non-VLA elements.
4018     llvm::Value *numElements = getVLASize(vla).NumElts;
4019 
4020     // Effectively, the multiply by the VLA size is part of the GEP.
4021     // GEP indexes are signed, and scaling an index isn't permitted to
4022     // signed-overflow, so we use the same semantics for our explicit
4023     // multiply.  We suppress this if overflow is not undefined behavior.
4024     if (getLangOpts().isSignedOverflowDefined()) {
4025       Idx = Builder.CreateMul(Idx, numElements);
4026     } else {
4027       Idx = Builder.CreateNSWMul(Idx, numElements);
4028     }
4029 
4030     Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
4031                                  !getLangOpts().isSignedOverflowDefined(),
4032                                  SignedIndices, E->getExprLoc());
4033 
4034   } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
4035     // Indexing over an interface, as in "NSString *P; P[4];"
4036 
4037     // Emit the base pointer.
4038     Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4039     auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4040 
4041     CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
4042     llvm::Value *InterfaceSizeVal =
4043         llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());
4044 
4045     llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);
4046 
4047     // We don't necessarily build correct LLVM struct types for ObjC
4048     // interfaces, so we can't rely on GEP to do this scaling
4049     // correctly, so we need to cast to i8*.  FIXME: is this actually
4050     // true?  A lot of other things in the fragile ABI would break...
4051     llvm::Type *OrigBaseElemTy = Addr.getElementType();
4052 
4053     // Do the GEP.
4054     CharUnits EltAlign =
4055       getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
4056     llvm::Value *EltPtr =
4057         emitArraySubscriptGEP(*this, Int8Ty, Addr.getPointer(), ScaledIdx,
4058                               false, SignedIndices, E->getExprLoc());
4059     Addr = Address(EltPtr, OrigBaseElemTy, EltAlign);
4060   } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
4061     // If this is A[i] where A is an array, the frontend will have decayed the
4062     // base to be a ArrayToPointerDecay implicit cast.  While correct, it is
4063     // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
4064     // "gep x, i" here.  Emit one "gep A, 0, i".
4065     assert(Array->getType()->isArrayType() &&
4066            "Array to pointer decay must have array source type!");
4067     LValue ArrayLV;
4068     // For simple multidimensional array indexing, set the 'accessed' flag for
4069     // better bounds-checking of the base expression.
4070     if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
4071       ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
4072     else
4073       ArrayLV = EmitLValue(Array);
4074     auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4075 
4076     // Propagate the alignment from the array itself to the result.
4077     QualType arrayType = Array->getType();
4078     Addr = emitArraySubscriptGEP(
4079         *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx},
4080         E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices,
4081         E->getExprLoc(), &arrayType, E->getBase());
4082     EltBaseInfo = ArrayLV.getBaseInfo();
4083     EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType());
4084   } else {
4085     // The base must be a pointer; emit it with an estimate of its alignment.
4086     Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4087     auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4088     QualType ptrType = E->getBase()->getType();
4089     Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(),
4090                                  !getLangOpts().isSignedOverflowDefined(),
4091                                  SignedIndices, E->getExprLoc(), &ptrType,
4092                                  E->getBase());
4093   }
4094 
4095   LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo);
4096 
4097   if (getLangOpts().ObjC &&
4098       getLangOpts().getGC() != LangOptions::NonGC) {
4099     LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
4100     setObjCGCLValueClass(getContext(), E, LV);
4101   }
4102   return LV;
4103 }
4104 
4105 LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) {
4106   assert(
4107       !E->isIncomplete() &&
4108       "incomplete matrix subscript expressions should be rejected during Sema");
4109   LValue Base = EmitLValue(E->getBase());
4110   llvm::Value *RowIdx = EmitScalarExpr(E->getRowIdx());
4111   llvm::Value *ColIdx = EmitScalarExpr(E->getColumnIdx());
4112   llvm::Value *NumRows = Builder.getIntN(
4113       RowIdx->getType()->getScalarSizeInBits(),
4114       E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
4115   llvm::Value *FinalIdx =
4116       Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx);
4117   return LValue::MakeMatrixElt(
4118       MaybeConvertMatrixAddress(Base.getAddress(*this), *this), FinalIdx,
4119       E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
4120 }
4121 
4122 static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
4123                                        LValueBaseInfo &BaseInfo,
4124                                        TBAAAccessInfo &TBAAInfo,
4125                                        QualType BaseTy, QualType ElTy,
4126                                        bool IsLowerBound) {
4127   LValue BaseLVal;
4128   if (auto *ASE = dyn_cast<OMPArraySectionExpr>(Base->IgnoreParenImpCasts())) {
4129     BaseLVal = CGF.EmitOMPArraySectionExpr(ASE, IsLowerBound);
4130     if (BaseTy->isArrayType()) {
4131       Address Addr = BaseLVal.getAddress(CGF);
4132       BaseInfo = BaseLVal.getBaseInfo();
4133 
4134       // If the array type was an incomplete type, we need to make sure
4135       // the decay ends up being the right type.
4136       llvm::Type *NewTy = CGF.ConvertType(BaseTy);
4137       Addr = Addr.withElementType(NewTy);
4138 
4139       // Note that VLA pointers are always decayed, so we don't need to do
4140       // anything here.
4141       if (!BaseTy->isVariableArrayType()) {
4142         assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
4143                "Expected pointer to array");
4144         Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
4145       }
4146 
4147       return Addr.withElementType(CGF.ConvertTypeForMem(ElTy));
4148     }
4149     LValueBaseInfo TypeBaseInfo;
4150     TBAAAccessInfo TypeTBAAInfo;
4151     CharUnits Align =
4152         CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
4153     BaseInfo.mergeForCast(TypeBaseInfo);
4154     TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
4155     return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress(CGF)),
4156                    CGF.ConvertTypeForMem(ElTy), Align);
4157   }
4158   return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
4159 }
4160 
4161 LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
4162                                                 bool IsLowerBound) {
4163   QualType BaseTy = OMPArraySectionExpr::getBaseOriginalType(E->getBase());
4164   QualType ResultExprTy;
4165   if (auto *AT = getContext().getAsArrayType(BaseTy))
4166     ResultExprTy = AT->getElementType();
4167   else
4168     ResultExprTy = BaseTy->getPointeeType();
4169   llvm::Value *Idx = nullptr;
4170   if (IsLowerBound || E->getColonLocFirst().isInvalid()) {
4171     // Requesting lower bound or upper bound, but without provided length and
4172     // without ':' symbol for the default length -> length = 1.
4173     // Idx = LowerBound ?: 0;
4174     if (auto *LowerBound = E->getLowerBound()) {
4175       Idx = Builder.CreateIntCast(
4176           EmitScalarExpr(LowerBound), IntPtrTy,
4177           LowerBound->getType()->hasSignedIntegerRepresentation());
4178     } else
4179       Idx = llvm::ConstantInt::getNullValue(IntPtrTy);
4180   } else {
4181     // Try to emit length or lower bound as constant. If this is possible, 1
4182     // is subtracted from constant length or lower bound. Otherwise, emit LLVM
4183     // IR (LB + Len) - 1.
4184     auto &C = CGM.getContext();
4185     auto *Length = E->getLength();
4186     llvm::APSInt ConstLength;
4187     if (Length) {
4188       // Idx = LowerBound + Length - 1;
4189       if (std::optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) {
4190         ConstLength = CL->zextOrTrunc(PointerWidthInBits);
4191         Length = nullptr;
4192       }
4193       auto *LowerBound = E->getLowerBound();
4194       llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
4195       if (LowerBound) {
4196         if (std::optional<llvm::APSInt> LB =
4197                 LowerBound->getIntegerConstantExpr(C)) {
4198           ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits);
4199           LowerBound = nullptr;
4200         }
4201       }
4202       if (!Length)
4203         --ConstLength;
4204       else if (!LowerBound)
4205         --ConstLowerBound;
4206 
4207       if (Length || LowerBound) {
4208         auto *LowerBoundVal =
4209             LowerBound
4210                 ? Builder.CreateIntCast(
4211                       EmitScalarExpr(LowerBound), IntPtrTy,
4212                       LowerBound->getType()->hasSignedIntegerRepresentation())
4213                 : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound);
4214         auto *LengthVal =
4215             Length
4216                 ? Builder.CreateIntCast(
4217                       EmitScalarExpr(Length), IntPtrTy,
4218                       Length->getType()->hasSignedIntegerRepresentation())
4219                 : llvm::ConstantInt::get(IntPtrTy, ConstLength);
4220         Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len",
4221                                 /*HasNUW=*/false,
4222                                 !getLangOpts().isSignedOverflowDefined());
4223         if (Length && LowerBound) {
4224           Idx = Builder.CreateSub(
4225               Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1",
4226               /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
4227         }
4228       } else
4229         Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound);
4230     } else {
4231       // Idx = ArraySize - 1;
4232       QualType ArrayTy = BaseTy->isPointerType()
4233                              ? E->getBase()->IgnoreParenImpCasts()->getType()
4234                              : BaseTy;
4235       if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) {
4236         Length = VAT->getSizeExpr();
4237         if (std::optional<llvm::APSInt> L = Length->getIntegerConstantExpr(C)) {
4238           ConstLength = *L;
4239           Length = nullptr;
4240         }
4241       } else {
4242         auto *CAT = C.getAsConstantArrayType(ArrayTy);
4243         assert(CAT && "unexpected type for array initializer");
4244         ConstLength = CAT->getSize();
4245       }
4246       if (Length) {
4247         auto *LengthVal = Builder.CreateIntCast(
4248             EmitScalarExpr(Length), IntPtrTy,
4249             Length->getType()->hasSignedIntegerRepresentation());
4250         Idx = Builder.CreateSub(
4251             LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1",
4252             /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
4253       } else {
4254         ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
4255         --ConstLength;
4256         Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength);
4257       }
4258     }
4259   }
4260   assert(Idx);
4261 
4262   Address EltPtr = Address::invalid();
4263   LValueBaseInfo BaseInfo;
4264   TBAAAccessInfo TBAAInfo;
4265   if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
4266     // The base must be a pointer, which is not an aggregate.  Emit
4267     // it.  It needs to be emitted first in case it's what captures
4268     // the VLA bounds.
4269     Address Base =
4270         emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo,
4271                                 BaseTy, VLA->getElementType(), IsLowerBound);
4272     // The element count here is the total number of non-VLA elements.
4273     llvm::Value *NumElements = getVLASize(VLA).NumElts;
4274 
4275     // Effectively, the multiply by the VLA size is part of the GEP.
4276     // GEP indexes are signed, and scaling an index isn't permitted to
4277     // signed-overflow, so we use the same semantics for our explicit
4278     // multiply.  We suppress this if overflow is not undefined behavior.
4279     if (getLangOpts().isSignedOverflowDefined())
4280       Idx = Builder.CreateMul(Idx, NumElements);
4281     else
4282       Idx = Builder.CreateNSWMul(Idx, NumElements);
4283     EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
4284                                    !getLangOpts().isSignedOverflowDefined(),
4285                                    /*signedIndices=*/false, E->getExprLoc());
4286   } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
4287     // If this is A[i] where A is an array, the frontend will have decayed the
4288     // base to be a ArrayToPointerDecay implicit cast.  While correct, it is
4289     // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
4290     // "gep x, i" here.  Emit one "gep A, 0, i".
4291     assert(Array->getType()->isArrayType() &&
4292            "Array to pointer decay must have array source type!");
4293     LValue ArrayLV;
4294     // For simple multidimensional array indexing, set the 'accessed' flag for
4295     // better bounds-checking of the base expression.
4296     if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
4297       ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
4298     else
4299       ArrayLV = EmitLValue(Array);
4300 
4301     // Propagate the alignment from the array itself to the result.
4302     EltPtr = emitArraySubscriptGEP(
4303         *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx},
4304         ResultExprTy, !getLangOpts().isSignedOverflowDefined(),
4305         /*signedIndices=*/false, E->getExprLoc());
4306     BaseInfo = ArrayLV.getBaseInfo();
4307     TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
4308   } else {
4309     Address Base = emitOMPArraySectionBase(*this, E->getBase(), BaseInfo,
4310                                            TBAAInfo, BaseTy, ResultExprTy,
4311                                            IsLowerBound);
4312     EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
4313                                    !getLangOpts().isSignedOverflowDefined(),
4314                                    /*signedIndices=*/false, E->getExprLoc());
4315   }
4316 
4317   return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo);
4318 }
4319 
4320 LValue CodeGenFunction::
4321 EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
4322   // Emit the base vector as an l-value.
4323   LValue Base;
4324 
4325   // ExtVectorElementExpr's base can either be a vector or pointer to vector.
4326   if (E->isArrow()) {
4327     // If it is a pointer to a vector, emit the address and form an lvalue with
4328     // it.
4329     LValueBaseInfo BaseInfo;
4330     TBAAAccessInfo TBAAInfo;
4331     Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo);
4332     const auto *PT = E->getBase()->getType()->castAs<PointerType>();
4333     Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo);
4334     Base.getQuals().removeObjCGCAttr();
4335   } else if (E->getBase()->isGLValue()) {
4336     // Otherwise, if the base is an lvalue ( as in the case of foo.x.x),
4337     // emit the base as an lvalue.
4338     assert(E->getBase()->getType()->isVectorType());
4339     Base = EmitLValue(E->getBase());
4340   } else {
4341     // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
4342     assert(E->getBase()->getType()->isVectorType() &&
4343            "Result must be a vector");
4344     llvm::Value *Vec = EmitScalarExpr(E->getBase());
4345 
4346     // Store the vector to memory (because LValue wants an address).
4347     Address VecMem = CreateMemTemp(E->getBase()->getType());
4348     Builder.CreateStore(Vec, VecMem);
4349     Base = MakeAddrLValue(VecMem, E->getBase()->getType(),
4350                           AlignmentSource::Decl);
4351   }
4352 
4353   QualType type =
4354     E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
4355 
4356   // Encode the element access list into a vector of unsigned indices.
4357   SmallVector<uint32_t, 4> Indices;
4358   E->getEncodedElementAccess(Indices);
4359 
4360   if (Base.isSimple()) {
4361     llvm::Constant *CV =
4362         llvm::ConstantDataVector::get(getLLVMContext(), Indices);
4363     return LValue::MakeExtVectorElt(Base.getAddress(*this), CV, type,
4364                                     Base.getBaseInfo(), TBAAAccessInfo());
4365   }
4366   assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
4367 
4368   llvm::Constant *BaseElts = Base.getExtVectorElts();
4369   SmallVector<llvm::Constant *, 4> CElts;
4370 
4371   for (unsigned i = 0, e = Indices.size(); i != e; ++i)
4372     CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
4373   llvm::Constant *CV = llvm::ConstantVector::get(CElts);
4374   return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
4375                                   Base.getBaseInfo(), TBAAAccessInfo());
4376 }
4377 
4378 LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
4379   if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) {
4380     EmitIgnoredExpr(E->getBase());
4381     return EmitDeclRefLValue(DRE);
4382   }
4383 
4384   Expr *BaseExpr = E->getBase();
4385   // If this is s.x, emit s as an lvalue.  If it is s->x, emit s as a scalar.
4386   LValue BaseLV;
4387   if (E->isArrow()) {
4388     LValueBaseInfo BaseInfo;
4389     TBAAAccessInfo TBAAInfo;
4390     Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
4391     QualType PtrTy = BaseExpr->getType()->getPointeeType();
4392     SanitizerSet SkippedChecks;
4393     bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr);
4394     if (IsBaseCXXThis)
4395       SkippedChecks.set(SanitizerKind::Alignment, true);
4396     if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
4397       SkippedChecks.set(SanitizerKind::Null, true);
4398     EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr.getPointer(), PtrTy,
4399                   /*Alignment=*/CharUnits::Zero(), SkippedChecks);
4400     BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
4401   } else
4402     BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);
4403 
4404   NamedDecl *ND = E->getMemberDecl();
4405   if (auto *Field = dyn_cast<FieldDecl>(ND)) {
4406     LValue LV = EmitLValueForField(BaseLV, Field);
4407     setObjCGCLValueClass(getContext(), E, LV);
4408     if (getLangOpts().OpenMP) {
4409       // If the member was explicitly marked as nontemporal, mark it as
4410       // nontemporal. If the base lvalue is marked as nontemporal, mark access
4411       // to children as nontemporal too.
4412       if ((IsWrappedCXXThis(BaseExpr) &&
4413            CGM.getOpenMPRuntime().isNontemporalDecl(Field)) ||
4414           BaseLV.isNontemporal())
4415         LV.setNontemporal(/*Value=*/true);
4416     }
4417     return LV;
4418   }
4419 
4420   if (const auto *FD = dyn_cast<FunctionDecl>(ND))
4421     return EmitFunctionDeclLValue(*this, E, FD);
4422 
4423   llvm_unreachable("Unhandled member declaration!");
4424 }
4425 
4426 /// Given that we are currently emitting a lambda, emit an l-value for
4427 /// one of its members.
4428 ///
4429 LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field,
4430                                                  llvm::Value *ThisValue) {
4431   bool HasExplicitObjectParameter = false;
4432   if (const auto *MD = dyn_cast_if_present<CXXMethodDecl>(CurCodeDecl)) {
4433     HasExplicitObjectParameter = MD->isExplicitObjectMemberFunction();
4434     assert(MD->getParent()->isLambda());
4435     assert(MD->getParent() == Field->getParent());
4436   }
4437   LValue LambdaLV;
4438   if (HasExplicitObjectParameter) {
4439     const VarDecl *D = cast<CXXMethodDecl>(CurCodeDecl)->getParamDecl(0);
4440     auto It = LocalDeclMap.find(D);
4441     assert(It != LocalDeclMap.end() && "explicit parameter not loaded?");
4442     Address AddrOfExplicitObject = It->getSecond();
4443     if (D->getType()->isReferenceType())
4444       LambdaLV = EmitLoadOfReferenceLValue(AddrOfExplicitObject, D->getType(),
4445                                            AlignmentSource::Decl);
4446     else
4447       LambdaLV = MakeNaturalAlignAddrLValue(AddrOfExplicitObject.getPointer(),
4448                                             D->getType().getNonReferenceType());
4449   } else {
4450     QualType LambdaTagType = getContext().getTagDeclType(Field->getParent());
4451     LambdaLV = MakeNaturalAlignAddrLValue(ThisValue, LambdaTagType);
4452   }
4453   return EmitLValueForField(LambdaLV, Field);
4454 }
4455 
4456 LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) {
4457   return EmitLValueForLambdaField(Field, CXXABIThisValue);
4458 }
4459 
4460 /// Get the field index in the debug info. The debug info structure/union
4461 /// will ignore the unnamed bitfields.
4462 unsigned CodeGenFunction::getDebugInfoFIndex(const RecordDecl *Rec,
4463                                              unsigned FieldIndex) {
4464   unsigned I = 0, Skipped = 0;
4465 
4466   for (auto *F : Rec->getDefinition()->fields()) {
4467     if (I == FieldIndex)
4468       break;
4469     if (F->isUnnamedBitfield())
4470       Skipped++;
4471     I++;
4472   }
4473 
4474   return FieldIndex - Skipped;
4475 }
4476 
4477 /// Get the address of a zero-sized field within a record. The resulting
4478 /// address doesn't necessarily have the right type.
4479 static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base,
4480                                        const FieldDecl *Field) {
4481   CharUnits Offset = CGF.getContext().toCharUnitsFromBits(
4482       CGF.getContext().getFieldOffset(Field));
4483   if (Offset.isZero())
4484     return Base;
4485   Base = Base.withElementType(CGF.Int8Ty);
4486   return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset);
4487 }
4488 
4489 /// Drill down to the storage of a field without walking into
4490 /// reference types.
4491 ///
4492 /// The resulting address doesn't necessarily have the right type.
4493 static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base,
4494                                       const FieldDecl *field) {
4495   if (field->isZeroSize(CGF.getContext()))
4496     return emitAddrOfZeroSizeField(CGF, base, field);
4497 
4498   const RecordDecl *rec = field->getParent();
4499 
4500   unsigned idx =
4501     CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
4502 
4503   return CGF.Builder.CreateStructGEP(base, idx, field->getName());
4504 }
4505 
4506 static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base,
4507                                         Address addr, const FieldDecl *field) {
4508   const RecordDecl *rec = field->getParent();
4509   llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(
4510       base.getType(), rec->getLocation());
4511 
4512   unsigned idx =
4513       CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
4514 
4515   return CGF.Builder.CreatePreserveStructAccessIndex(
4516       addr, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo);
4517 }
4518 
4519 static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
4520   const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
4521   if (!RD)
4522     return false;
4523 
4524   if (RD->isDynamicClass())
4525     return true;
4526 
4527   for (const auto &Base : RD->bases())
4528     if (hasAnyVptr(Base.getType(), Context))
4529       return true;
4530 
4531   for (const FieldDecl *Field : RD->fields())
4532     if (hasAnyVptr(Field->getType(), Context))
4533       return true;
4534 
4535   return false;
4536 }
4537 
4538 LValue CodeGenFunction::EmitLValueForField(LValue base,
4539                                            const FieldDecl *field) {
4540   LValueBaseInfo BaseInfo = base.getBaseInfo();
4541 
4542   if (field->isBitField()) {
4543     const CGRecordLayout &RL =
4544         CGM.getTypes().getCGRecordLayout(field->getParent());
4545     const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
4546     const bool UseVolatile = isAAPCS(CGM.getTarget()) &&
4547                              CGM.getCodeGenOpts().AAPCSBitfieldWidth &&
4548                              Info.VolatileStorageSize != 0 &&
4549                              field->getType()
4550                                  .withCVRQualifiers(base.getVRQualifiers())
4551                                  .isVolatileQualified();
4552     Address Addr = base.getAddress(*this);
4553     unsigned Idx = RL.getLLVMFieldNo(field);
4554     const RecordDecl *rec = field->getParent();
4555     if (hasBPFPreserveStaticOffset(rec))
4556       Addr = wrapWithBPFPreserveStaticOffset(*this, Addr);
4557     if (!UseVolatile) {
4558       if (!IsInPreservedAIRegion &&
4559           (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
4560         if (Idx != 0)
4561           // For structs, we GEP to the field that the record layout suggests.
4562           Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
4563       } else {
4564         llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
4565             getContext().getRecordType(rec), rec->getLocation());
4566         Addr = Builder.CreatePreserveStructAccessIndex(
4567             Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()),
4568             DbgInfo);
4569       }
4570     }
4571     const unsigned SS =
4572         UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
4573     // Get the access type.
4574     llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS);
4575     Addr = Addr.withElementType(FieldIntTy);
4576     if (UseVolatile) {
4577       const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
4578       if (VolatileOffset)
4579         Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset);
4580     }
4581 
4582     QualType fieldType =
4583         field->getType().withCVRQualifiers(base.getVRQualifiers());
4584     // TODO: Support TBAA for bit fields.
4585     LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
4586     return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,
4587                                 TBAAAccessInfo());
4588   }
4589 
4590   // Fields of may-alias structures are may-alias themselves.
4591   // FIXME: this should get propagated down through anonymous structs
4592   // and unions.
4593   QualType FieldType = field->getType();
4594   const RecordDecl *rec = field->getParent();
4595   AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource();
4596   LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource));
4597   TBAAAccessInfo FieldTBAAInfo;
4598   if (base.getTBAAInfo().isMayAlias() ||
4599           rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) {
4600     FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
4601   } else if (rec->isUnion()) {
4602     // TODO: Support TBAA for unions.
4603     FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
4604   } else {
4605     // If no base type been assigned for the base access, then try to generate
4606     // one for this base lvalue.
4607     FieldTBAAInfo = base.getTBAAInfo();
4608     if (!FieldTBAAInfo.BaseType) {
4609         FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType());
4610         assert(!FieldTBAAInfo.Offset &&
4611                "Nonzero offset for an access with no base type!");
4612     }
4613 
4614     // Adjust offset to be relative to the base type.
4615     const ASTRecordLayout &Layout =
4616         getContext().getASTRecordLayout(field->getParent());
4617     unsigned CharWidth = getContext().getCharWidth();
4618     if (FieldTBAAInfo.BaseType)
4619       FieldTBAAInfo.Offset +=
4620           Layout.getFieldOffset(field->getFieldIndex()) / CharWidth;
4621 
4622     // Update the final access type and size.
4623     FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType);
4624     FieldTBAAInfo.Size =
4625         getContext().getTypeSizeInChars(FieldType).getQuantity();
4626   }
4627 
4628   Address addr = base.getAddress(*this);
4629   if (hasBPFPreserveStaticOffset(rec))
4630     addr = wrapWithBPFPreserveStaticOffset(*this, addr);
4631   if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
4632     if (CGM.getCodeGenOpts().StrictVTablePointers &&
4633         ClassDef->isDynamicClass()) {
4634       // Getting to any field of dynamic object requires stripping dynamic
4635       // information provided by invariant.group.  This is because accessing
4636       // fields may leak the real address of dynamic object, which could result
4637       // in miscompilation when leaked pointer would be compared.
4638       auto *stripped = Builder.CreateStripInvariantGroup(addr.getPointer());
4639       addr = Address(stripped, addr.getElementType(), addr.getAlignment());
4640     }
4641   }
4642 
4643   unsigned RecordCVR = base.getVRQualifiers();
4644   if (rec->isUnion()) {
4645     // For unions, there is no pointer adjustment.
4646     if (CGM.getCodeGenOpts().StrictVTablePointers &&
4647         hasAnyVptr(FieldType, getContext()))
4648       // Because unions can easily skip invariant.barriers, we need to add
4649       // a barrier every time CXXRecord field with vptr is referenced.
4650       addr = Builder.CreateLaunderInvariantGroup(addr);
4651 
4652     if (IsInPreservedAIRegion ||
4653         (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
4654       // Remember the original union field index
4655       llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(base.getType(),
4656           rec->getLocation());
4657       addr = Address(
4658           Builder.CreatePreserveUnionAccessIndex(
4659               addr.getPointer(), getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
4660           addr.getElementType(), addr.getAlignment());
4661     }
4662 
4663     if (FieldType->isReferenceType())
4664       addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
4665   } else {
4666     if (!IsInPreservedAIRegion &&
4667         (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>()))
4668       // For structs, we GEP to the field that the record layout suggests.
4669       addr = emitAddrOfFieldStorage(*this, addr, field);
4670     else
4671       // Remember the original struct field index
4672       addr = emitPreserveStructAccess(*this, base, addr, field);
4673   }
4674 
4675   // If this is a reference field, load the reference right now.
4676   if (FieldType->isReferenceType()) {
4677     LValue RefLVal =
4678         MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
4679     if (RecordCVR & Qualifiers::Volatile)
4680       RefLVal.getQuals().addVolatile();
4681     addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo);
4682 
4683     // Qualifiers on the struct don't apply to the referencee.
4684     RecordCVR = 0;
4685     FieldType = FieldType->getPointeeType();
4686   }
4687 
4688   // Make sure that the address is pointing to the right type.  This is critical
4689   // for both unions and structs.
4690   addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
4691 
4692   if (field->hasAttr<AnnotateAttr>())
4693     addr = EmitFieldAnnotations(field, addr);
4694 
4695   LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
4696   LV.getQuals().addCVRQualifiers(RecordCVR);
4697 
4698   // __weak attribute on a field is ignored.
4699   if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
4700     LV.getQuals().removeObjCGCAttr();
4701 
4702   return LV;
4703 }
4704 
4705 LValue
4706 CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
4707                                                   const FieldDecl *Field) {
4708   QualType FieldType = Field->getType();
4709 
4710   if (!FieldType->isReferenceType())
4711     return EmitLValueForField(Base, Field);
4712 
4713   Address V = emitAddrOfFieldStorage(*this, Base.getAddress(*this), Field);
4714 
4715   // Make sure that the address is pointing to the right type.
4716   llvm::Type *llvmType = ConvertTypeForMem(FieldType);
4717   V = V.withElementType(llvmType);
4718 
4719   // TODO: Generate TBAA information that describes this access as a structure
4720   // member access and not just an access to an object of the field's type. This
4721   // should be similar to what we do in EmitLValueForField().
4722   LValueBaseInfo BaseInfo = Base.getBaseInfo();
4723   AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource();
4724   LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource));
4725   return MakeAddrLValue(V, FieldType, FieldBaseInfo,
4726                         CGM.getTBAAInfoForSubobject(Base, FieldType));
4727 }
4728 
4729 LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
4730   if (E->isFileScope()) {
4731     ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
4732     return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl);
4733   }
4734   if (E->getType()->isVariablyModifiedType())
4735     // make sure to emit the VLA size.
4736     EmitVariablyModifiedType(E->getType());
4737 
4738   Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
4739   const Expr *InitExpr = E->getInitializer();
4740   LValue Result = MakeAddrLValue(DeclPtr, E->getType(), AlignmentSource::Decl);
4741 
4742   EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
4743                    /*Init*/ true);
4744 
4745   // Block-scope compound literals are destroyed at the end of the enclosing
4746   // scope in C.
4747   if (!getLangOpts().CPlusPlus)
4748     if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
4749       pushLifetimeExtendedDestroy(getCleanupKind(DtorKind), DeclPtr,
4750                                   E->getType(), getDestroyer(DtorKind),
4751                                   DtorKind & EHCleanup);
4752 
4753   return Result;
4754 }
4755 
4756 LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
4757   if (!E->isGLValue())
4758     // Initializing an aggregate temporary in C++11: T{...}.
4759     return EmitAggExprToLValue(E);
4760 
4761   // An lvalue initializer list must be initializing a reference.
4762   assert(E->isTransparent() && "non-transparent glvalue init list");
4763   return EmitLValue(E->getInit(0));
4764 }
4765 
4766 /// Emit the operand of a glvalue conditional operator. This is either a glvalue
4767 /// or a (possibly-parenthesized) throw-expression. If this is a throw, no
4768 /// LValue is returned and the current block has been terminated.
4769 static std::optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
4770                                                          const Expr *Operand) {
4771   if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
4772     CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false);
4773     return std::nullopt;
4774   }
4775 
4776   return CGF.EmitLValue(Operand);
4777 }
4778 
4779 namespace {
4780 // Handle the case where the condition is a constant evaluatable simple integer,
4781 // which means we don't have to separately handle the true/false blocks.
4782 std::optional<LValue> HandleConditionalOperatorLValueSimpleCase(
4783     CodeGenFunction &CGF, const AbstractConditionalOperator *E) {
4784   const Expr *condExpr = E->getCond();
4785   bool CondExprBool;
4786   if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
4787     const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr();
4788     if (!CondExprBool)
4789       std::swap(Live, Dead);
4790 
4791     if (!CGF.ContainsLabel(Dead)) {
4792       // If the true case is live, we need to track its region.
4793       if (CondExprBool)
4794         CGF.incrementProfileCounter(E);
4795       // If a throw expression we emit it and return an undefined lvalue
4796       // because it can't be used.
4797       if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
4798         CGF.EmitCXXThrowExpr(ThrowExpr);
4799         llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
4800         llvm::Type *Ty = CGF.UnqualPtrTy;
4801         return CGF.MakeAddrLValue(
4802             Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
4803             Dead->getType());
4804       }
4805       return CGF.EmitLValue(Live);
4806     }
4807   }
4808   return std::nullopt;
4809 }
4810 struct ConditionalInfo {
4811   llvm::BasicBlock *lhsBlock, *rhsBlock;
4812   std::optional<LValue> LHS, RHS;
4813 };
4814 
4815 // Create and generate the 3 blocks for a conditional operator.
4816 // Leaves the 'current block' in the continuation basic block.
4817 template<typename FuncTy>
4818 ConditionalInfo EmitConditionalBlocks(CodeGenFunction &CGF,
4819                                       const AbstractConditionalOperator *E,
4820                                       const FuncTy &BranchGenFunc) {
4821   ConditionalInfo Info{CGF.createBasicBlock("cond.true"),
4822                        CGF.createBasicBlock("cond.false"), std::nullopt,
4823                        std::nullopt};
4824   llvm::BasicBlock *endBlock = CGF.createBasicBlock("cond.end");
4825 
4826   CodeGenFunction::ConditionalEvaluation eval(CGF);
4827   CGF.EmitBranchOnBoolExpr(E->getCond(), Info.lhsBlock, Info.rhsBlock,
4828                            CGF.getProfileCount(E));
4829 
4830   // Any temporaries created here are conditional.
4831   CGF.EmitBlock(Info.lhsBlock);
4832   CGF.incrementProfileCounter(E);
4833   eval.begin(CGF);
4834   Info.LHS = BranchGenFunc(CGF, E->getTrueExpr());
4835   eval.end(CGF);
4836   Info.lhsBlock = CGF.Builder.GetInsertBlock();
4837 
4838   if (Info.LHS)
4839     CGF.Builder.CreateBr(endBlock);
4840 
4841   // Any temporaries created here are conditional.
4842   CGF.EmitBlock(Info.rhsBlock);
4843   eval.begin(CGF);
4844   Info.RHS = BranchGenFunc(CGF, E->getFalseExpr());
4845   eval.end(CGF);
4846   Info.rhsBlock = CGF.Builder.GetInsertBlock();
4847   CGF.EmitBlock(endBlock);
4848 
4849   return Info;
4850 }
4851 } // namespace
4852 
4853 void CodeGenFunction::EmitIgnoredConditionalOperator(
4854     const AbstractConditionalOperator *E) {
4855   if (!E->isGLValue()) {
4856     // ?: here should be an aggregate.
4857     assert(hasAggregateEvaluationKind(E->getType()) &&
4858            "Unexpected conditional operator!");
4859     return (void)EmitAggExprToLValue(E);
4860   }
4861 
4862   OpaqueValueMapping binding(*this, E);
4863   if (HandleConditionalOperatorLValueSimpleCase(*this, E))
4864     return;
4865 
4866   EmitConditionalBlocks(*this, E, [](CodeGenFunction &CGF, const Expr *E) {
4867     CGF.EmitIgnoredExpr(E);
4868     return LValue{};
4869   });
4870 }
4871 LValue CodeGenFunction::EmitConditionalOperatorLValue(
4872     const AbstractConditionalOperator *expr) {
4873   if (!expr->isGLValue()) {
4874     // ?: here should be an aggregate.
4875     assert(hasAggregateEvaluationKind(expr->getType()) &&
4876            "Unexpected conditional operator!");
4877     return EmitAggExprToLValue(expr);
4878   }
4879 
4880   OpaqueValueMapping binding(*this, expr);
4881   if (std::optional<LValue> Res =
4882           HandleConditionalOperatorLValueSimpleCase(*this, expr))
4883     return *Res;
4884 
4885   ConditionalInfo Info = EmitConditionalBlocks(
4886       *this, expr, [](CodeGenFunction &CGF, const Expr *E) {
4887         return EmitLValueOrThrowExpression(CGF, E);
4888       });
4889 
4890   if ((Info.LHS && !Info.LHS->isSimple()) ||
4891       (Info.RHS && !Info.RHS->isSimple()))
4892     return EmitUnsupportedLValue(expr, "conditional operator");
4893 
4894   if (Info.LHS && Info.RHS) {
4895     Address lhsAddr = Info.LHS->getAddress(*this);
4896     Address rhsAddr = Info.RHS->getAddress(*this);
4897     llvm::PHINode *phi = Builder.CreatePHI(lhsAddr.getType(), 2, "cond-lvalue");
4898     phi->addIncoming(lhsAddr.getPointer(), Info.lhsBlock);
4899     phi->addIncoming(rhsAddr.getPointer(), Info.rhsBlock);
4900     Address result(phi, lhsAddr.getElementType(),
4901                    std::min(lhsAddr.getAlignment(), rhsAddr.getAlignment()));
4902     AlignmentSource alignSource =
4903         std::max(Info.LHS->getBaseInfo().getAlignmentSource(),
4904                  Info.RHS->getBaseInfo().getAlignmentSource());
4905     TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator(
4906         Info.LHS->getTBAAInfo(), Info.RHS->getTBAAInfo());
4907     return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource),
4908                           TBAAInfo);
4909   } else {
4910     assert((Info.LHS || Info.RHS) &&
4911            "both operands of glvalue conditional are throw-expressions?");
4912     return Info.LHS ? *Info.LHS : *Info.RHS;
4913   }
4914 }
4915 
4916 /// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
4917 /// type. If the cast is to a reference, we can have the usual lvalue result,
4918 /// otherwise if a cast is needed by the code generator in an lvalue context,
4919 /// then it must mean that we need the address of an aggregate in order to
4920 /// access one of its members.  This can happen for all the reasons that casts
4921 /// are permitted with aggregate result, including noop aggregate casts, and
4922 /// cast from scalar to union.
4923 LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
4924   switch (E->getCastKind()) {
4925   case CK_ToVoid:
4926   case CK_BitCast:
4927   case CK_LValueToRValueBitCast:
4928   case CK_ArrayToPointerDecay:
4929   case CK_FunctionToPointerDecay:
4930   case CK_NullToMemberPointer:
4931   case CK_NullToPointer:
4932   case CK_IntegralToPointer:
4933   case CK_PointerToIntegral:
4934   case CK_PointerToBoolean:
4935   case CK_IntegralCast:
4936   case CK_BooleanToSignedIntegral:
4937   case CK_IntegralToBoolean:
4938   case CK_IntegralToFloating:
4939   case CK_FloatingToIntegral:
4940   case CK_FloatingToBoolean:
4941   case CK_FloatingCast:
4942   case CK_FloatingRealToComplex:
4943   case CK_FloatingComplexToReal:
4944   case CK_FloatingComplexToBoolean:
4945   case CK_FloatingComplexCast:
4946   case CK_FloatingComplexToIntegralComplex:
4947   case CK_IntegralRealToComplex:
4948   case CK_IntegralComplexToReal:
4949   case CK_IntegralComplexToBoolean:
4950   case CK_IntegralComplexCast:
4951   case CK_IntegralComplexToFloatingComplex:
4952   case CK_DerivedToBaseMemberPointer:
4953   case CK_BaseToDerivedMemberPointer:
4954   case CK_MemberPointerToBoolean:
4955   case CK_ReinterpretMemberPointer:
4956   case CK_AnyPointerToBlockPointerCast:
4957   case CK_ARCProduceObject:
4958   case CK_ARCConsumeObject:
4959   case CK_ARCReclaimReturnedObject:
4960   case CK_ARCExtendBlockObject:
4961   case CK_CopyAndAutoreleaseBlockObject:
4962   case CK_IntToOCLSampler:
4963   case CK_FloatingToFixedPoint:
4964   case CK_FixedPointToFloating:
4965   case CK_FixedPointCast:
4966   case CK_FixedPointToBoolean:
4967   case CK_FixedPointToIntegral:
4968   case CK_IntegralToFixedPoint:
4969   case CK_MatrixCast:
4970     return EmitUnsupportedLValue(E, "unexpected cast lvalue");
4971 
4972   case CK_Dependent:
4973     llvm_unreachable("dependent cast kind in IR gen!");
4974 
4975   case CK_BuiltinFnToFnPtr:
4976     llvm_unreachable("builtin functions are handled elsewhere");
4977 
4978   // These are never l-values; just use the aggregate emission code.
4979   case CK_NonAtomicToAtomic:
4980   case CK_AtomicToNonAtomic:
4981     return EmitAggExprToLValue(E);
4982 
4983   case CK_Dynamic: {
4984     LValue LV = EmitLValue(E->getSubExpr());
4985     Address V = LV.getAddress(*this);
4986     const auto *DCE = cast<CXXDynamicCastExpr>(E);
4987     return MakeNaturalAlignAddrLValue(EmitDynamicCast(V, DCE), E->getType());
4988   }
4989 
4990   case CK_ConstructorConversion:
4991   case CK_UserDefinedConversion:
4992   case CK_CPointerToObjCPointerCast:
4993   case CK_BlockPointerToObjCPointerCast:
4994   case CK_LValueToRValue:
4995     return EmitLValue(E->getSubExpr());
4996 
4997   case CK_NoOp: {
4998     // CK_NoOp can model a qualification conversion, which can remove an array
4999     // bound and change the IR type.
5000     // FIXME: Once pointee types are removed from IR, remove this.
5001     LValue LV = EmitLValue(E->getSubExpr());
5002     // Propagate the volatile qualifer to LValue, if exist in E.
5003     if (E->changesVolatileQualification())
5004       LV.getQuals() = E->getType().getQualifiers();
5005     if (LV.isSimple()) {
5006       Address V = LV.getAddress(*this);
5007       if (V.isValid()) {
5008         llvm::Type *T = ConvertTypeForMem(E->getType());
5009         if (V.getElementType() != T)
5010           LV.setAddress(V.withElementType(T));
5011       }
5012     }
5013     return LV;
5014   }
5015 
5016   case CK_UncheckedDerivedToBase:
5017   case CK_DerivedToBase: {
5018     const auto *DerivedClassTy =
5019         E->getSubExpr()->getType()->castAs<RecordType>();
5020     auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
5021 
5022     LValue LV = EmitLValue(E->getSubExpr());
5023     Address This = LV.getAddress(*this);
5024 
5025     // Perform the derived-to-base conversion
5026     Address Base = GetAddressOfBaseClass(
5027         This, DerivedClassDecl, E->path_begin(), E->path_end(),
5028         /*NullCheckValue=*/false, E->getExprLoc());
5029 
5030     // TODO: Support accesses to members of base classes in TBAA. For now, we
5031     // conservatively pretend that the complete object is of the base class
5032     // type.
5033     return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(),
5034                           CGM.getTBAAInfoForSubobject(LV, E->getType()));
5035   }
5036   case CK_ToUnion:
5037     return EmitAggExprToLValue(E);
5038   case CK_BaseToDerived: {
5039     const auto *DerivedClassTy = E->getType()->castAs<RecordType>();
5040     auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
5041 
5042     LValue LV = EmitLValue(E->getSubExpr());
5043 
5044     // Perform the base-to-derived conversion
5045     Address Derived = GetAddressOfDerivedClass(
5046         LV.getAddress(*this), DerivedClassDecl, E->path_begin(), E->path_end(),
5047         /*NullCheckValue=*/false);
5048 
5049     // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
5050     // performed and the object is not of the derived type.
5051     if (sanitizePerformTypeCheck())
5052       EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(),
5053                     Derived.getPointer(), E->getType());
5054 
5055     if (SanOpts.has(SanitizerKind::CFIDerivedCast))
5056       EmitVTablePtrCheckForCast(E->getType(), Derived,
5057                                 /*MayBeNull=*/false, CFITCK_DerivedCast,
5058                                 E->getBeginLoc());
5059 
5060     return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(),
5061                           CGM.getTBAAInfoForSubobject(LV, E->getType()));
5062   }
5063   case CK_LValueBitCast: {
5064     // This must be a reinterpret_cast (or c-style equivalent).
5065     const auto *CE = cast<ExplicitCastExpr>(E);
5066 
5067     CGM.EmitExplicitCastExprType(CE, this);
5068     LValue LV = EmitLValue(E->getSubExpr());
5069     Address V = LV.getAddress(*this).withElementType(
5070         ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType()));
5071 
5072     if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
5073       EmitVTablePtrCheckForCast(E->getType(), V,
5074                                 /*MayBeNull=*/false, CFITCK_UnrelatedCast,
5075                                 E->getBeginLoc());
5076 
5077     return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
5078                           CGM.getTBAAInfoForSubobject(LV, E->getType()));
5079   }
5080   case CK_AddressSpaceConversion: {
5081     LValue LV = EmitLValue(E->getSubExpr());
5082     QualType DestTy = getContext().getPointerType(E->getType());
5083     llvm::Value *V = getTargetHooks().performAddrSpaceCast(
5084         *this, LV.getPointer(*this),
5085         E->getSubExpr()->getType().getAddressSpace(),
5086         E->getType().getAddressSpace(), ConvertType(DestTy));
5087     return MakeAddrLValue(Address(V, ConvertTypeForMem(E->getType()),
5088                                   LV.getAddress(*this).getAlignment()),
5089                           E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
5090   }
5091   case CK_ObjCObjectLValueCast: {
5092     LValue LV = EmitLValue(E->getSubExpr());
5093     Address V = LV.getAddress(*this).withElementType(ConvertType(E->getType()));
5094     return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
5095                           CGM.getTBAAInfoForSubobject(LV, E->getType()));
5096   }
5097   case CK_ZeroToOCLOpaqueType:
5098     llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
5099 
5100   case CK_VectorSplat: {
5101     // LValue results of vector splats are only supported in HLSL.
5102     if (!getLangOpts().HLSL)
5103       return EmitUnsupportedLValue(E, "unexpected cast lvalue");
5104     return EmitLValue(E->getSubExpr());
5105   }
5106   }
5107 
5108   llvm_unreachable("Unhandled lvalue cast kind?");
5109 }
5110 
5111 LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
5112   assert(OpaqueValueMappingData::shouldBindAsLValue(e));
5113   return getOrCreateOpaqueLValueMapping(e);
5114 }
5115 
5116 LValue
5117 CodeGenFunction::getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e) {
5118   assert(OpaqueValueMapping::shouldBindAsLValue(e));
5119 
5120   llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
5121       it = OpaqueLValues.find(e);
5122 
5123   if (it != OpaqueLValues.end())
5124     return it->second;
5125 
5126   assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
5127   return EmitLValue(e->getSourceExpr());
5128 }
5129 
5130 RValue
5131 CodeGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) {
5132   assert(!OpaqueValueMapping::shouldBindAsLValue(e));
5133 
5134   llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
5135       it = OpaqueRValues.find(e);
5136 
5137   if (it != OpaqueRValues.end())
5138     return it->second;
5139 
5140   assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
5141   return EmitAnyExpr(e->getSourceExpr());
5142 }
5143 
5144 RValue CodeGenFunction::EmitRValueForField(LValue LV,
5145                                            const FieldDecl *FD,
5146                                            SourceLocation Loc) {
5147   QualType FT = FD->getType();
5148   LValue FieldLV = EmitLValueForField(LV, FD);
5149   switch (getEvaluationKind(FT)) {
5150   case TEK_Complex:
5151     return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
5152   case TEK_Aggregate:
5153     return FieldLV.asAggregateRValue(*this);
5154   case TEK_Scalar:
5155     // This routine is used to load fields one-by-one to perform a copy, so
5156     // don't load reference fields.
5157     if (FD->getType()->isReferenceType())
5158       return RValue::get(FieldLV.getPointer(*this));
5159     // Call EmitLoadOfScalar except when the lvalue is a bitfield to emit a
5160     // primitive load.
5161     if (FieldLV.isBitField())
5162       return EmitLoadOfLValue(FieldLV, Loc);
5163     return RValue::get(EmitLoadOfScalar(FieldLV, Loc));
5164   }
5165   llvm_unreachable("bad evaluation kind");
5166 }
5167 
5168 //===--------------------------------------------------------------------===//
5169 //                             Expression Emission
5170 //===--------------------------------------------------------------------===//
5171 
5172 RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
5173                                      ReturnValueSlot ReturnValue) {
5174   // Builtins never have block type.
5175   if (E->getCallee()->getType()->isBlockPointerType())
5176     return EmitBlockCallExpr(E, ReturnValue);
5177 
5178   if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
5179     return EmitCXXMemberCallExpr(CE, ReturnValue);
5180 
5181   if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
5182     return EmitCUDAKernelCallExpr(CE, ReturnValue);
5183 
5184   // A CXXOperatorCallExpr is created even for explicit object methods, but
5185   // these should be treated like static function call.
5186   if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
5187     if (const auto *MD =
5188             dyn_cast_if_present<CXXMethodDecl>(CE->getCalleeDecl());
5189         MD && MD->isImplicitObjectMemberFunction())
5190       return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);
5191 
5192   CGCallee callee = EmitCallee(E->getCallee());
5193 
5194   if (callee.isBuiltin()) {
5195     return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(),
5196                            E, ReturnValue);
5197   }
5198 
5199   if (callee.isPseudoDestructor()) {
5200     return EmitCXXPseudoDestructorExpr(callee.getPseudoDestructorExpr());
5201   }
5202 
5203   return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue);
5204 }
5205 
5206 /// Emit a CallExpr without considering whether it might be a subclass.
5207 RValue CodeGenFunction::EmitSimpleCallExpr(const CallExpr *E,
5208                                            ReturnValueSlot ReturnValue) {
5209   CGCallee Callee = EmitCallee(E->getCallee());
5210   return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue);
5211 }
5212 
5213 // Detect the unusual situation where an inline version is shadowed by a
5214 // non-inline version. In that case we should pick the external one
5215 // everywhere. That's GCC behavior too.
5216 static bool OnlyHasInlineBuiltinDeclaration(const FunctionDecl *FD) {
5217   for (const FunctionDecl *PD = FD; PD; PD = PD->getPreviousDecl())
5218     if (!PD->isInlineBuiltinDeclaration())
5219       return false;
5220   return true;
5221 }
5222 
5223 static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD) {
5224   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
5225 
5226   if (auto builtinID = FD->getBuiltinID()) {
5227     std::string NoBuiltinFD = ("no-builtin-" + FD->getName()).str();
5228     std::string NoBuiltins = "no-builtins";
5229 
5230     StringRef Ident = CGF.CGM.getMangledName(GD);
5231     std::string FDInlineName = (Ident + ".inline").str();
5232 
5233     bool IsPredefinedLibFunction =
5234         CGF.getContext().BuiltinInfo.isPredefinedLibFunction(builtinID);
5235     bool HasAttributeNoBuiltin =
5236         CGF.CurFn->getAttributes().hasFnAttr(NoBuiltinFD) ||
5237         CGF.CurFn->getAttributes().hasFnAttr(NoBuiltins);
5238 
5239     // When directing calling an inline builtin, call it through it's mangled
5240     // name to make it clear it's not the actual builtin.
5241     if (CGF.CurFn->getName() != FDInlineName &&
5242         OnlyHasInlineBuiltinDeclaration(FD)) {
5243       llvm::Constant *CalleePtr = EmitFunctionDeclPointer(CGF.CGM, GD);
5244       llvm::Function *Fn = llvm::cast<llvm::Function>(CalleePtr);
5245       llvm::Module *M = Fn->getParent();
5246       llvm::Function *Clone = M->getFunction(FDInlineName);
5247       if (!Clone) {
5248         Clone = llvm::Function::Create(Fn->getFunctionType(),
5249                                        llvm::GlobalValue::InternalLinkage,
5250                                        Fn->getAddressSpace(), FDInlineName, M);
5251         Clone->addFnAttr(llvm::Attribute::AlwaysInline);
5252       }
5253       return CGCallee::forDirect(Clone, GD);
5254     }
5255 
5256     // Replaceable builtins provide their own implementation of a builtin. If we
5257     // are in an inline builtin implementation, avoid trivial infinite
5258     // recursion. Honor __attribute__((no_builtin("foo"))) or
5259     // __attribute__((no_builtin)) on the current function unless foo is
5260     // not a predefined library function which means we must generate the
5261     // builtin no matter what.
5262     else if (!IsPredefinedLibFunction || !HasAttributeNoBuiltin)
5263       return CGCallee::forBuiltin(builtinID, FD);
5264   }
5265 
5266   llvm::Constant *CalleePtr = EmitFunctionDeclPointer(CGF.CGM, GD);
5267   if (CGF.CGM.getLangOpts().CUDA && !CGF.CGM.getLangOpts().CUDAIsDevice &&
5268       FD->hasAttr<CUDAGlobalAttr>())
5269     CalleePtr = CGF.CGM.getCUDARuntime().getKernelStub(
5270         cast<llvm::GlobalValue>(CalleePtr->stripPointerCasts()));
5271 
5272   return CGCallee::forDirect(CalleePtr, GD);
5273 }
5274 
5275 CGCallee CodeGenFunction::EmitCallee(const Expr *E) {
5276   E = E->IgnoreParens();
5277 
5278   // Look through function-to-pointer decay.
5279   if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) {
5280     if (ICE->getCastKind() == CK_FunctionToPointerDecay ||
5281         ICE->getCastKind() == CK_BuiltinFnToFnPtr) {
5282       return EmitCallee(ICE->getSubExpr());
5283     }
5284 
5285   // Resolve direct calls.
5286   } else if (auto DRE = dyn_cast<DeclRefExpr>(E)) {
5287     if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
5288       return EmitDirectCallee(*this, FD);
5289     }
5290   } else if (auto ME = dyn_cast<MemberExpr>(E)) {
5291     if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) {
5292       EmitIgnoredExpr(ME->getBase());
5293       return EmitDirectCallee(*this, FD);
5294     }
5295 
5296   // Look through template substitutions.
5297   } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
5298     return EmitCallee(NTTP->getReplacement());
5299 
5300   // Treat pseudo-destructor calls differently.
5301   } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) {
5302     return CGCallee::forPseudoDestructor(PDE);
5303   }
5304 
5305   // Otherwise, we have an indirect reference.
5306   llvm::Value *calleePtr;
5307   QualType functionType;
5308   if (auto ptrType = E->getType()->getAs<PointerType>()) {
5309     calleePtr = EmitScalarExpr(E);
5310     functionType = ptrType->getPointeeType();
5311   } else {
5312     functionType = E->getType();
5313     calleePtr = EmitLValue(E, KnownNonNull).getPointer(*this);
5314   }
5315   assert(functionType->isFunctionType());
5316 
5317   GlobalDecl GD;
5318   if (const auto *VD =
5319           dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee()))
5320     GD = GlobalDecl(VD);
5321 
5322   CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
5323   CGCallee callee(calleeInfo, calleePtr);
5324   return callee;
5325 }
5326 
5327 LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
5328   // Comma expressions just emit their LHS then their RHS as an l-value.
5329   if (E->getOpcode() == BO_Comma) {
5330     EmitIgnoredExpr(E->getLHS());
5331     EnsureInsertPoint();
5332     return EmitLValue(E->getRHS());
5333   }
5334 
5335   if (E->getOpcode() == BO_PtrMemD ||
5336       E->getOpcode() == BO_PtrMemI)
5337     return EmitPointerToDataMemberBinaryExpr(E);
5338 
5339   assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
5340 
5341   // Note that in all of these cases, __block variables need the RHS
5342   // evaluated first just in case the variable gets moved by the RHS.
5343 
5344   switch (getEvaluationKind(E->getType())) {
5345   case TEK_Scalar: {
5346     switch (E->getLHS()->getType().getObjCLifetime()) {
5347     case Qualifiers::OCL_Strong:
5348       return EmitARCStoreStrong(E, /*ignored*/ false).first;
5349 
5350     case Qualifiers::OCL_Autoreleasing:
5351       return EmitARCStoreAutoreleasing(E).first;
5352 
5353     // No reason to do any of these differently.
5354     case Qualifiers::OCL_None:
5355     case Qualifiers::OCL_ExplicitNone:
5356     case Qualifiers::OCL_Weak:
5357       break;
5358     }
5359 
5360     RValue RV = EmitAnyExpr(E->getRHS());
5361     LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);
5362     if (RV.isScalar())
5363       EmitNullabilityCheck(LV, RV.getScalarVal(), E->getExprLoc());
5364     EmitStoreThroughLValue(RV, LV);
5365     if (getLangOpts().OpenMP)
5366       CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
5367                                                                 E->getLHS());
5368     return LV;
5369   }
5370 
5371   case TEK_Complex:
5372     return EmitComplexAssignmentLValue(E);
5373 
5374   case TEK_Aggregate:
5375     return EmitAggExprToLValue(E);
5376   }
5377   llvm_unreachable("bad evaluation kind");
5378 }
5379 
5380 LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
5381   RValue RV = EmitCallExpr(E);
5382 
5383   if (!RV.isScalar())
5384     return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
5385                           AlignmentSource::Decl);
5386 
5387   assert(E->getCallReturnType(getContext())->isReferenceType() &&
5388          "Can't have a scalar return unless the return type is a "
5389          "reference type!");
5390 
5391   return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
5392 }
5393 
5394 LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
5395   // FIXME: This shouldn't require another copy.
5396   return EmitAggExprToLValue(E);
5397 }
5398 
5399 LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
5400   assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
5401          && "binding l-value to type which needs a temporary");
5402   AggValueSlot Slot = CreateAggTemp(E->getType());
5403   EmitCXXConstructExpr(E, Slot);
5404   return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
5405 }
5406 
5407 LValue
5408 CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
5409   return MakeNaturalAlignAddrLValue(EmitCXXTypeidExpr(E), E->getType());
5410 }
5411 
5412 Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
5413   return CGM.GetAddrOfMSGuidDecl(E->getGuidDecl())
5414       .withElementType(ConvertType(E->getType()));
5415 }
5416 
5417 LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) {
5418   return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType(),
5419                         AlignmentSource::Decl);
5420 }
5421 
5422 LValue
5423 CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
5424   AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
5425   Slot.setExternallyDestructed();
5426   EmitAggExpr(E->getSubExpr(), Slot);
5427   EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddress());
5428   return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
5429 }
5430 
5431 LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
5432   RValue RV = EmitObjCMessageExpr(E);
5433 
5434   if (!RV.isScalar())
5435     return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
5436                           AlignmentSource::Decl);
5437 
5438   assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
5439          "Can't have a scalar return unless the return type is a "
5440          "reference type!");
5441 
5442   return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
5443 }
5444 
5445 LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
5446   Address V =
5447     CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector());
5448   return MakeAddrLValue(V, E->getType(), AlignmentSource::Decl);
5449 }
5450 
5451 llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
5452                                              const ObjCIvarDecl *Ivar) {
5453   return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
5454 }
5455 
5456 llvm::Value *
5457 CodeGenFunction::EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface,
5458                                              const ObjCIvarDecl *Ivar) {
5459   llvm::Value *OffsetValue = EmitIvarOffset(Interface, Ivar);
5460   QualType PointerDiffType = getContext().getPointerDiffType();
5461   return Builder.CreateZExtOrTrunc(OffsetValue,
5462                                    getTypes().ConvertType(PointerDiffType));
5463 }
5464 
5465 LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
5466                                           llvm::Value *BaseValue,
5467                                           const ObjCIvarDecl *Ivar,
5468                                           unsigned CVRQualifiers) {
5469   return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
5470                                                    Ivar, CVRQualifiers);
5471 }
5472 
5473 LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
5474   // FIXME: A lot of the code below could be shared with EmitMemberExpr.
5475   llvm::Value *BaseValue = nullptr;
5476   const Expr *BaseExpr = E->getBase();
5477   Qualifiers BaseQuals;
5478   QualType ObjectTy;
5479   if (E->isArrow()) {
5480     BaseValue = EmitScalarExpr(BaseExpr);
5481     ObjectTy = BaseExpr->getType()->getPointeeType();
5482     BaseQuals = ObjectTy.getQualifiers();
5483   } else {
5484     LValue BaseLV = EmitLValue(BaseExpr);
5485     BaseValue = BaseLV.getPointer(*this);
5486     ObjectTy = BaseExpr->getType();
5487     BaseQuals = ObjectTy.getQualifiers();
5488   }
5489 
5490   LValue LV =
5491     EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
5492                       BaseQuals.getCVRQualifiers());
5493   setObjCGCLValueClass(getContext(), E, LV);
5494   return LV;
5495 }
5496 
5497 LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
5498   // Can only get l-value for message expression returning aggregate type
5499   RValue RV = EmitAnyExprToTemp(E);
5500   return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
5501                         AlignmentSource::Decl);
5502 }
5503 
5504 RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee,
5505                                  const CallExpr *E, ReturnValueSlot ReturnValue,
5506                                  llvm::Value *Chain) {
5507   // Get the actual function type. The callee type will always be a pointer to
5508   // function type or a block pointer type.
5509   assert(CalleeType->isFunctionPointerType() &&
5510          "Call must have function pointer type!");
5511 
5512   const Decl *TargetDecl =
5513       OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();
5514 
5515   assert((!isa_and_present<FunctionDecl>(TargetDecl) ||
5516           !cast<FunctionDecl>(TargetDecl)->isImmediateFunction()) &&
5517          "trying to emit a call to an immediate function");
5518 
5519   CalleeType = getContext().getCanonicalType(CalleeType);
5520 
5521   auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType();
5522 
5523   CGCallee Callee = OrigCallee;
5524 
5525   if (SanOpts.has(SanitizerKind::Function) &&
5526       (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) &&
5527       !isa<FunctionNoProtoType>(PointeeType)) {
5528     if (llvm::Constant *PrefixSig =
5529             CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
5530       SanitizerScope SanScope(this);
5531       auto *TypeHash = getUBSanFunctionTypeHash(PointeeType);
5532 
5533       llvm::Type *PrefixSigType = PrefixSig->getType();
5534       llvm::StructType *PrefixStructTy = llvm::StructType::get(
5535           CGM.getLLVMContext(), {PrefixSigType, Int32Ty}, /*isPacked=*/true);
5536 
5537       llvm::Value *CalleePtr = Callee.getFunctionPointer();
5538 
5539       // On 32-bit Arm, the low bit of a function pointer indicates whether
5540       // it's using the Arm or Thumb instruction set. The actual first
5541       // instruction lives at the same address either way, so we must clear
5542       // that low bit before using the function address to find the prefix
5543       // structure.
5544       //
5545       // This applies to both Arm and Thumb target triples, because
5546       // either one could be used in an interworking context where it
5547       // might be passed function pointers of both types.
5548       llvm::Value *AlignedCalleePtr;
5549       if (CGM.getTriple().isARM() || CGM.getTriple().isThumb()) {
5550         llvm::Value *CalleeAddress =
5551             Builder.CreatePtrToInt(CalleePtr, IntPtrTy);
5552         llvm::Value *Mask = llvm::ConstantInt::get(IntPtrTy, ~1);
5553         llvm::Value *AlignedCalleeAddress =
5554             Builder.CreateAnd(CalleeAddress, Mask);
5555         AlignedCalleePtr =
5556             Builder.CreateIntToPtr(AlignedCalleeAddress, CalleePtr->getType());
5557       } else {
5558         AlignedCalleePtr = CalleePtr;
5559       }
5560 
5561       llvm::Value *CalleePrefixStruct = AlignedCalleePtr;
5562       llvm::Value *CalleeSigPtr =
5563           Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 0);
5564       llvm::Value *CalleeSig =
5565           Builder.CreateAlignedLoad(PrefixSigType, CalleeSigPtr, getIntAlign());
5566       llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);
5567 
5568       llvm::BasicBlock *Cont = createBasicBlock("cont");
5569       llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
5570       Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);
5571 
5572       EmitBlock(TypeCheck);
5573       llvm::Value *CalleeTypeHash = Builder.CreateAlignedLoad(
5574           Int32Ty,
5575           Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 1),
5576           getPointerAlign());
5577       llvm::Value *CalleeTypeHashMatch =
5578           Builder.CreateICmpEQ(CalleeTypeHash, TypeHash);
5579       llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
5580                                       EmitCheckTypeDescriptor(CalleeType)};
5581       EmitCheck(std::make_pair(CalleeTypeHashMatch, SanitizerKind::Function),
5582                 SanitizerHandler::FunctionTypeMismatch, StaticData,
5583                 {CalleePtr});
5584 
5585       Builder.CreateBr(Cont);
5586       EmitBlock(Cont);
5587     }
5588   }
5589 
5590   const auto *FnType = cast<FunctionType>(PointeeType);
5591 
5592   // If we are checking indirect calls and this call is indirect, check that the
5593   // function pointer is a member of the bit set for the function type.
5594   if (SanOpts.has(SanitizerKind::CFIICall) &&
5595       (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
5596     SanitizerScope SanScope(this);
5597     EmitSanitizerStatReport(llvm::SanStat_CFI_ICall);
5598 
5599     llvm::Metadata *MD;
5600     if (CGM.getCodeGenOpts().SanitizeCfiICallGeneralizePointers)
5601       MD = CGM.CreateMetadataIdentifierGeneralized(QualType(FnType, 0));
5602     else
5603       MD = CGM.CreateMetadataIdentifierForType(QualType(FnType, 0));
5604 
5605     llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);
5606 
5607     llvm::Value *CalleePtr = Callee.getFunctionPointer();
5608     llvm::Value *TypeTest = Builder.CreateCall(
5609         CGM.getIntrinsic(llvm::Intrinsic::type_test), {CalleePtr, TypeId});
5610 
5611     auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
5612     llvm::Constant *StaticData[] = {
5613         llvm::ConstantInt::get(Int8Ty, CFITCK_ICall),
5614         EmitCheckSourceLocation(E->getBeginLoc()),
5615         EmitCheckTypeDescriptor(QualType(FnType, 0)),
5616     };
5617     if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
5618       EmitCfiSlowPathCheck(SanitizerKind::CFIICall, TypeTest, CrossDsoTypeId,
5619                            CalleePtr, StaticData);
5620     } else {
5621       EmitCheck(std::make_pair(TypeTest, SanitizerKind::CFIICall),
5622                 SanitizerHandler::CFICheckFail, StaticData,
5623                 {CalleePtr, llvm::UndefValue::get(IntPtrTy)});
5624     }
5625   }
5626 
5627   CallArgList Args;
5628   if (Chain)
5629     Args.add(RValue::get(Chain), CGM.getContext().VoidPtrTy);
5630 
5631   // C++17 requires that we evaluate arguments to a call using assignment syntax
5632   // right-to-left, and that we evaluate arguments to certain other operators
5633   // left-to-right. Note that we allow this to override the order dictated by
5634   // the calling convention on the MS ABI, which means that parameter
5635   // destruction order is not necessarily reverse construction order.
5636   // FIXME: Revisit this based on C++ committee response to unimplementability.
5637   EvaluationOrder Order = EvaluationOrder::Default;
5638   if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
5639     if (OCE->isAssignmentOp())
5640       Order = EvaluationOrder::ForceRightToLeft;
5641     else {
5642       switch (OCE->getOperator()) {
5643       case OO_LessLess:
5644       case OO_GreaterGreater:
5645       case OO_AmpAmp:
5646       case OO_PipePipe:
5647       case OO_Comma:
5648       case OO_ArrowStar:
5649         Order = EvaluationOrder::ForceLeftToRight;
5650         break;
5651       default:
5652         break;
5653       }
5654     }
5655   }
5656 
5657   EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), E->arguments(),
5658                E->getDirectCallee(), /*ParamsToSkip*/ 0, Order);
5659 
5660   const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
5661       Args, FnType, /*ChainCall=*/Chain);
5662 
5663   // C99 6.5.2.2p6:
5664   //   If the expression that denotes the called function has a type
5665   //   that does not include a prototype, [the default argument
5666   //   promotions are performed]. If the number of arguments does not
5667   //   equal the number of parameters, the behavior is undefined. If
5668   //   the function is defined with a type that includes a prototype,
5669   //   and either the prototype ends with an ellipsis (, ...) or the
5670   //   types of the arguments after promotion are not compatible with
5671   //   the types of the parameters, the behavior is undefined. If the
5672   //   function is defined with a type that does not include a
5673   //   prototype, and the types of the arguments after promotion are
5674   //   not compatible with those of the parameters after promotion,
5675   //   the behavior is undefined [except in some trivial cases].
5676   // That is, in the general case, we should assume that a call
5677   // through an unprototyped function type works like a *non-variadic*
5678   // call.  The way we make this work is to cast to the exact type
5679   // of the promoted arguments.
5680   //
5681   // Chain calls use this same code path to add the invisible chain parameter
5682   // to the function type.
5683   if (isa<FunctionNoProtoType>(FnType) || Chain) {
5684     llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
5685     int AS = Callee.getFunctionPointer()->getType()->getPointerAddressSpace();
5686     CalleeTy = CalleeTy->getPointerTo(AS);
5687 
5688     llvm::Value *CalleePtr = Callee.getFunctionPointer();
5689     CalleePtr = Builder.CreateBitCast(CalleePtr, CalleeTy, "callee.knr.cast");
5690     Callee.setFunctionPointer(CalleePtr);
5691   }
5692 
5693   // HIP function pointer contains kernel handle when it is used in triple
5694   // chevron. The kernel stub needs to be loaded from kernel handle and used
5695   // as callee.
5696   if (CGM.getLangOpts().HIP && !CGM.getLangOpts().CUDAIsDevice &&
5697       isa<CUDAKernelCallExpr>(E) &&
5698       (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
5699     llvm::Value *Handle = Callee.getFunctionPointer();
5700     auto *Stub = Builder.CreateLoad(
5701         Address(Handle, Handle->getType(), CGM.getPointerAlign()));
5702     Callee.setFunctionPointer(Stub);
5703   }
5704   llvm::CallBase *CallOrInvoke = nullptr;
5705   RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &CallOrInvoke,
5706                          E == MustTailCall, E->getExprLoc());
5707 
5708   // Generate function declaration DISuprogram in order to be used
5709   // in debug info about call sites.
5710   if (CGDebugInfo *DI = getDebugInfo()) {
5711     if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
5712       FunctionArgList Args;
5713       QualType ResTy = BuildFunctionArgList(CalleeDecl, Args);
5714       DI->EmitFuncDeclForCallSite(CallOrInvoke,
5715                                   DI->getFunctionType(CalleeDecl, ResTy, Args),
5716                                   CalleeDecl);
5717     }
5718   }
5719 
5720   return Call;
5721 }
5722 
5723 LValue CodeGenFunction::
5724 EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
5725   Address BaseAddr = Address::invalid();
5726   if (E->getOpcode() == BO_PtrMemI) {
5727     BaseAddr = EmitPointerWithAlignment(E->getLHS());
5728   } else {
5729     BaseAddr = EmitLValue(E->getLHS()).getAddress(*this);
5730   }
5731 
5732   llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
5733   const auto *MPT = E->getRHS()->getType()->castAs<MemberPointerType>();
5734 
5735   LValueBaseInfo BaseInfo;
5736   TBAAAccessInfo TBAAInfo;
5737   Address MemberAddr =
5738     EmitCXXMemberDataPointerAddress(E, BaseAddr, OffsetV, MPT, &BaseInfo,
5739                                     &TBAAInfo);
5740 
5741   return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), BaseInfo, TBAAInfo);
5742 }
5743 
5744 /// Given the address of a temporary variable, produce an r-value of
5745 /// its type.
5746 RValue CodeGenFunction::convertTempToRValue(Address addr,
5747                                             QualType type,
5748                                             SourceLocation loc) {
5749   LValue lvalue = MakeAddrLValue(addr, type, AlignmentSource::Decl);
5750   switch (getEvaluationKind(type)) {
5751   case TEK_Complex:
5752     return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
5753   case TEK_Aggregate:
5754     return lvalue.asAggregateRValue(*this);
5755   case TEK_Scalar:
5756     return RValue::get(EmitLoadOfScalar(lvalue, loc));
5757   }
5758   llvm_unreachable("bad evaluation kind");
5759 }
5760 
5761 void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
5762   assert(Val->getType()->isFPOrFPVectorTy());
5763   if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
5764     return;
5765 
5766   llvm::MDBuilder MDHelper(getLLVMContext());
5767   llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);
5768 
5769   cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
5770 }
5771 
5772 void CodeGenFunction::SetSqrtFPAccuracy(llvm::Value *Val) {
5773   llvm::Type *EltTy = Val->getType()->getScalarType();
5774   if (!EltTy->isFloatTy())
5775     return;
5776 
5777   if ((getLangOpts().OpenCL &&
5778        !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
5779       (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
5780        !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
5781     // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 3ulp
5782     //
5783     // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
5784     // build option allows an application to specify that single precision
5785     // floating-point divide (x/y and 1/x) and sqrt used in the program
5786     // source are correctly rounded.
5787     //
5788     // TODO: CUDA has a prec-sqrt flag
5789     SetFPAccuracy(Val, 3.0f);
5790   }
5791 }
5792 
5793 void CodeGenFunction::SetDivFPAccuracy(llvm::Value *Val) {
5794   llvm::Type *EltTy = Val->getType()->getScalarType();
5795   if (!EltTy->isFloatTy())
5796     return;
5797 
5798   if ((getLangOpts().OpenCL &&
5799        !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
5800       (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
5801        !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
5802     // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
5803     //
5804     // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
5805     // build option allows an application to specify that single precision
5806     // floating-point divide (x/y and 1/x) and sqrt used in the program
5807     // source are correctly rounded.
5808     //
5809     // TODO: CUDA has a prec-div flag
5810     SetFPAccuracy(Val, 2.5f);
5811   }
5812 }
5813 
5814 namespace {
5815   struct LValueOrRValue {
5816     LValue LV;
5817     RValue RV;
5818   };
5819 }
5820 
5821 static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
5822                                            const PseudoObjectExpr *E,
5823                                            bool forLValue,
5824                                            AggValueSlot slot) {
5825   SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;
5826 
5827   // Find the result expression, if any.
5828   const Expr *resultExpr = E->getResultExpr();
5829   LValueOrRValue result;
5830 
5831   for (PseudoObjectExpr::const_semantics_iterator
5832          i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
5833     const Expr *semantic = *i;
5834 
5835     // If this semantic expression is an opaque value, bind it
5836     // to the result of its source expression.
5837     if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
5838       // Skip unique OVEs.
5839       if (ov->isUnique()) {
5840         assert(ov != resultExpr &&
5841                "A unique OVE cannot be used as the result expression");
5842         continue;
5843       }
5844 
5845       // If this is the result expression, we may need to evaluate
5846       // directly into the slot.
5847       typedef CodeGenFunction::OpaqueValueMappingData OVMA;
5848       OVMA opaqueData;
5849       if (ov == resultExpr && ov->isPRValue() && !forLValue &&
5850           CodeGenFunction::hasAggregateEvaluationKind(ov->getType())) {
5851         CGF.EmitAggExpr(ov->getSourceExpr(), slot);
5852         LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(),
5853                                        AlignmentSource::Decl);
5854         opaqueData = OVMA::bind(CGF, ov, LV);
5855         result.RV = slot.asRValue();
5856 
5857       // Otherwise, emit as normal.
5858       } else {
5859         opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
5860 
5861         // If this is the result, also evaluate the result now.
5862         if (ov == resultExpr) {
5863           if (forLValue)
5864             result.LV = CGF.EmitLValue(ov);
5865           else
5866             result.RV = CGF.EmitAnyExpr(ov, slot);
5867         }
5868       }
5869 
5870       opaques.push_back(opaqueData);
5871 
5872     // Otherwise, if the expression is the result, evaluate it
5873     // and remember the result.
5874     } else if (semantic == resultExpr) {
5875       if (forLValue)
5876         result.LV = CGF.EmitLValue(semantic);
5877       else
5878         result.RV = CGF.EmitAnyExpr(semantic, slot);
5879 
5880     // Otherwise, evaluate the expression in an ignored context.
5881     } else {
5882       CGF.EmitIgnoredExpr(semantic);
5883     }
5884   }
5885 
5886   // Unbind all the opaques now.
5887   for (unsigned i = 0, e = opaques.size(); i != e; ++i)
5888     opaques[i].unbind(CGF);
5889 
5890   return result;
5891 }
5892 
5893 RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
5894                                                AggValueSlot slot) {
5895   return emitPseudoObjectExpr(*this, E, false, slot).RV;
5896 }
5897 
5898 LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
5899   return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
5900 }
5901