1 //===--- CGClass.cpp - Emit LLVM Code for C++ classes ---------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
// This contains code dealing with code generation of C++ classes
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CGBlocks.h"
15 #include "CGCXXABI.h"
16 #include "CGDebugInfo.h"
17 #include "CGRecordLayout.h"
18 #include "CodeGenFunction.h"
19 #include "clang/AST/CXXInheritance.h"
20 #include "clang/AST/DeclTemplate.h"
21 #include "clang/AST/EvaluatedExprVisitor.h"
22 #include "clang/AST/RecordLayout.h"
23 #include "clang/AST/StmtCXX.h"
24 #include "clang/Basic/TargetBuiltins.h"
25 #include "clang/CodeGen/CGFunctionInfo.h"
26 #include "clang/Frontend/CodeGenOptions.h"
27 #include "llvm/IR/Intrinsics.h"
28 
29 using namespace clang;
30 using namespace CodeGen;
31 
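/// Compute the static offset of a base-class subobject by walking a path of
/// non-virtual inheritance steps from DerivedClass. Illustrative example
/// (exact layout is ABI-dependent): given
///   struct A { int a; }; struct B { int b; }; struct C : A, B {};
/// the one-step path [B] from C yields B's offset within C, typically
/// sizeof(A) here.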
32 static CharUnits
33 ComputeNonVirtualBaseClassOffset(ASTContext &Context,
34                                  const CXXRecordDecl *DerivedClass,
35                                  CastExpr::path_const_iterator Start,
36                                  CastExpr::path_const_iterator End) {
37   CharUnits Offset = CharUnits::Zero();
38 
39   const CXXRecordDecl *RD = DerivedClass;
40 
41   for (CastExpr::path_const_iterator I = Start; I != End; ++I) {
42     const CXXBaseSpecifier *Base = *I;
43     assert(!Base->isVirtual() && "Should not see virtual bases here!");
44 
45     // Get the layout.
46     const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
47 
48     const CXXRecordDecl *BaseDecl =
49       cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
50 
51     // Add the offset.
52     Offset += Layout.getBaseClassOffset(BaseDecl);
53 
54     RD = BaseDecl;
55   }
56 
57   return Offset;
58 }
59 
60 llvm::Constant *
61 CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
62                                    CastExpr::path_const_iterator PathBegin,
63                                    CastExpr::path_const_iterator PathEnd) {
64   assert(PathBegin != PathEnd && "Base path should not be empty!");
65 
66   CharUnits Offset =
67     ComputeNonVirtualBaseClassOffset(getContext(), ClassDecl,
68                                      PathBegin, PathEnd);
69   if (Offset.isZero())
70     return nullptr;
71 
  llvm::Type *PtrDiffTy =
      Types.ConvertType(getContext().getPointerDiffType());
74 
75   return llvm::ConstantInt::get(PtrDiffTy, Offset.getQuantity());
76 }
77 
78 /// Gets the address of a direct base class within a complete object.
79 /// This should only be used for (1) non-virtual bases or (2) virtual bases
80 /// when the type is known to be complete (e.g. in complete destructors).
81 ///
82 /// The object pointed to by 'This' is assumed to be non-null.
83 llvm::Value *
84 CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
85                                                    const CXXRecordDecl *Derived,
86                                                    const CXXRecordDecl *Base,
87                                                    bool BaseIsVirtual) {
88   // 'this' must be a pointer (in some address space) to Derived.
89   assert(This->getType()->isPointerTy() &&
90          cast<llvm::PointerType>(This->getType())->getElementType()
91            == ConvertType(Derived));
92 
93   // Compute the offset of the virtual base.
94   CharUnits Offset;
95   const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived);
96   if (BaseIsVirtual)
97     Offset = Layout.getVBaseClassOffset(Base);
98   else
99     Offset = Layout.getBaseClassOffset(Base);
100 
101   // Shift and cast down to the base type.
102   // TODO: for complete types, this should be possible with a GEP.
103   llvm::Value *V = This;
104   if (Offset.isPositive()) {
105     V = Builder.CreateBitCast(V, Int8PtrTy);
106     V = Builder.CreateConstInBoundsGEP1_64(V, Offset.getQuantity());
107   }
108   V = Builder.CreateBitCast(V, ConvertType(Base)->getPointerTo());
109 
110   return V;
111 }
112 
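/// Apply a fixed non-virtual offset and/or a dynamically loaded virtual-base
/// offset to a pointer; at least one of the two components must be present.
/// The result is computed as an i8 GEP: ptr + nonVirtualOffset +
/// virtualOffset.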
113 static llvm::Value *
114 ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ptr,
115                                 CharUnits nonVirtualOffset,
116                                 llvm::Value *virtualOffset) {
117   // Assert that we have something to do.
118   assert(!nonVirtualOffset.isZero() || virtualOffset != nullptr);
119 
120   // Compute the offset from the static and dynamic components.
121   llvm::Value *baseOffset;
122   if (!nonVirtualOffset.isZero()) {
123     baseOffset = llvm::ConstantInt::get(CGF.PtrDiffTy,
124                                         nonVirtualOffset.getQuantity());
125     if (virtualOffset) {
126       baseOffset = CGF.Builder.CreateAdd(virtualOffset, baseOffset);
127     }
128   } else {
129     baseOffset = virtualOffset;
130   }
131 
132   // Apply the base offset.
133   ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8PtrTy);
134   ptr = CGF.Builder.CreateInBoundsGEP(ptr, baseOffset, "add.ptr");
135   return ptr;
136 }
137 
138 llvm::Value *CodeGenFunction::GetAddressOfBaseClass(
139     llvm::Value *Value, const CXXRecordDecl *Derived,
140     CastExpr::path_const_iterator PathBegin,
141     CastExpr::path_const_iterator PathEnd, bool NullCheckValue,
142     SourceLocation Loc) {
143   assert(PathBegin != PathEnd && "Base path should not be empty!");
144 
145   CastExpr::path_const_iterator Start = PathBegin;
146   const CXXRecordDecl *VBase = nullptr;
147 
148   // Sema has done some convenient canonicalization here: if the
149   // access path involved any virtual steps, the conversion path will
150   // *start* with a step down to the correct virtual base subobject,
151   // and hence will not require any further steps.
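  //
  // Illustrative example (not in the original source): given
  //   struct T {}; struct V : T {};
  //   struct B : virtual V {}; struct D : B {};
  // a D* -> T* conversion path is canonicalized to [virtual V, T] rather
  // than [B, virtual V, T]; the single virtual step comes first, and the
  // rest is a non-virtual walk within the V subobject.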
152   if ((*Start)->isVirtual()) {
153     VBase =
154       cast<CXXRecordDecl>((*Start)->getType()->getAs<RecordType>()->getDecl());
155     ++Start;
156   }
157 
158   // Compute the static offset of the ultimate destination within its
159   // allocating subobject (the virtual base, if there is one, or else
160   // the "complete" object that we see).
161   CharUnits NonVirtualOffset =
162     ComputeNonVirtualBaseClassOffset(getContext(), VBase ? VBase : Derived,
163                                      Start, PathEnd);
164 
165   // If there's a virtual step, we can sometimes "devirtualize" it.
166   // For now, that's limited to when the derived type is final.
167   // TODO: "devirtualize" this for accesses to known-complete objects.
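  //
  // For example (illustrative): in 'struct D final : virtual B {}', a
  // D* -> B* conversion can use B's statically known offset within D's
  // layout, because no object of a more-derived type can contain this D.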
168   if (VBase && Derived->hasAttr<FinalAttr>()) {
169     const ASTRecordLayout &layout = getContext().getASTRecordLayout(Derived);
170     CharUnits vBaseOffset = layout.getVBaseClassOffset(VBase);
171     NonVirtualOffset += vBaseOffset;
172     VBase = nullptr; // we no longer have a virtual step
173   }
174 
175   // Get the base pointer type.
176   llvm::Type *BasePtrTy =
177     ConvertType((PathEnd[-1])->getType())->getPointerTo();
178 
179   QualType DerivedTy = getContext().getRecordType(Derived);
180   CharUnits DerivedAlign = getContext().getTypeAlignInChars(DerivedTy);
181 
182   // If the static offset is zero and we don't have a virtual step,
183   // just do a bitcast; null checks are unnecessary.
184   if (NonVirtualOffset.isZero() && !VBase) {
185     if (sanitizePerformTypeCheck()) {
186       EmitTypeCheck(TCK_Upcast, Loc, Value, DerivedTy, DerivedAlign,
187                     !NullCheckValue);
188     }
189     return Builder.CreateBitCast(Value, BasePtrTy);
190   }
191 
192   llvm::BasicBlock *origBB = nullptr;
193   llvm::BasicBlock *endBB = nullptr;
194 
195   // Skip over the offset (and the vtable load) if we're supposed to
196   // null-check the pointer.
197   if (NullCheckValue) {
198     origBB = Builder.GetInsertBlock();
199     llvm::BasicBlock *notNullBB = createBasicBlock("cast.notnull");
200     endBB = createBasicBlock("cast.end");
201 
202     llvm::Value *isNull = Builder.CreateIsNull(Value);
203     Builder.CreateCondBr(isNull, endBB, notNullBB);
204     EmitBlock(notNullBB);
205   }
206 
207   if (sanitizePerformTypeCheck()) {
208     EmitTypeCheck(VBase ? TCK_UpcastToVirtualBase : TCK_Upcast, Loc, Value,
209                   DerivedTy, DerivedAlign, true);
210   }
211 
212   // Compute the virtual offset.
213   llvm::Value *VirtualOffset = nullptr;
214   if (VBase) {
215     VirtualOffset =
216       CGM.getCXXABI().GetVirtualBaseClassOffset(*this, Value, Derived, VBase);
217   }
218 
219   // Apply both offsets.
220   Value = ApplyNonVirtualAndVirtualOffset(*this, Value,
221                                           NonVirtualOffset,
222                                           VirtualOffset);
223 
224   // Cast to the destination type.
225   Value = Builder.CreateBitCast(Value, BasePtrTy);
226 
227   // Build a phi if we needed a null check.
228   if (NullCheckValue) {
229     llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
230     Builder.CreateBr(endBB);
231     EmitBlock(endBB);
232 
233     llvm::PHINode *PHI = Builder.CreatePHI(BasePtrTy, 2, "cast.result");
234     PHI->addIncoming(Value, notNullBB);
235     PHI->addIncoming(llvm::Constant::getNullValue(BasePtrTy), origBB);
236     Value = PHI;
237   }
238 
239   return Value;
240 }
241 
242 llvm::Value *
243 CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
244                                           const CXXRecordDecl *Derived,
245                                         CastExpr::path_const_iterator PathBegin,
246                                           CastExpr::path_const_iterator PathEnd,
247                                           bool NullCheckValue) {
248   assert(PathBegin != PathEnd && "Base path should not be empty!");
249 
250   QualType DerivedTy =
251     getContext().getCanonicalType(getContext().getTagDeclType(Derived));
252   llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo();
253 
254   llvm::Value *NonVirtualOffset =
255     CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd);
256 
257   if (!NonVirtualOffset) {
    // No offset; we can just cast back.
259     return Builder.CreateBitCast(Value, DerivedPtrTy);
260   }
261 
262   llvm::BasicBlock *CastNull = nullptr;
263   llvm::BasicBlock *CastNotNull = nullptr;
264   llvm::BasicBlock *CastEnd = nullptr;
265 
266   if (NullCheckValue) {
267     CastNull = createBasicBlock("cast.null");
268     CastNotNull = createBasicBlock("cast.notnull");
269     CastEnd = createBasicBlock("cast.end");
270 
271     llvm::Value *IsNull = Builder.CreateIsNull(Value);
272     Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
273     EmitBlock(CastNotNull);
274   }
275 
276   // Apply the offset.
277   Value = Builder.CreateBitCast(Value, Int8PtrTy);
278   Value = Builder.CreateGEP(Value, Builder.CreateNeg(NonVirtualOffset),
279                             "sub.ptr");
280 
281   // Just cast.
282   Value = Builder.CreateBitCast(Value, DerivedPtrTy);
283 
284   if (NullCheckValue) {
285     Builder.CreateBr(CastEnd);
286     EmitBlock(CastNull);
287     Builder.CreateBr(CastEnd);
288     EmitBlock(CastEnd);
289 
290     llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
291     PHI->addIncoming(Value, CastNotNull);
292     PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()),
293                      CastNull);
294     Value = PHI;
295   }
296 
297   return Value;
298 }
299 
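/// GetVTTParameter - Return the VTT (virtual table table) parameter to pass
/// to a base-subobject constructor or destructor call, or null if none is
/// needed. As a sketch of the Itanium C++ ABI convention: the complete-object
/// constructor of a class with virtual bases passes the address of a slot in
/// (or derived from) the class's VTT, so that base-variant ctors/dtors can
/// install construction vtables instead of the final ones.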
300 llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD,
301                                               bool ForVirtualBase,
302                                               bool Delegating) {
303   if (!CGM.getCXXABI().NeedsVTTParameter(GD)) {
304     // This constructor/destructor does not need a VTT parameter.
305     return nullptr;
306   }
307 
308   const CXXRecordDecl *RD = cast<CXXMethodDecl>(CurCodeDecl)->getParent();
309   const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent();
310 
  llvm::Value *VTT;
  uint64_t SubVTTIndex;
314 
315   if (Delegating) {
316     // If this is a delegating constructor call, just load the VTT.
317     return LoadCXXVTT();
318   } else if (RD == Base) {
319     // If the record matches the base, this is the complete ctor/dtor
320     // variant calling the base variant in a class with virtual bases.
321     assert(!CGM.getCXXABI().NeedsVTTParameter(CurGD) &&
322            "doing no-op VTT offset in base dtor/ctor?");
323     assert(!ForVirtualBase && "Can't have same class as virtual base!");
324     SubVTTIndex = 0;
325   } else {
326     const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
327     CharUnits BaseOffset = ForVirtualBase ?
328       Layout.getVBaseClassOffset(Base) :
329       Layout.getBaseClassOffset(Base);
330 
331     SubVTTIndex =
332       CGM.getVTables().getSubVTTIndex(RD, BaseSubobject(Base, BaseOffset));
333     assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!");
334   }
335 
336   if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
337     // A VTT parameter was passed to the constructor, use it.
338     VTT = LoadCXXVTT();
339     VTT = Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex);
340   } else {
341     // We're the complete constructor, so get the VTT by name.
342     VTT = CGM.getVTables().GetAddrOfVTT(RD);
343     VTT = Builder.CreateConstInBoundsGEP2_64(VTT, 0, SubVTTIndex);
344   }
345 
346   return VTT;
347 }
348 
349 namespace {
350   /// Call the destructor for a direct base class.
351   struct CallBaseDtor : EHScopeStack::Cleanup {
352     const CXXRecordDecl *BaseClass;
353     bool BaseIsVirtual;
354     CallBaseDtor(const CXXRecordDecl *Base, bool BaseIsVirtual)
355       : BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {}
356 
357     void Emit(CodeGenFunction &CGF, Flags flags) override {
358       const CXXRecordDecl *DerivedClass =
359         cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent();
360 
361       const CXXDestructorDecl *D = BaseClass->getDestructor();
362       llvm::Value *Addr =
363         CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThis(),
364                                                   DerivedClass, BaseClass,
365                                                   BaseIsVirtual);
366       CGF.EmitCXXDestructorCall(D, Dtor_Base, BaseIsVirtual,
367                                 /*Delegating=*/false, Addr);
368     }
369   };
370 
371   /// A visitor which checks whether an initializer uses 'this' in a
372   /// way which requires the vtable to be properly set.
373   struct DynamicThisUseChecker : EvaluatedExprVisitor<DynamicThisUseChecker> {
374     typedef EvaluatedExprVisitor<DynamicThisUseChecker> super;
375 
376     bool UsesThis;
377 
378     DynamicThisUseChecker(ASTContext &C) : super(C), UsesThis(false) {}
379 
380     // Black-list all explicit and implicit references to 'this'.
381     //
382     // Do we need to worry about external references to 'this' derived
383     // from arbitrary code?  If so, then anything which runs arbitrary
384     // external code might potentially access the vtable.
385     void VisitCXXThisExpr(CXXThisExpr *E) { UsesThis = true; }
386   };
387 }
388 
389 static bool BaseInitializerUsesThis(ASTContext &C, const Expr *Init) {
390   DynamicThisUseChecker Checker(C);
391   Checker.Visit(const_cast<Expr*>(Init));
392   return Checker.UsesThis;
393 }
394 
395 static void EmitBaseInitializer(CodeGenFunction &CGF,
396                                 const CXXRecordDecl *ClassDecl,
397                                 CXXCtorInitializer *BaseInit,
398                                 CXXCtorType CtorType) {
399   assert(BaseInit->isBaseInitializer() &&
400          "Must have base initializer!");
401 
402   llvm::Value *ThisPtr = CGF.LoadCXXThis();
403 
404   const Type *BaseType = BaseInit->getBaseClass();
405   CXXRecordDecl *BaseClassDecl =
406     cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());
407 
408   bool isBaseVirtual = BaseInit->isBaseVirtual();
409 
410   // The base constructor doesn't construct virtual bases.
411   if (CtorType == Ctor_Base && isBaseVirtual)
412     return;
413 
414   // If the initializer for the base (other than the constructor
415   // itself) accesses 'this' in any way, we need to initialize the
416   // vtables.
417   if (BaseInitializerUsesThis(CGF.getContext(), BaseInit->getInit()))
418     CGF.InitializeVTablePointers(ClassDecl);
419 
420   // We can pretend to be a complete class because it only matters for
421   // virtual bases, and we only do virtual bases for complete ctors.
422   llvm::Value *V =
423     CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl,
424                                               BaseClassDecl,
425                                               isBaseVirtual);
426   CharUnits Alignment = CGF.getContext().getTypeAlignInChars(BaseType);
427   AggValueSlot AggSlot =
428     AggValueSlot::forAddr(V, Alignment, Qualifiers(),
429                           AggValueSlot::IsDestructed,
430                           AggValueSlot::DoesNotNeedGCBarriers,
431                           AggValueSlot::IsNotAliased);
432 
433   CGF.EmitAggExpr(BaseInit->getInit(), AggSlot);
434 
435   if (CGF.CGM.getLangOpts().Exceptions &&
436       !BaseClassDecl->hasTrivialDestructor())
437     CGF.EHStack.pushCleanup<CallBaseDtor>(EHCleanup, BaseClassDecl,
438                                           isBaseVirtual);
439 }
440 
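/// Emit the initialization of an array member by generating one loop per
/// array dimension, recursing one level per entry in ArrayIndexes. Sketch of
/// the control flow produced for a member 'T a[2][3]' with index variables
/// i0 and i1 (names illustrative):
///   for (i0 = 0; i0 < 2; ++i0)
///     for (i1 = 0; i1 < 3; ++i1)
///       initialize a[i0][i1];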
441 static void EmitAggMemberInitializer(CodeGenFunction &CGF,
442                                      LValue LHS,
443                                      Expr *Init,
444                                      llvm::Value *ArrayIndexVar,
445                                      QualType T,
446                                      ArrayRef<VarDecl *> ArrayIndexes,
447                                      unsigned Index) {
448   if (Index == ArrayIndexes.size()) {
449     LValue LV = LHS;
450 
451     if (ArrayIndexVar) {
452       // If we have an array index variable, load it and use it as an offset.
453       // Then, increment the value.
454       llvm::Value *Dest = LHS.getAddress();
455       llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar);
456       Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress");
457       llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1);
458       Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc");
459       CGF.Builder.CreateStore(Next, ArrayIndexVar);
460 
461       // Update the LValue.
462       LV.setAddress(Dest);
463       CharUnits Align = CGF.getContext().getTypeAlignInChars(T);
464       LV.setAlignment(std::min(Align, LV.getAlignment()));
465     }
466 
467     switch (CGF.getEvaluationKind(T)) {
468     case TEK_Scalar:
469       CGF.EmitScalarInit(Init, /*decl*/ nullptr, LV, false);
470       break;
471     case TEK_Complex:
472       CGF.EmitComplexExprIntoLValue(Init, LV, /*isInit*/ true);
473       break;
474     case TEK_Aggregate: {
475       AggValueSlot Slot =
476         AggValueSlot::forLValue(LV,
477                                 AggValueSlot::IsDestructed,
478                                 AggValueSlot::DoesNotNeedGCBarriers,
479                                 AggValueSlot::IsNotAliased);
480 
481       CGF.EmitAggExpr(Init, Slot);
482       break;
483     }
484     }
485 
486     return;
487   }
488 
489   const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(T);
490   assert(Array && "Array initialization without the array type?");
491   llvm::Value *IndexVar
492     = CGF.GetAddrOfLocalVar(ArrayIndexes[Index]);
493   assert(IndexVar && "Array index variable not loaded");
494 
495   // Initialize this index variable to zero.
496   llvm::Value* Zero
497     = llvm::Constant::getNullValue(
498                               CGF.ConvertType(CGF.getContext().getSizeType()));
499   CGF.Builder.CreateStore(Zero, IndexVar);
500 
501   // Start the loop with a block that tests the condition.
502   llvm::BasicBlock *CondBlock = CGF.createBasicBlock("for.cond");
503   llvm::BasicBlock *AfterFor = CGF.createBasicBlock("for.end");
504 
505   CGF.EmitBlock(CondBlock);
506 
507   llvm::BasicBlock *ForBody = CGF.createBasicBlock("for.body");
  // Generate: if (loop-index < number-of-elements) fall through to the loop
  // body; otherwise, go to the block after the for-loop.
510   uint64_t NumElements = Array->getSize().getZExtValue();
511   llvm::Value *Counter = CGF.Builder.CreateLoad(IndexVar);
  llvm::Value *NumElementsVal =
    llvm::ConstantInt::get(Counter->getType(), NumElements);
  llvm::Value *IsLess = CGF.Builder.CreateICmpULT(Counter, NumElementsVal,
                                                  "isless");
516 
517   // If the condition is true, execute the body.
518   CGF.Builder.CreateCondBr(IsLess, ForBody, AfterFor);
519 
520   CGF.EmitBlock(ForBody);
521   llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc");
522 
523   // Inside the loop body recurse to emit the inner loop or, eventually, the
524   // constructor call.
525   EmitAggMemberInitializer(CGF, LHS, Init, ArrayIndexVar,
526                            Array->getElementType(), ArrayIndexes, Index + 1);
527 
528   CGF.EmitBlock(ContinueBlock);
529 
530   // Emit the increment of the loop counter.
531   llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1);
532   Counter = CGF.Builder.CreateLoad(IndexVar);
533   NextVal = CGF.Builder.CreateAdd(Counter, NextVal, "inc");
534   CGF.Builder.CreateStore(NextVal, IndexVar);
535 
536   // Finally, branch back up to the condition for the next iteration.
537   CGF.EmitBranch(CondBlock);
538 
539   // Emit the fall-through block.
540   CGF.EmitBlock(AfterFor, true);
541 }
542 
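/// Check whether a special member function can be emitted as a raw memory
/// copy. For example (illustrative): 'struct P { int x, y; };' has a trivial
/// copy constructor, so copy-constructing a P member can be lowered to a
/// memcpy; and a defaulted union copy/move must copy the object
/// representation wholesale, since codegen cannot know which member is
/// active.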
543 static bool isMemcpyEquivalentSpecialMember(const CXXMethodDecl *D) {
544   auto *CD = dyn_cast<CXXConstructorDecl>(D);
545   if (!(CD && CD->isCopyOrMoveConstructor()) &&
546       !D->isCopyAssignmentOperator() && !D->isMoveAssignmentOperator())
547     return false;
548 
549   // We can emit a memcpy for a trivial copy or move constructor/assignment.
550   if (D->isTrivial() && !D->getParent()->mayInsertExtraPadding())
551     return true;
552 
553   // We *must* emit a memcpy for a defaulted union copy or move op.
554   if (D->getParent()->isUnion() && D->isDefaulted())
555     return true;
556 
557   return false;
558 }
559 
560 static void EmitMemberInitializer(CodeGenFunction &CGF,
561                                   const CXXRecordDecl *ClassDecl,
562                                   CXXCtorInitializer *MemberInit,
563                                   const CXXConstructorDecl *Constructor,
564                                   FunctionArgList &Args) {
565   ApplyDebugLocation Loc(CGF, MemberInit->getSourceLocation());
566   assert(MemberInit->isAnyMemberInitializer() &&
567          "Must have member initializer!");
568   assert(MemberInit->getInit() && "Must have initializer!");
569 
  // Find the non-static data member being initialized.
571   FieldDecl *Field = MemberInit->getAnyMember();
572   QualType FieldType = Field->getType();
573 
574   llvm::Value *ThisPtr = CGF.LoadCXXThis();
575   QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
576   LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
577 
578   if (MemberInit->isIndirectMemberInitializer()) {
579     // If we are initializing an anonymous union field, drill down to
580     // the field.
581     IndirectFieldDecl *IndirectField = MemberInit->getIndirectMember();
582     for (const auto *I : IndirectField->chain())
583       LHS = CGF.EmitLValueForFieldInitialization(LHS, cast<FieldDecl>(I));
584     FieldType = MemberInit->getIndirectMember()->getAnonField()->getType();
585   } else {
586     LHS = CGF.EmitLValueForFieldInitialization(LHS, Field);
587   }
588 
589   // Special case: if we are in a copy or move constructor, and we are copying
590   // an array of PODs or classes with trivial copy constructors, ignore the
591   // AST and perform the copy we know is equivalent.
592   // FIXME: This is hacky at best... if we had a bit more explicit information
593   // in the AST, we could generalize it more easily.
594   const ConstantArrayType *Array
595     = CGF.getContext().getAsConstantArrayType(FieldType);
596   if (Array && Constructor->isDefaulted() &&
597       Constructor->isCopyOrMoveConstructor()) {
598     QualType BaseElementTy = CGF.getContext().getBaseElementType(Array);
599     CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());
600     if (BaseElementTy.isPODType(CGF.getContext()) ||
601         (CE && isMemcpyEquivalentSpecialMember(CE->getConstructor()))) {
602       unsigned SrcArgIndex =
603           CGF.CGM.getCXXABI().getSrcArgforCopyCtor(Constructor, Args);
604       llvm::Value *SrcPtr
605         = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex]));
606       LValue ThisRHSLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
607       LValue Src = CGF.EmitLValueForFieldInitialization(ThisRHSLV, Field);
608 
609       // Copy the aggregate.
610       CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType,
611                             LHS.isVolatileQualified());
612       return;
613     }
614   }
615 
616   ArrayRef<VarDecl *> ArrayIndexes;
617   if (MemberInit->getNumArrayIndices())
618     ArrayIndexes = MemberInit->getArrayIndexes();
619   CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit(), ArrayIndexes);
620 }
621 
622 void CodeGenFunction::EmitInitializerForField(
623     FieldDecl *Field, LValue LHS, Expr *Init,
624     ArrayRef<VarDecl *> ArrayIndexes) {
625   QualType FieldType = Field->getType();
626   switch (getEvaluationKind(FieldType)) {
627   case TEK_Scalar:
628     if (LHS.isSimple()) {
629       EmitExprAsInit(Init, Field, LHS, false);
630     } else {
631       RValue RHS = RValue::get(EmitScalarExpr(Init));
632       EmitStoreThroughLValue(RHS, LHS);
633     }
634     break;
635   case TEK_Complex:
636     EmitComplexExprIntoLValue(Init, LHS, /*isInit*/ true);
637     break;
638   case TEK_Aggregate: {
639     llvm::Value *ArrayIndexVar = nullptr;
640     if (ArrayIndexes.size()) {
641       llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
642 
643       // The LHS is a pointer to the first object we'll be constructing, as
644       // a flat array.
645       QualType BaseElementTy = getContext().getBaseElementType(FieldType);
646       llvm::Type *BasePtr = ConvertType(BaseElementTy);
647       BasePtr = llvm::PointerType::getUnqual(BasePtr);
648       llvm::Value *BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(),
649                                                        BasePtr);
650       LHS = MakeAddrLValue(BaseAddrPtr, BaseElementTy);
651 
652       // Create an array index that will be used to walk over all of the
653       // objects we're constructing.
654       ArrayIndexVar = CreateTempAlloca(SizeTy, "object.index");
655       llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
      Builder.CreateStore(Zero, ArrayIndexVar);

      // Emit the block variables for the array indices, if any.
660       for (unsigned I = 0, N = ArrayIndexes.size(); I != N; ++I)
661         EmitAutoVarDecl(*ArrayIndexes[I]);
662     }
663 
664     EmitAggMemberInitializer(*this, LHS, Init, ArrayIndexVar, FieldType,
665                              ArrayIndexes, 0);
666   }
667   }
668 
669   // Ensure that we destroy this object if an exception is thrown
670   // later in the constructor.
671   QualType::DestructionKind dtorKind = FieldType.isDestructedType();
672   if (needsEHCleanup(dtorKind))
673     pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
674 }
675 
676 /// Checks whether the given constructor is a valid subject for the
677 /// complete-to-base constructor delegation optimization, i.e.
678 /// emitting the complete constructor as a simple call to the base
679 /// constructor.
680 static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor) {
681 
682   // Currently we disable the optimization for classes with virtual
683   // bases because (1) the addresses of parameter variables need to be
684   // consistent across all initializers but (2) the delegate function
685   // call necessarily creates a second copy of the parameter variable.
686   //
687   // The limiting example (purely theoretical AFAIK):
688   //   struct A { A(int &c) { c++; } };
689   //   struct B : virtual A {
690   //     B(int count) : A(count) { printf("%d\n", count); }
691   //   };
692   // ...although even this example could in principle be emitted as a
693   // delegation since the address of the parameter doesn't escape.
694   if (Ctor->getParent()->getNumVBases()) {
695     // TODO: white-list trivial vbase initializers.  This case wouldn't
696     // be subject to the restrictions below.
697 
698     // TODO: white-list cases where:
699     //  - there are no non-reference parameters to the constructor
700     //  - the initializers don't access any non-reference parameters
701     //  - the initializers don't take the address of non-reference
702     //    parameters
703     //  - etc.
704     // If we ever add any of the above cases, remember that:
705     //  - function-try-blocks will always blacklist this optimization
706     //  - we need to perform the constructor prologue and cleanup in
707     //    EmitConstructorBody.
708 
709     return false;
710   }
711 
712   // We also disable the optimization for variadic functions because
713   // it's impossible to "re-pass" varargs.
714   if (Ctor->getType()->getAs<FunctionProtoType>()->isVariadic())
715     return false;
716 
717   // FIXME: Decide if we can do a delegation of a delegating constructor.
718   if (Ctor->isDelegatingConstructor())
719     return false;
720 
721   return true;
722 }
723 
724 // Emit code in ctor (Prologue==true) or dtor (Prologue==false)
725 // to poison the extra field paddings inserted under
726 // -fsanitize-address-field-padding=1|2.
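//
// A sketch of the emitted runtime calls for one poisonable padding region
// (names per the declarations below; offsets are illustrative):
//   __asan_poison_intra_object_redzone(this + end_of_field_i, padding_size);
// with the matching __asan_unpoison_intra_object_redzone call in the dtor.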
727 void CodeGenFunction::EmitAsanPrologueOrEpilogue(bool Prologue) {
728   ASTContext &Context = getContext();
729   const CXXRecordDecl *ClassDecl =
730       Prologue ? cast<CXXConstructorDecl>(CurGD.getDecl())->getParent()
731                : cast<CXXDestructorDecl>(CurGD.getDecl())->getParent();
732   if (!ClassDecl->mayInsertExtraPadding()) return;
733 
734   struct SizeAndOffset {
735     uint64_t Size;
736     uint64_t Offset;
737   };
738 
739   unsigned PtrSize = CGM.getDataLayout().getPointerSizeInBits();
740   const ASTRecordLayout &Info = Context.getASTRecordLayout(ClassDecl);
741 
742   // Populate sizes and offsets of fields.
743   SmallVector<SizeAndOffset, 16> SSV(Info.getFieldCount());
744   for (unsigned i = 0, e = Info.getFieldCount(); i != e; ++i)
745     SSV[i].Offset =
746         Context.toCharUnitsFromBits(Info.getFieldOffset(i)).getQuantity();
747 
748   size_t NumFields = 0;
749   for (const auto *Field : ClassDecl->fields()) {
750     const FieldDecl *D = Field;
751     std::pair<CharUnits, CharUnits> FieldInfo =
752         Context.getTypeInfoInChars(D->getType());
753     CharUnits FieldSize = FieldInfo.first;
754     assert(NumFields < SSV.size());
755     SSV[NumFields].Size = D->isBitField() ? 0 : FieldSize.getQuantity();
756     NumFields++;
757   }
758   assert(NumFields == SSV.size());
759   if (SSV.size() <= 1) return;
760 
  // We will insert calls to __asan_* run-time functions.
  // The LLVM AddressSanitizer pass may decide to inline them later.
763   llvm::Type *Args[2] = {IntPtrTy, IntPtrTy};
764   llvm::FunctionType *FTy =
765       llvm::FunctionType::get(CGM.VoidTy, Args, false);
766   llvm::Constant *F = CGM.CreateRuntimeFunction(
767       FTy, Prologue ? "__asan_poison_intra_object_redzone"
768                     : "__asan_unpoison_intra_object_redzone");
769 
770   llvm::Value *ThisPtr = LoadCXXThis();
771   ThisPtr = Builder.CreatePtrToInt(ThisPtr, IntPtrTy);
772   uint64_t TypeSize = Info.getNonVirtualSize().getQuantity();
  // For each field, check whether it has sufficient padding;
  // if so, (un)poison it with a call.
775   for (size_t i = 0; i < SSV.size(); i++) {
776     uint64_t AsanAlignment = 8;
777     uint64_t NextField = i == SSV.size() - 1 ? TypeSize : SSV[i + 1].Offset;
778     uint64_t PoisonSize = NextField - SSV[i].Offset - SSV[i].Size;
779     uint64_t EndOffset = SSV[i].Offset + SSV[i].Size;
780     if (PoisonSize < AsanAlignment || !SSV[i].Size ||
781         (NextField % AsanAlignment) != 0)
782       continue;
783     Builder.CreateCall2(
784         F, Builder.CreateAdd(ThisPtr, Builder.getIntN(PtrSize, EndOffset)),
785         Builder.getIntN(PtrSize, PoisonSize));
786   }
787 }
788 
789 /// EmitConstructorBody - Emits the body of the current constructor.
790 void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
791   EmitAsanPrologueOrEpilogue(true);
792   const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(CurGD.getDecl());
793   CXXCtorType CtorType = CurGD.getCtorType();
794 
795   assert((CGM.getTarget().getCXXABI().hasConstructorVariants() ||
796           CtorType == Ctor_Complete) &&
797          "can only generate complete ctor for this ABI");
798 
799   // Before we go any further, try the complete->base constructor
800   // delegation optimization.
801   if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor) &&
802       CGM.getTarget().getCXXABI().hasConstructorVariants()) {
803     EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args, Ctor->getLocEnd());
804     return;
805   }
806 
  const FunctionDecl *Definition = nullptr;
808   Stmt *Body = Ctor->getBody(Definition);
809   assert(Definition == Ctor && "emitting wrong constructor body");
810 
811   // Enter the function-try-block before the constructor prologue if
812   // applicable.
813   bool IsTryBody = (Body && isa<CXXTryStmt>(Body));
814   if (IsTryBody)
815     EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);
816 
817   incrementProfileCounter(Body);
818 
819   RunCleanupsScope RunCleanups(*this);
820 
821   // TODO: in restricted cases, we can emit the vbase initializers of
822   // a complete ctor and then delegate to the base ctor.
823 
824   // Emit the constructor prologue, i.e. the base and member
825   // initializers.
826   EmitCtorPrologue(Ctor, CtorType, Args);
827 
828   // Emit the body of the statement.
829   if (IsTryBody)
830     EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
831   else if (Body)
832     EmitStmt(Body);
833 
834   // Emit any cleanup blocks associated with the member or base
835   // initializers, which includes (along the exceptional path) the
836   // destructors for those members and bases that were fully
837   // constructed.
838   RunCleanups.ForceCleanup();
839 
840   if (IsTryBody)
841     ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
842 }
843 
844 namespace {
845   /// RAII object to indicate that codegen is copying the value representation
846   /// instead of the object representation. Useful when copying a struct or
847   /// class which has uninitialized members and we're only performing
848   /// lvalue-to-rvalue conversion on the object but not its members.
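  ///
  /// For example (illustrative): the single-statement fallback copy of a
  /// struct with an uninitialized 'bool' member does load that member, but
  /// since we are semantically copying the value representation, the
  /// -fsanitize=bool and -fsanitize=enum checks on that load are suppressed.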
849   class CopyingValueRepresentation {
850   public:
851     explicit CopyingValueRepresentation(CodeGenFunction &CGF)
852         : CGF(CGF), OldSanOpts(CGF.SanOpts) {
853       CGF.SanOpts.set(SanitizerKind::Bool, false);
854       CGF.SanOpts.set(SanitizerKind::Enum, false);
855     }
856     ~CopyingValueRepresentation() {
857       CGF.SanOpts = OldSanOpts;
858     }
859   private:
860     CodeGenFunction &CGF;
861     SanitizerSet OldSanOpts;
862   };
863 }
864 
865 namespace {
866   class FieldMemcpyizer {
867   public:
868     FieldMemcpyizer(CodeGenFunction &CGF, const CXXRecordDecl *ClassDecl,
869                     const VarDecl *SrcRec)
870       : CGF(CGF), ClassDecl(ClassDecl), SrcRec(SrcRec),
871         RecLayout(CGF.getContext().getASTRecordLayout(ClassDecl)),
872         FirstField(nullptr), LastField(nullptr), FirstFieldOffset(0),
873         LastFieldOffset(0), LastAddedFieldIndex(0) {}
874 
875     bool isMemcpyableField(FieldDecl *F) const {
876       // Never memcpy fields when we are adding poisoned paddings.
877       if (CGF.getContext().getLangOpts().SanitizeAddressFieldPadding)
878         return false;
879       Qualifiers Qual = F->getType().getQualifiers();
880       if (Qual.hasVolatile() || Qual.hasObjCLifetime())
881         return false;
882       return true;
883     }
884 
885     void addMemcpyableField(FieldDecl *F) {
886       if (!FirstField)
887         addInitialField(F);
888       else
889         addNextField(F);
890     }
891 
892     CharUnits getMemcpySize(uint64_t FirstByteOffset) const {
893       unsigned LastFieldSize =
894         LastField->isBitField() ?
895           LastField->getBitWidthValue(CGF.getContext()) :
896           CGF.getContext().getTypeSize(LastField->getType());
897       uint64_t MemcpySizeBits =
898         LastFieldOffset + LastFieldSize - FirstByteOffset +
899         CGF.getContext().getCharWidth() - 1;
900       CharUnits MemcpySize =
901         CGF.getContext().toCharUnitsFromBits(MemcpySizeBits);
902       return MemcpySize;
903     }
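    // Worked example for getMemcpySize above (illustrative numbers): with
    // FirstByteOffset == 0, a last field at bit offset 32 with size 32 bits,
    // and 8-bit chars, the arithmetic gives (32 + 32 - 0 + 7) / 8 == 8
    // bytes, i.e. the bit extent rounded up to whole chars.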
904 
905     void emitMemcpy() {
      // Give the subclass a chance to bail out if it feels the memcpy isn't
      // worth it (e.g. hasn't aggregated enough data).
908       if (!FirstField) {
909         return;
910       }
911 
912       CharUnits Alignment;
913 
914       uint64_t FirstByteOffset;
915       if (FirstField->isBitField()) {
916         const CGRecordLayout &RL =
917           CGF.getTypes().getCGRecordLayout(FirstField->getParent());
918         const CGBitFieldInfo &BFInfo = RL.getBitFieldInfo(FirstField);
919         Alignment = CharUnits::fromQuantity(BFInfo.StorageAlignment);
        // FirstFieldOffset is not appropriate for bitfields: it won't tell
        // us what the storage offset should be, and thus might not be
        // properly aligned.
        //
        // Instead, calculate the storage offset using the offset of the
        // field in the LLVM struct type.
926         const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
927         FirstByteOffset =
928             DL.getStructLayout(RL.getLLVMType())
929                 ->getElementOffsetInBits(RL.getLLVMFieldNo(FirstField));
930       } else {
931         Alignment = CGF.getContext().getDeclAlign(FirstField);
932         FirstByteOffset = FirstFieldOffset;
933       }
934 
935       assert((CGF.getContext().toCharUnitsFromBits(FirstByteOffset) %
936               Alignment) == 0 && "Bad field alignment.");
937 
938       CharUnits MemcpySize = getMemcpySize(FirstByteOffset);
939       QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
940       llvm::Value *ThisPtr = CGF.LoadCXXThis();
941       LValue DestLV = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
942       LValue Dest = CGF.EmitLValueForFieldInitialization(DestLV, FirstField);
943       llvm::Value *SrcPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(SrcRec));
944       LValue SrcLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
945       LValue Src = CGF.EmitLValueForFieldInitialization(SrcLV, FirstField);
946 
947       emitMemcpyIR(Dest.isBitField() ? Dest.getBitFieldAddr() : Dest.getAddress(),
948                    Src.isBitField() ? Src.getBitFieldAddr() : Src.getAddress(),
949                    MemcpySize, Alignment);
950       reset();
951     }
952 
953     void reset() {
954       FirstField = nullptr;
955     }
956 
957   protected:
958     CodeGenFunction &CGF;
959     const CXXRecordDecl *ClassDecl;
960 
961   private:
962 
963     void emitMemcpyIR(llvm::Value *DestPtr, llvm::Value *SrcPtr,
964                       CharUnits Size, CharUnits Alignment) {
965       llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
966       llvm::Type *DBP =
967         llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), DPT->getAddressSpace());
968       DestPtr = CGF.Builder.CreateBitCast(DestPtr, DBP);
969 
970       llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
971       llvm::Type *SBP =
972         llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), SPT->getAddressSpace());
973       SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, SBP);
974 
975       CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, Size.getQuantity(),
976                                Alignment.getQuantity());
977     }
978 
    void addInitialField(FieldDecl *F) {
      FirstField = F;
      LastField = F;
      FirstFieldOffset = RecLayout.getFieldOffset(F->getFieldIndex());
      LastFieldOffset = FirstFieldOffset;
      LastAddedFieldIndex = F->getFieldIndex();
    }
987 
988     void addNextField(FieldDecl *F) {
989       // For the most part, the following invariant will hold:
990       //   F->getFieldIndex() == LastAddedFieldIndex + 1
991       // The one exception is that Sema won't add a copy-initializer for an
992       // unnamed bitfield, which will show up here as a gap in the sequence.
993       assert(F->getFieldIndex() >= LastAddedFieldIndex + 1 &&
994              "Cannot aggregate fields out of order.");
995       LastAddedFieldIndex = F->getFieldIndex();
996 
997       // The 'first' and 'last' fields are chosen by offset, rather than field
998       // index. This allows the code to support bitfields, as well as regular
999       // fields.
1000       uint64_t FOffset = RecLayout.getFieldOffset(F->getFieldIndex());
1001       if (FOffset < FirstFieldOffset) {
1002         FirstField = F;
1003         FirstFieldOffset = FOffset;
1004       } else if (FOffset > LastFieldOffset) {
1005         LastField = F;
1006         LastFieldOffset = FOffset;
1007       }
1008     }
1009 
1010     const VarDecl *SrcRec;
1011     const ASTRecordLayout &RecLayout;
1012     FieldDecl *FirstField;
1013     FieldDecl *LastField;
1014     uint64_t FirstFieldOffset, LastFieldOffset;
1015     unsigned LastAddedFieldIndex;
1016   };
1017 
1018   class ConstructorMemcpyizer : public FieldMemcpyizer {
1019   private:
1020 
1021     /// Get source argument for copy constructor. Returns null if not a copy
1022     /// constructor.
1023     static const VarDecl *getTrivialCopySource(CodeGenFunction &CGF,
1024                                                const CXXConstructorDecl *CD,
1025                                                FunctionArgList &Args) {
1026       if (CD->isCopyOrMoveConstructor() && CD->isDefaulted())
1027         return Args[CGF.CGM.getCXXABI().getSrcArgforCopyCtor(CD, Args)];
1028       return nullptr;
1029     }
1030 
1031     // Returns true if a CXXCtorInitializer represents a member initialization
1032     // that can be rolled into a memcpy.
1033     bool isMemberInitMemcpyable(CXXCtorInitializer *MemberInit) const {
1034       if (!MemcpyableCtor)
1035         return false;
1036       FieldDecl *Field = MemberInit->getMember();
1037       assert(Field && "No field for member init.");
1038       QualType FieldType = Field->getType();
1039       CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());
1040 
1041       // Bail out on non-memcpyable, not-trivially-copyable members.
1042       if (!(CE && isMemcpyEquivalentSpecialMember(CE->getConstructor())) &&
1043           !(FieldType.isTriviallyCopyableType(CGF.getContext()) ||
1044             FieldType->isReferenceType()))
1045         return false;
1046 
1047       // Bail out on volatile fields.
1048       if (!isMemcpyableField(Field))
1049         return false;
1050 
1051       // Otherwise we're good.
1052       return true;
1053     }
1054 
1055   public:
1056     ConstructorMemcpyizer(CodeGenFunction &CGF, const CXXConstructorDecl *CD,
1057                           FunctionArgList &Args)
1058       : FieldMemcpyizer(CGF, CD->getParent(), getTrivialCopySource(CGF, CD, Args)),
1059         ConstructorDecl(CD),
1060         MemcpyableCtor(CD->isDefaulted() &&
1061                        CD->isCopyOrMoveConstructor() &&
1062                        CGF.getLangOpts().getGC() == LangOptions::NonGC),
1063         Args(Args) { }
1064 
1065     void addMemberInitializer(CXXCtorInitializer *MemberInit) {
1066       if (isMemberInitMemcpyable(MemberInit)) {
1067         AggregatedInits.push_back(MemberInit);
1068         addMemcpyableField(MemberInit->getMember());
1069       } else {
1070         emitAggregatedInits();
1071         EmitMemberInitializer(CGF, ConstructorDecl->getParent(), MemberInit,
1072                               ConstructorDecl, Args);
1073       }
1074     }
1075 
1076     void emitAggregatedInits() {
1077       if (AggregatedInits.size() <= 1) {
1078         // This memcpy is too small to be worthwhile. Fall back on default
1079         // codegen.
1080         if (!AggregatedInits.empty()) {
1081           CopyingValueRepresentation CVR(CGF);
1082           EmitMemberInitializer(CGF, ConstructorDecl->getParent(),
1083                                 AggregatedInits[0], ConstructorDecl, Args);
1084         }
1085         reset();
1086         return;
1087       }
1088 
1089       pushEHDestructors();
1090       emitMemcpy();
1091       AggregatedInits.clear();
1092     }
1093 
1094     void pushEHDestructors() {
1095       llvm::Value *ThisPtr = CGF.LoadCXXThis();
1096       QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
1097       LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
1098 
1099       for (unsigned i = 0; i < AggregatedInits.size(); ++i) {
1100         QualType FieldType = AggregatedInits[i]->getMember()->getType();
1101         QualType::DestructionKind dtorKind = FieldType.isDestructedType();
1102         if (CGF.needsEHCleanup(dtorKind))
1103           CGF.pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
1104       }
1105     }
1106 
1107     void finish() {
1108       emitAggregatedInits();
1109     }
1110 
1111   private:
1112     const CXXConstructorDecl *ConstructorDecl;
1113     bool MemcpyableCtor;
1114     FunctionArgList &Args;
1115     SmallVector<CXXCtorInitializer*, 16> AggregatedInits;
1116   };
1117 
1118   class AssignmentMemcpyizer : public FieldMemcpyizer {
1119   private:
1120 
1121     // Returns the memcpyable field copied by the given statement, if one
1122     // exists. Otherwise returns null.
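    //
    // Illustrative source forms recognized (field 'x', source object
    // 'other'):
    //   x = other.x;                               // trivial assignment
    //   x.operator=(other.x);                      // memcpy-equivalent call
    //   __builtin_memcpy(&x, &other.x, sizeof(x)); // builtin memcpy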
1123     FieldDecl *getMemcpyableField(Stmt *S) {
1124       if (!AssignmentsMemcpyable)
1125         return nullptr;
1126       if (BinaryOperator *BO = dyn_cast<BinaryOperator>(S)) {
1127         // Recognise trivial assignments.
1128         if (BO->getOpcode() != BO_Assign)
1129           return nullptr;
1130         MemberExpr *ME = dyn_cast<MemberExpr>(BO->getLHS());
1131         if (!ME)
1132           return nullptr;
1133         FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
1134         if (!Field || !isMemcpyableField(Field))
1135           return nullptr;
1136         Stmt *RHS = BO->getRHS();
1137         if (ImplicitCastExpr *EC = dyn_cast<ImplicitCastExpr>(RHS))
1138           RHS = EC->getSubExpr();
1139         if (!RHS)
1140           return nullptr;
        MemberExpr *ME2 = dyn_cast<MemberExpr>(RHS);
        if (!ME2 || dyn_cast<FieldDecl>(ME2->getMemberDecl()) != Field)
1143           return nullptr;
1144         return Field;
1145       } else if (CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(S)) {
1146         CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MCE->getCalleeDecl());
1147         if (!(MD && isMemcpyEquivalentSpecialMember(MD)))
1148           return nullptr;
1149         MemberExpr *IOA = dyn_cast<MemberExpr>(MCE->getImplicitObjectArgument());
1150         if (!IOA)
1151           return nullptr;
1152         FieldDecl *Field = dyn_cast<FieldDecl>(IOA->getMemberDecl());
1153         if (!Field || !isMemcpyableField(Field))
1154           return nullptr;
1155         MemberExpr *Arg0 = dyn_cast<MemberExpr>(MCE->getArg(0));
1156         if (!Arg0 || Field != dyn_cast<FieldDecl>(Arg0->getMemberDecl()))
1157           return nullptr;
1158         return Field;
1159       } else if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
1160         FunctionDecl *FD = dyn_cast<FunctionDecl>(CE->getCalleeDecl());
1161         if (!FD || FD->getBuiltinID() != Builtin::BI__builtin_memcpy)
1162           return nullptr;
1163         Expr *DstPtr = CE->getArg(0);
1164         if (ImplicitCastExpr *DC = dyn_cast<ImplicitCastExpr>(DstPtr))
1165           DstPtr = DC->getSubExpr();
1166         UnaryOperator *DUO = dyn_cast<UnaryOperator>(DstPtr);
1167         if (!DUO || DUO->getOpcode() != UO_AddrOf)
1168           return nullptr;
1169         MemberExpr *ME = dyn_cast<MemberExpr>(DUO->getSubExpr());
1170         if (!ME)
1171           return nullptr;
1172         FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
1173         if (!Field || !isMemcpyableField(Field))
1174           return nullptr;
1175         Expr *SrcPtr = CE->getArg(1);
1176         if (ImplicitCastExpr *SC = dyn_cast<ImplicitCastExpr>(SrcPtr))
1177           SrcPtr = SC->getSubExpr();
1178         UnaryOperator *SUO = dyn_cast<UnaryOperator>(SrcPtr);
1179         if (!SUO || SUO->getOpcode() != UO_AddrOf)
1180           return nullptr;
1181         MemberExpr *ME2 = dyn_cast<MemberExpr>(SUO->getSubExpr());
1182         if (!ME2 || Field != dyn_cast<FieldDecl>(ME2->getMemberDecl()))
1183           return nullptr;
1184         return Field;
1185       }
1186 
1187       return nullptr;
1188     }
1189 
1190     bool AssignmentsMemcpyable;
1191     SmallVector<Stmt*, 16> AggregatedStmts;
1192 
1193   public:
1194 
1195     AssignmentMemcpyizer(CodeGenFunction &CGF, const CXXMethodDecl *AD,
1196                          FunctionArgList &Args)
1197       : FieldMemcpyizer(CGF, AD->getParent(), Args[Args.size() - 1]),
1198         AssignmentsMemcpyable(CGF.getLangOpts().getGC() == LangOptions::NonGC) {
1199       assert(Args.size() == 2);
1200     }
1201 
1202     void emitAssignment(Stmt *S) {
1203       FieldDecl *F = getMemcpyableField(S);
1204       if (F) {
1205         addMemcpyableField(F);
1206         AggregatedStmts.push_back(S);
1207       } else {
1208         emitAggregatedStmts();
1209         CGF.EmitStmt(S);
1210       }
1211     }
1212 
1213     void emitAggregatedStmts() {
1214       if (AggregatedStmts.size() <= 1) {
1215         if (!AggregatedStmts.empty()) {
1216           CopyingValueRepresentation CVR(CGF);
1217           CGF.EmitStmt(AggregatedStmts[0]);
1218         }
1219         reset();
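        // Fall through deliberately: emitMemcpy() is a no-op once reset()
        // has cleared FirstField, and AggregatedStmts still needs clearing.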
1220       }
1221 
1222       emitMemcpy();
1223       AggregatedStmts.clear();
1224     }
1225 
1226     void finish() {
1227       emitAggregatedStmts();
1228     }
1229   };
1230 
1231 }
1232 
/// EmitCtorPrologue - This routine generates the code needed to initialize
/// the base classes and non-static data members of the constructor's class.
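///
/// Sketch of the emission order for a complete-object constructor:
///   1. virtual base initializers
///   2. non-virtual base initializers
///   3. vtable pointer initialization
///   4. member initializers (adjacent trivial ones batched into memcpys)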
1235 void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
1236                                        CXXCtorType CtorType,
1237                                        FunctionArgList &Args) {
1238   if (CD->isDelegatingConstructor())
1239     return EmitDelegatingCXXConstructorCall(CD, Args);
1240 
1241   const CXXRecordDecl *ClassDecl = CD->getParent();
1242 
1243   CXXConstructorDecl::init_const_iterator B = CD->init_begin(),
1244                                           E = CD->init_end();
1245 
1246   llvm::BasicBlock *BaseCtorContinueBB = nullptr;
1247   if (ClassDecl->getNumVBases() &&
1248       !CGM.getTarget().getCXXABI().hasConstructorVariants()) {
1249     // The ABIs that don't have constructor variants need to put a branch
1250     // before the virtual base initialization code.
1251     BaseCtorContinueBB =
1252       CGM.getCXXABI().EmitCtorCompleteObjectHandler(*this, ClassDecl);
1253     assert(BaseCtorContinueBB);
1254   }
1255 
1256   // Virtual base initializers first.
1257   for (; B != E && (*B)->isBaseInitializer() && (*B)->isBaseVirtual(); B++) {
1258     EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
1259   }
1260 
1261   if (BaseCtorContinueBB) {
1262     // Complete object handler should continue to the remaining initializers.
1263     Builder.CreateBr(BaseCtorContinueBB);
1264     EmitBlock(BaseCtorContinueBB);
1265   }
1266 
1267   // Then, non-virtual base initializers.
1268   for (; B != E && (*B)->isBaseInitializer(); B++) {
1269     assert(!(*B)->isBaseVirtual());
1270     EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
1271   }
1272 
1273   InitializeVTablePointers(ClassDecl);
1274 
1275   // And finally, initialize class members.
1276   FieldConstructionScope FCS(*this, CXXThisValue);
1277   ConstructorMemcpyizer CM(*this, CD, Args);
1278   for (; B != E; B++) {
1279     CXXCtorInitializer *Member = (*B);
1280     assert(!Member->isBaseInitializer());
1281     assert(Member->isAnyMemberInitializer() &&
1282            "Delegating initializer on non-delegating constructor");
1283     CM.addMemberInitializer(Member);
1284   }
1285   CM.finish();
1286 }
1287 
1288 static bool
1289 FieldHasTrivialDestructorBody(ASTContext &Context, const FieldDecl *Field);
1290 
1291 static bool
1292 HasTrivialDestructorBody(ASTContext &Context,
1293                          const CXXRecordDecl *BaseClassDecl,
1294                          const CXXRecordDecl *MostDerivedClassDecl)
1295 {
1296   // If the destructor is trivial we don't have to check anything else.
1297   if (BaseClassDecl->hasTrivialDestructor())
1298     return true;
1299 
1300   if (!BaseClassDecl->getDestructor()->hasTrivialBody())
1301     return false;
1302 
1303   // Check fields.
1304   for (const auto *Field : BaseClassDecl->fields())
1305     if (!FieldHasTrivialDestructorBody(Context, Field))
1306       return false;
1307 
1308   // Check non-virtual bases.
1309   for (const auto &I : BaseClassDecl->bases()) {
1310     if (I.isVirtual())
1311       continue;
1312 
1313     const CXXRecordDecl *NonVirtualBase =
1314       cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
1315     if (!HasTrivialDestructorBody(Context, NonVirtualBase,
1316                                   MostDerivedClassDecl))
1317       return false;
1318   }
1319 
1320   if (BaseClassDecl == MostDerivedClassDecl) {
1321     // Check virtual bases.
1322     for (const auto &I : BaseClassDecl->vbases()) {
1323       const CXXRecordDecl *VirtualBase =
1324         cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
1325       if (!HasTrivialDestructorBody(Context, VirtualBase,
1326                                     MostDerivedClassDecl))
1327         return false;
1328     }
1329   }
1330 
1331   return true;
1332 }
1333 
1334 static bool
1335 FieldHasTrivialDestructorBody(ASTContext &Context,
1336                               const FieldDecl *Field)
1337 {
1338   QualType FieldBaseElementType = Context.getBaseElementType(Field->getType());
1339 
1340   const RecordType *RT = FieldBaseElementType->getAs<RecordType>();
1341   if (!RT)
1342     return true;
1343 
1344   CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
1345   return HasTrivialDestructorBody(Context, FieldClassDecl, FieldClassDecl);
1346 }
1347 
/// CanSkipVTablePointerInitialization - Check whether we can skip setting up
/// the vtable pointers before running this destructor's body.
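///
/// For example (illustrative): if ~D() has an empty body and every field of
/// D is trivially destructible (checked recursively through class-type
/// fields), nothing in D's destructor can make a virtual call, so storing
/// D's vtable pointers first would be dead work.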
1350 static bool CanSkipVTablePointerInitialization(ASTContext &Context,
1351                                                const CXXDestructorDecl *Dtor) {
1352   if (!Dtor->hasTrivialBody())
1353     return false;
1354 
1355   // Check the fields.
1356   const CXXRecordDecl *ClassDecl = Dtor->getParent();
1357   for (const auto *Field : ClassDecl->fields())
1358     if (!FieldHasTrivialDestructorBody(Context, Field))
1359       return false;
1360 
1361   return true;
1362 }
1363 
1364 /// EmitDestructorBody - Emits the body of the current destructor.
1365 void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
1366   const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CurGD.getDecl());
1367   CXXDtorType DtorType = CurGD.getDtorType();
1368 
1369   // The call to operator delete in a deleting destructor happens
1370   // outside of the function-try-block, which means it's always
1371   // possible to delegate the destructor body to the complete
1372   // destructor.  Do so.
1373   if (DtorType == Dtor_Deleting) {
1374     EnterDtorCleanups(Dtor, Dtor_Deleting);
1375     EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
1376                           /*Delegating=*/false, LoadCXXThis());
1377     PopCleanupBlock();
1378     return;
1379   }
1380 
1381   Stmt *Body = Dtor->getBody();
1382 
1383   // If the body is a function-try-block, enter the try before
1384   // anything else.
1385   bool isTryBody = (Body && isa<CXXTryStmt>(Body));
1386   if (isTryBody)
1387     EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);
1388   EmitAsanPrologueOrEpilogue(false);
1389 
1390   // Enter the epilogue cleanups.
1391   RunCleanupsScope DtorEpilogue(*this);
1392 
1393   // If this is the complete variant, just invoke the base variant;
1394   // the epilogue will destruct the virtual bases.  But we can't do
1395   // this optimization if the body is a function-try-block, because
1396   // we'd introduce *two* handler blocks.  In the Microsoft ABI, we
1397   // always delegate because we might not have a definition in this TU.
1398   switch (DtorType) {
1399   case Dtor_Comdat:
1400     llvm_unreachable("not expecting a COMDAT");
1401 
1402   case Dtor_Deleting: llvm_unreachable("already handled deleting case");
1403 
1404   case Dtor_Complete:
1405     assert((Body || getTarget().getCXXABI().isMicrosoft()) &&
1406            "can't emit a dtor without a body for non-Microsoft ABIs");
1407 
1408     // Enter the cleanup scopes for virtual bases.
1409     EnterDtorCleanups(Dtor, Dtor_Complete);
1410 
1411     if (!isTryBody) {
1412       EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
1413                             /*Delegating=*/false, LoadCXXThis());
1414       break;
1415     }
1416     // Fallthrough: act like we're in the base variant.
1417 
1418   case Dtor_Base:
1419     assert(Body);
1420 
1421     incrementProfileCounter(Body);
1422 
1423     // Enter the cleanup scopes for fields and non-virtual bases.
1424     EnterDtorCleanups(Dtor, Dtor_Base);
1425 
1426     // Initialize the vtable pointers before entering the body.
1427     if (!CanSkipVTablePointerInitialization(getContext(), Dtor))
      InitializeVTablePointers(Dtor->getParent());
1429 
1430     if (isTryBody)
1431       EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
1432     else if (Body)
1433       EmitStmt(Body);
1434     else {
1435       assert(Dtor->isImplicit() && "bodyless dtor not implicit");
1436       // nothing to do besides what's in the epilogue
1437     }
1438     // -fapple-kext must inline any call to this dtor into
1439     // the caller's body.
1440     if (getLangOpts().AppleKext)
1441       CurFn->addFnAttr(llvm::Attribute::AlwaysInline);
1442     break;
1443   }
1444 
1445   // Jump out through the epilogue cleanups.
1446   DtorEpilogue.ForceCleanup();
1447 
1448   // Exit the try if applicable.
1449   if (isTryBody)
1450     ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
1451 }
1452 
void
CodeGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &Args) {
1454   const CXXMethodDecl *AssignOp = cast<CXXMethodDecl>(CurGD.getDecl());
1455   const Stmt *RootS = AssignOp->getBody();
1456   assert(isa<CompoundStmt>(RootS) &&
1457          "Body of an implicit assignment operator should be compound stmt.");
1458   const CompoundStmt *RootCS = cast<CompoundStmt>(RootS);
1459 
1460   LexicalScope Scope(*this, RootCS->getSourceRange());
1461 
1462   AssignmentMemcpyizer AM(*this, AssignOp, Args);
1463   for (auto *I : RootCS->body())
1464     AM.emitAssignment(I);
1465   AM.finish();
1466 }
1467 
1468 namespace {
1469   /// Call the operator delete associated with the current destructor.
1470   struct CallDtorDelete : EHScopeStack::Cleanup {
1471     CallDtorDelete() {}
1472 
1473     void Emit(CodeGenFunction &CGF, Flags flags) override {
1474       const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
1475       const CXXRecordDecl *ClassDecl = Dtor->getParent();
1476       CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
1477                          CGF.getContext().getTagDeclType(ClassDecl));
1478     }
1479   };
1480 
1481   struct CallDtorDeleteConditional : EHScopeStack::Cleanup {
1482     llvm::Value *ShouldDeleteCondition;
1483   public:
1484     CallDtorDeleteConditional(llvm::Value *ShouldDeleteCondition)
1485       : ShouldDeleteCondition(ShouldDeleteCondition) {
1486       assert(ShouldDeleteCondition != nullptr);
1487     }
1488 
1489     void Emit(CodeGenFunction &CGF, Flags flags) override {
1490       llvm::BasicBlock *callDeleteBB = CGF.createBasicBlock("dtor.call_delete");
1491       llvm::BasicBlock *continueBB = CGF.createBasicBlock("dtor.continue");
      // Branch around the delete call when the implicit parameter is zero.
      llvm::Value *ShouldNotDelete =
          CGF.Builder.CreateIsNull(ShouldDeleteCondition);
      CGF.Builder.CreateCondBr(ShouldNotDelete, continueBB, callDeleteBB);
1495 
1496       CGF.EmitBlock(callDeleteBB);
1497       const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
1498       const CXXRecordDecl *ClassDecl = Dtor->getParent();
1499       CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
1500                          CGF.getContext().getTagDeclType(ClassDecl));
1501       CGF.Builder.CreateBr(continueBB);
1502 
1503       CGF.EmitBlock(continueBB);
1504     }
1505   };
1506 
  class DestroyField : public EHScopeStack::Cleanup {
1508     const FieldDecl *field;
1509     CodeGenFunction::Destroyer *destroyer;
1510     bool useEHCleanupForArray;
1511 
1512   public:
1513     DestroyField(const FieldDecl *field, CodeGenFunction::Destroyer *destroyer,
1514                  bool useEHCleanupForArray)
1515       : field(field), destroyer(destroyer),
1516         useEHCleanupForArray(useEHCleanupForArray) {}
1517 
1518     void Emit(CodeGenFunction &CGF, Flags flags) override {
1519       // Find the address of the field.
1520       llvm::Value *thisValue = CGF.LoadCXXThis();
1521       QualType RecordTy = CGF.getContext().getTagDeclType(field->getParent());
1522       LValue ThisLV = CGF.MakeAddrLValue(thisValue, RecordTy);
1523       LValue LV = CGF.EmitLValueForField(ThisLV, field);
1524       assert(LV.isSimple());
1525 
1526       CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer,
1527                       flags.isForNormalCleanup() && useEHCleanupForArray);
1528     }
1529   };
1530 }
1531 
/// \brief Emit all the code that comes at the end of a class's destructor:
/// calls to the destructors of members and base classes, in reverse order of
/// their construction.
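///
/// For example, given:
///
///   struct S : Base { A a; B b; };
///
/// the base-variant epilogue destroys b, then a, then Base. The cleanups are
/// pushed here in construction order precisely so that popping the cleanup
/// stack runs them in reverse.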
1535 void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
1536                                         CXXDtorType DtorType) {
1537   assert((!DD->isTrivial() || DD->hasAttr<DLLExportAttr>()) &&
1538          "Should not emit dtor epilogue for non-exported trivial dtor!");
1539 
1540   // The deleting-destructor phase just needs to call the appropriate
1541   // operator delete that Sema picked up.
1542   if (DtorType == Dtor_Deleting) {
1543     assert(DD->getOperatorDelete() &&
1544            "operator delete missing - EnterDtorCleanups");
1545     if (CXXStructorImplicitParamValue) {
1546       // If there is an implicit param to the deleting dtor, it's a boolean
1547       // telling whether we should call delete at the end of the dtor.
1548       EHStack.pushCleanup<CallDtorDeleteConditional>(
1549           NormalAndEHCleanup, CXXStructorImplicitParamValue);
1550     } else {
1551       EHStack.pushCleanup<CallDtorDelete>(NormalAndEHCleanup);
1552     }
1553     return;
1554   }
1555 
1556   const CXXRecordDecl *ClassDecl = DD->getParent();
1557 
1558   // Unions have no bases and do not call field destructors.
1559   if (ClassDecl->isUnion())
1560     return;
1561 
1562   // The complete-destructor phase just destructs all the virtual bases.
1563   if (DtorType == Dtor_Complete) {
1564 
1565     // We push them in the forward order so that they'll be popped in
1566     // the reverse order.
1567     for (const auto &Base : ClassDecl->vbases()) {
1568       CXXRecordDecl *BaseClassDecl
1569         = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
1570 
1571       // Ignore trivial destructors.
1572       if (BaseClassDecl->hasTrivialDestructor())
1573         continue;
1574 
1575       EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
1576                                         BaseClassDecl,
1577                                         /*BaseIsVirtual*/ true);
1578     }
1579 
1580     return;
1581   }
1582 
1583   assert(DtorType == Dtor_Base);
1584 
1585   // Destroy non-virtual bases.
1586   for (const auto &Base : ClassDecl->bases()) {
1587     // Ignore virtual bases.
1588     if (Base.isVirtual())
1589       continue;
1590 
1591     CXXRecordDecl *BaseClassDecl = Base.getType()->getAsCXXRecordDecl();
1592 
1593     // Ignore trivial destructors.
1594     if (BaseClassDecl->hasTrivialDestructor())
1595       continue;
1596 
1597     EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
1598                                       BaseClassDecl,
1599                                       /*BaseIsVirtual*/ false);
1600   }
1601 
1602   // Destroy direct fields.
1603   for (const auto *Field : ClassDecl->fields()) {
1604     QualType type = Field->getType();
1605     QualType::DestructionKind dtorKind = type.isDestructedType();
1606     if (!dtorKind) continue;
1607 
1608     // Anonymous union members do not have their destructors called.
1609     const RecordType *RT = type->getAsUnionType();
1610     if (RT && RT->getDecl()->isAnonymousStructOrUnion()) continue;
1611 
1612     CleanupKind cleanupKind = getCleanupKind(dtorKind);
1613     EHStack.pushCleanup<DestroyField>(cleanupKind, Field,
1614                                       getDestroyer(dtorKind),
1615                                       cleanupKind & EHCleanup);
1616   }
1617 }
1618 
1619 /// EmitCXXAggrConstructorCall - Emit a loop to call a particular
1620 /// constructor for each of several members of an array.
1621 ///
1622 /// \param ctor the constructor to call for each element
1623 /// \param arrayType the type of the array to initialize
/// \param arrayBegin an arrayType*
/// \param E the construct expression supplying the constructor arguments
1625 /// \param zeroInitialize true if each element should be
1626 ///   zero-initialized before it is constructed
1627 void CodeGenFunction::EmitCXXAggrConstructorCall(
1628     const CXXConstructorDecl *ctor, const ConstantArrayType *arrayType,
1629     llvm::Value *arrayBegin, const CXXConstructExpr *E, bool zeroInitialize) {
1630   QualType elementType;
1631   llvm::Value *numElements =
1632     emitArrayLength(arrayType, elementType, arrayBegin);
1633 
1634   EmitCXXAggrConstructorCall(ctor, numElements, arrayBegin, E, zeroInitialize);
1635 }
1636 
1637 /// EmitCXXAggrConstructorCall - Emit a loop to call a particular
1638 /// constructor for each of several members of an array.
1639 ///
1640 /// \param ctor the constructor to call for each element
1641 /// \param numElements the number of elements in the array;
1642 ///   may be zero
/// \param arrayBegin a T*, where T is the type constructed by ctor
/// \param E the construct expression supplying the constructor arguments
1644 /// \param zeroInitialize true if each element should be
1645 ///   zero-initialized before it is constructed
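///
/// The emitted control flow looks roughly like this (block names match the
/// ones created below):
///
///   ; for a non-constant count, an up-front check branches straight to
///   ; arrayctor.cont when the count is zero
///   arrayctor.loop:
///     cur = phi(arrayBegin, next)
///     construct *cur
///     next = cur + 1
///     if (next == arrayEnd) goto arrayctor.cont; else goto arrayctor.loop
///   arrayctor.cont: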
1646 void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
1647                                                  llvm::Value *numElements,
1648                                                  llvm::Value *arrayBegin,
1649                                                  const CXXConstructExpr *E,
1650                                                  bool zeroInitialize) {
1651 
1652   // It's legal for numElements to be zero.  This can happen both
1653   // dynamically, because x can be zero in 'new A[x]', and statically,
1654   // because of GCC extensions that permit zero-length arrays.  There
1655   // are probably legitimate places where we could assume that this
1656   // doesn't happen, but it's not clear that it's worth it.
1657   llvm::BranchInst *zeroCheckBranch = nullptr;
1658 
1659   // Optimize for a constant count.
1660   llvm::ConstantInt *constantCount
1661     = dyn_cast<llvm::ConstantInt>(numElements);
1662   if (constantCount) {
1663     // Just skip out if the constant count is zero.
1664     if (constantCount->isZero()) return;
1665 
1666   // Otherwise, emit the check.
1667   } else {
1668     llvm::BasicBlock *loopBB = createBasicBlock("new.ctorloop");
1669     llvm::Value *iszero = Builder.CreateIsNull(numElements, "isempty");
1670     zeroCheckBranch = Builder.CreateCondBr(iszero, loopBB, loopBB);
1671     EmitBlock(loopBB);
1672   }
1673 
1674   // Find the end of the array.
1675   llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(arrayBegin, numElements,
1676                                                     "arrayctor.end");
1677 
1678   // Enter the loop, setting up a phi for the current location to initialize.
1679   llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
1680   llvm::BasicBlock *loopBB = createBasicBlock("arrayctor.loop");
1681   EmitBlock(loopBB);
1682   llvm::PHINode *cur = Builder.CreatePHI(arrayBegin->getType(), 2,
1683                                          "arrayctor.cur");
1684   cur->addIncoming(arrayBegin, entryBB);
1685 
1686   // Inside the loop body, emit the constructor call on the array element.
1687 
1688   QualType type = getContext().getTypeDeclType(ctor->getParent());
1689 
1690   // Zero initialize the storage, if requested.
1691   if (zeroInitialize)
1692     EmitNullInitialization(cur, type);
1693 
1694   // C++ [class.temporary]p4:
1695   // There are two contexts in which temporaries are destroyed at a different
1696   // point than the end of the full-expression. The first context is when a
1697   // default constructor is called to initialize an element of an array.
1698   // If the constructor has one or more default arguments, the destruction of
1699   // every temporary created in a default argument expression is sequenced
1700   // before the construction of the next array element, if any.
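  //
  // For instance, in something like:
  //
  //   struct T { T(const A &a = A()); };
  //   T *p = new T[3];
  //
  // the temporary A() is created and destroyed once per element, which is
  // why the constructor call below is wrapped in its own cleanup scope.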
1701 
1702   {
1703     RunCleanupsScope Scope(*this);
1704 
1705     // Evaluate the constructor and its arguments in a regular
1706     // partial-destroy cleanup.
1707     if (getLangOpts().Exceptions &&
1708         !ctor->getParent()->hasTrivialDestructor()) {
1709       Destroyer *destroyer = destroyCXXObject;
1710       pushRegularPartialArrayCleanup(arrayBegin, cur, type, *destroyer);
1711     }
1712 
1713     EmitCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/false,
1714                            /*Delegating=*/false, cur, E);
1715   }
1716 
1717   // Go to the next element.
1718   llvm::Value *next =
1719     Builder.CreateInBoundsGEP(cur, llvm::ConstantInt::get(SizeTy, 1),
1720                               "arrayctor.next");
1721   cur->addIncoming(next, Builder.GetInsertBlock());
1722 
1723   // Check whether that's the end of the loop.
1724   llvm::Value *done = Builder.CreateICmpEQ(next, arrayEnd, "arrayctor.done");
1725   llvm::BasicBlock *contBB = createBasicBlock("arrayctor.cont");
1726   Builder.CreateCondBr(done, contBB, loopBB);
1727 
1728   // Patch the earlier check to skip over the loop.
1729   if (zeroCheckBranch) zeroCheckBranch->setSuccessor(0, contBB);
1730 
1731   EmitBlock(contBB);
1732 }
1733 
1734 void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF,
1735                                        llvm::Value *addr,
1736                                        QualType type) {
1737   const RecordType *rtype = type->castAs<RecordType>();
1738   const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
1739   const CXXDestructorDecl *dtor = record->getDestructor();
1740   assert(!dtor->isTrivial());
1741   CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false,
1742                             /*Delegating=*/false, addr);
1743 }
1744 
1745 void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
1746                                              CXXCtorType Type,
1747                                              bool ForVirtualBase,
1748                                              bool Delegating, llvm::Value *This,
1749                                              const CXXConstructExpr *E) {
1750   // C++11 [class.mfct.non-static]p2:
1751   //   If a non-static member function of a class X is called for an object that
1752   //   is not of type X, or of a type derived from X, the behavior is undefined.
1753   // FIXME: Provide a source location here.
1754   EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, SourceLocation(), This,
1755                 getContext().getRecordType(D->getParent()));
1756 
1757   if (D->isTrivial() && D->isDefaultConstructor()) {
1758     assert(E->getNumArgs() == 0 && "trivial default ctor with args");
1759     return;
1760   }
1761 
1762   // If this is a trivial constructor, just emit what's needed. If this is a
1763   // union copy constructor, we must emit a memcpy, because the AST does not
1764   // model that copy.
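  // For instance, 'A a2(a1);' with a trivial copy constructor lowers to a
  // block copy of sizeof(A) bytes rather than a call.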
1765   if (isMemcpyEquivalentSpecialMember(D)) {
1766     assert(E->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
1767 
1768     const Expr *Arg = E->getArg(0);
1769     QualType SrcTy = Arg->getType();
1770     llvm::Value *Src = EmitLValue(Arg).getAddress();
1771     QualType DestTy = getContext().getTypeDeclType(D->getParent());
1772     EmitAggregateCopyCtor(This, Src, DestTy, SrcTy);
1773     return;
1774   }
1775 
1776   CallArgList Args;
1777 
1778   // Push the this ptr.
1779   Args.add(RValue::get(This), D->getThisType(getContext()));
1780 
1781   // Add the rest of the user-supplied arguments.
1782   const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();
1783   EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end(), E->getConstructor());
1784 
1785   // Insert any ABI-specific implicit constructor arguments.
1786   unsigned ExtraArgs = CGM.getCXXABI().addImplicitConstructorArgs(
1787       *this, D, Type, ForVirtualBase, Delegating, Args);
1788 
1789   // Emit the call.
1790   llvm::Value *Callee = CGM.getAddrOfCXXStructor(D, getFromCtorType(Type));
1791   const CGFunctionInfo &Info =
1792       CGM.getTypes().arrangeCXXConstructorCall(Args, D, Type, ExtraArgs);
1793   EmitCall(Info, Callee, ReturnValueSlot(), Args, D);
1794 }
1795 
1796 void
1797 CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
1798                                         llvm::Value *This, llvm::Value *Src,
1799                                         const CXXConstructExpr *E) {
1800   if (isMemcpyEquivalentSpecialMember(D)) {
1801     assert(E->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
1802     assert(D->isCopyOrMoveConstructor() &&
1803            "trivial 1-arg ctor not a copy/move ctor");
1804     EmitAggregateCopyCtor(This, Src,
1805                           getContext().getTypeDeclType(D->getParent()),
1806                           E->arg_begin()->getType());
1807     return;
1808   }
1809   llvm::Value *Callee = CGM.getAddrOfCXXStructor(D, StructorType::Complete);
1810   assert(D->isInstance() &&
1811          "Trying to emit a member call expr on a static method!");
1812 
1813   const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();
1814 
1815   CallArgList Args;
1816 
1817   // Push the this ptr.
1818   Args.add(RValue::get(This), D->getThisType(getContext()));
1819 
1820   // Push the src ptr.
1821   QualType QT = *(FPT->param_type_begin());
1822   llvm::Type *t = CGM.getTypes().ConvertType(QT);
1823   Src = Builder.CreateBitCast(Src, t);
1824   Args.add(RValue::get(Src), QT);
1825 
1826   // Skip over first argument (Src).
1827   EmitCallArgs(Args, FPT, E->arg_begin() + 1, E->arg_end(), E->getConstructor(),
1828                /*ParamsToSkip*/ 1);
1829 
1830   EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, RequiredArgs::All),
1831            Callee, ReturnValueSlot(), Args, D);
1832 }
1833 
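/// Forward the incoming parameters of the current constructor variant to
/// another variant of the same constructor, e.g. when a complete-object
/// constructor merely wraps the corresponding base-object constructor,
/// passing along 'this', the VTT if one is required, and the original
/// arguments.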
1834 void
1835 CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
1836                                                 CXXCtorType CtorType,
1837                                                 const FunctionArgList &Args,
1838                                                 SourceLocation Loc) {
1839   CallArgList DelegateArgs;
1840 
1841   FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
1842   assert(I != E && "no parameters to constructor");
1843 
1844   // this
1845   DelegateArgs.add(RValue::get(LoadCXXThis()), (*I)->getType());
1846   ++I;
1847 
1848   // vtt
1849   if (llvm::Value *VTT = GetVTTParameter(GlobalDecl(Ctor, CtorType),
1850                                          /*ForVirtualBase=*/false,
1851                                          /*Delegating=*/true)) {
1852     QualType VoidPP = getContext().getPointerType(getContext().VoidPtrTy);
1853     DelegateArgs.add(RValue::get(VTT), VoidPP);
1854 
1855     if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
1856       assert(I != E && "cannot skip vtt parameter, already done with args");
1857       assert((*I)->getType() == VoidPP && "skipping parameter not of vtt type");
1858       ++I;
1859     }
1860   }
1861 
1862   // Explicit arguments.
1863   for (; I != E; ++I) {
1864     const VarDecl *param = *I;
1865     // FIXME: per-argument source location
1866     EmitDelegateCallArg(DelegateArgs, param, Loc);
1867   }
1868 
1869   llvm::Value *Callee =
1870       CGM.getAddrOfCXXStructor(Ctor, getFromCtorType(CtorType));
1871   EmitCall(CGM.getTypes()
1872                .arrangeCXXStructorDeclaration(Ctor, getFromCtorType(CtorType)),
1873            Callee, ReturnValueSlot(), DelegateArgs, Ctor);
1874 }
1875 
1876 namespace {
1877   struct CallDelegatingCtorDtor : EHScopeStack::Cleanup {
1878     const CXXDestructorDecl *Dtor;
1879     llvm::Value *Addr;
1880     CXXDtorType Type;
1881 
1882     CallDelegatingCtorDtor(const CXXDestructorDecl *D, llvm::Value *Addr,
1883                            CXXDtorType Type)
1884       : Dtor(D), Addr(Addr), Type(Type) {}
1885 
1886     void Emit(CodeGenFunction &CGF, Flags flags) override {
1887       CGF.EmitCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false,
1888                                 /*Delegating=*/true, Addr);
1889     }
1890   };
1891 }
1892 
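/// Emit a C++11 delegating constructor call, as in:
///
///   struct A {
///     A(int n);
///     A() : A(42) {}   // delegates to A(int)
///   };
///
/// Once the target constructor returns, the object counts as fully
/// constructed, so if the delegating constructor's own body later throws,
/// the matching destructor variant must run; that is what the cleanup
/// pushed below arranges.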
1893 void
1894 CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
1895                                                   const FunctionArgList &Args) {
1896   assert(Ctor->isDelegatingConstructor());
1897 
1898   llvm::Value *ThisPtr = LoadCXXThis();
1899 
1900   QualType Ty = getContext().getTagDeclType(Ctor->getParent());
1901   CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
1902   AggValueSlot AggSlot =
1903     AggValueSlot::forAddr(ThisPtr, Alignment, Qualifiers(),
1904                           AggValueSlot::IsDestructed,
1905                           AggValueSlot::DoesNotNeedGCBarriers,
1906                           AggValueSlot::IsNotAliased);
1907 
1908   EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot);
1909 
1910   const CXXRecordDecl *ClassDecl = Ctor->getParent();
1911   if (CGM.getLangOpts().Exceptions && !ClassDecl->hasTrivialDestructor()) {
1912     CXXDtorType Type =
1913       CurGD.getCtorType() == Ctor_Complete ? Dtor_Complete : Dtor_Base;
1914 
1915     EHStack.pushCleanup<CallDelegatingCtorDtor>(EHCleanup,
1916                                                 ClassDecl->getDestructor(),
1917                                                 ThisPtr, Type);
1918   }
1919 }
1920 
1921 void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
1922                                             CXXDtorType Type,
1923                                             bool ForVirtualBase,
1924                                             bool Delegating,
1925                                             llvm::Value *This) {
1926   CGM.getCXXABI().EmitDestructorCall(*this, DD, Type, ForVirtualBase,
1927                                      Delegating, This);
1928 }
1929 
1930 namespace {
1931   struct CallLocalDtor : EHScopeStack::Cleanup {
1932     const CXXDestructorDecl *Dtor;
1933     llvm::Value *Addr;
1934 
1935     CallLocalDtor(const CXXDestructorDecl *D, llvm::Value *Addr)
1936       : Dtor(D), Addr(Addr) {}
1937 
1938     void Emit(CodeGenFunction &CGF, Flags flags) override {
1939       CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
1940                                 /*ForVirtualBase=*/false,
1941                                 /*Delegating=*/false, Addr);
1942     }
1943   };
1944 }
1945 
1946 void CodeGenFunction::PushDestructorCleanup(const CXXDestructorDecl *D,
1947                                             llvm::Value *Addr) {
1948   EHStack.pushCleanup<CallLocalDtor>(NormalAndEHCleanup, D, Addr);
1949 }
1950 
1951 void CodeGenFunction::PushDestructorCleanup(QualType T, llvm::Value *Addr) {
1952   CXXRecordDecl *ClassDecl = T->getAsCXXRecordDecl();
1953   if (!ClassDecl) return;
1954   if (ClassDecl->hasTrivialDestructor()) return;
1955 
1956   const CXXDestructorDecl *D = ClassDecl->getDestructor();
1957   assert(D && D->isUsed() && "destructor not marked as used!");
1958   PushDestructorCleanup(D, Addr);
1959 }
1960 
1961 void
1962 CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
1963                                          const CXXRecordDecl *NearestVBase,
1964                                          CharUnits OffsetFromNearestVBase,
1965                                          const CXXRecordDecl *VTableClass) {
1966   const CXXRecordDecl *RD = Base.getBase();
1967 
1968   // Don't initialize the vtable pointer if the class is marked with the
1969   // 'novtable' attribute.
1970   if ((RD == VTableClass || RD == NearestVBase) &&
1971       VTableClass->hasAttr<MSNoVTableAttr>())
1972     return;
1973 
1974   // Compute the address point.
1975   bool NeedsVirtualOffset;
1976   llvm::Value *VTableAddressPoint =
1977       CGM.getCXXABI().getVTableAddressPointInStructor(
1978           *this, VTableClass, Base, NearestVBase, NeedsVirtualOffset);
1979   if (!VTableAddressPoint)
1980     return;
1981 
1982   // Compute where to store the address point.
1983   llvm::Value *VirtualOffset = nullptr;
1984   CharUnits NonVirtualOffset = CharUnits::Zero();
1985 
1986   if (NeedsVirtualOffset) {
    // We need to use the vbase offset offset (the offset, within the vtable,
    // at which the virtual base's offset is stored) because the virtual base
    // might be at a different offset in the most derived class.
1989     VirtualOffset = CGM.getCXXABI().GetVirtualBaseClassOffset(*this,
1990                                                               LoadCXXThis(),
1991                                                               VTableClass,
1992                                                               NearestVBase);
1993     NonVirtualOffset = OffsetFromNearestVBase;
1994   } else {
1995     // We can just use the base offset in the complete class.
1996     NonVirtualOffset = Base.getBaseOffset();
1997   }
1998 
1999   // Apply the offsets.
2000   llvm::Value *VTableField = LoadCXXThis();
2001 
2002   if (!NonVirtualOffset.isZero() || VirtualOffset)
2003     VTableField = ApplyNonVirtualAndVirtualOffset(*this, VTableField,
2004                                                   NonVirtualOffset,
2005                                                   VirtualOffset);
2006 
2007   // Finally, store the address point. Use the same LLVM types as the field to
2008   // support optimization.
2009   llvm::Type *VTablePtrTy =
2010       llvm::FunctionType::get(CGM.Int32Ty, /*isVarArg=*/true)
2011           ->getPointerTo()
2012           ->getPointerTo();
2013   VTableField = Builder.CreateBitCast(VTableField, VTablePtrTy->getPointerTo());
2014   VTableAddressPoint = Builder.CreateBitCast(VTableAddressPoint, VTablePtrTy);
2015   llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField);
2016   CGM.DecorateInstruction(Store, CGM.getTBAAInfoForVTablePtr());
2017 }
2018 
2019 void
2020 CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
2021                                           const CXXRecordDecl *NearestVBase,
2022                                           CharUnits OffsetFromNearestVBase,
2023                                           bool BaseIsNonVirtualPrimaryBase,
2024                                           const CXXRecordDecl *VTableClass,
2025                                           VisitedVirtualBasesSetTy& VBases) {
  // If this base is a non-virtual primary base, the address point has
  // already been set.
2028   if (!BaseIsNonVirtualPrimaryBase) {
2029     // Initialize the vtable pointer for this base.
2030     InitializeVTablePointer(Base, NearestVBase, OffsetFromNearestVBase,
2031                             VTableClass);
2032   }
2033 
2034   const CXXRecordDecl *RD = Base.getBase();
2035 
2036   // Traverse bases.
2037   for (const auto &I : RD->bases()) {
2038     CXXRecordDecl *BaseDecl
2039       = cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
2040 
2041     // Ignore classes without a vtable.
2042     if (!BaseDecl->isDynamicClass())
2043       continue;
2044 
2045     CharUnits BaseOffset;
2046     CharUnits BaseOffsetFromNearestVBase;
2047     bool BaseDeclIsNonVirtualPrimaryBase;
2048 
2049     if (I.isVirtual()) {
2050       // Check if we've visited this virtual base before.
2051       if (!VBases.insert(BaseDecl).second)
2052         continue;
2053 
2054       const ASTRecordLayout &Layout =
2055         getContext().getASTRecordLayout(VTableClass);
2056 
2057       BaseOffset = Layout.getVBaseClassOffset(BaseDecl);
2058       BaseOffsetFromNearestVBase = CharUnits::Zero();
2059       BaseDeclIsNonVirtualPrimaryBase = false;
2060     } else {
2061       const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
2062 
2063       BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
2064       BaseOffsetFromNearestVBase =
2065         OffsetFromNearestVBase + Layout.getBaseClassOffset(BaseDecl);
2066       BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl;
2067     }
2068 
2069     InitializeVTablePointers(BaseSubobject(BaseDecl, BaseOffset),
2070                              I.isVirtual() ? BaseDecl : NearestVBase,
2071                              BaseOffsetFromNearestVBase,
2072                              BaseDeclIsNonVirtualPrimaryBase,
2073                              VTableClass, VBases);
2074   }
2075 }
2076 
2077 void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
2078   // Ignore classes without a vtable.
2079   if (!RD->isDynamicClass())
2080     return;
2081 
2082   // Initialize the vtable pointers for this class and all of its bases.
2083   VisitedVirtualBasesSetTy VBases;
2084   InitializeVTablePointers(BaseSubobject(RD, CharUnits::Zero()),
2085                            /*NearestVBase=*/nullptr,
2086                            /*OffsetFromNearestVBase=*/CharUnits::Zero(),
2087                            /*BaseIsNonVirtualPrimaryBase=*/false, RD, VBases);
2088 
2089   if (RD->getNumVBases())
2090     CGM.getCXXABI().initializeHiddenVirtualInheritanceMembers(*this, RD);
2091 }
2092 
2093 llvm::Value *CodeGenFunction::GetVTablePtr(llvm::Value *This,
2094                                            llvm::Type *Ty) {
2095   llvm::Value *VTablePtrSrc = Builder.CreateBitCast(This, Ty->getPointerTo());
2096   llvm::Instruction *VTable = Builder.CreateLoad(VTablePtrSrc, "vtable");
2097   CGM.DecorateInstruction(VTable, CGM.getTBAAInfoForVTablePtr());
2098   return VTable;
2099 }
2100 
2101 // If a class has a single non-virtual base and does not introduce or override
2102 // virtual member functions or fields, it will have the same layout as its base.
2103 // This function returns the least derived such class.
2104 //
2105 // Casting an instance of a base class to such a derived class is technically
2106 // undefined behavior, but it is a relatively common hack for introducing member
2107 // functions on class instances with specific properties (e.g. llvm::Operator)
2108 // that works under most compilers and should not have security implications, so
2109 // we allow it by default. It can be disabled with -fsanitize=cfi-cast-strict.
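//
// For example, assuming something like:
//
//   struct Base { virtual ~Base(); int x; };
//   struct View : Base {                     // no new fields or virtuals
//     int getX() const { return x; }
//   };
//
// a View has the same layout as a Base, so checking a View* against the
// vtables valid for Base is still sound.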
2110 static const CXXRecordDecl *
2111 LeastDerivedClassWithSameLayout(const CXXRecordDecl *RD) {
2112   if (!RD->field_empty())
2113     return RD;
2114 
2115   if (RD->getNumVBases() != 0)
2116     return RD;
2117 
2118   if (RD->getNumBases() != 1)
2119     return RD;
2120 
2121   for (const CXXMethodDecl *MD : RD->methods()) {
2122     if (MD->isVirtual()) {
2123       // Virtual member functions are only ok if they are implicit destructors
2124       // because the implicit destructor will have the same semantics as the
2125       // base class's destructor if no fields are added.
2126       if (isa<CXXDestructorDecl>(MD) && MD->isImplicit())
2127         continue;
2128       return RD;
2129     }
2130   }
2131 
2132   return LeastDerivedClassWithSameLayout(
2133       RD->bases_begin()->getType()->getAsCXXRecordDecl());
2134 }
2135 
2136 void CodeGenFunction::EmitVTablePtrCheckForCall(const CXXMethodDecl *MD,
2137                                                 llvm::Value *VTable) {
2138   const CXXRecordDecl *ClassDecl = MD->getParent();
2139   if (!SanOpts.has(SanitizerKind::CFICastStrict))
2140     ClassDecl = LeastDerivedClassWithSameLayout(ClassDecl);
2141 
2142   EmitVTablePtrCheck(ClassDecl, VTable);
2143 }
2144 
2145 void CodeGenFunction::EmitVTablePtrCheckForCast(QualType T,
2146                                                 llvm::Value *Derived,
2147                                                 bool MayBeNull) {
2148   if (!getLangOpts().CPlusPlus)
2149     return;
2150 
2151   auto *ClassTy = T->getAs<RecordType>();
2152   if (!ClassTy)
2153     return;
2154 
2155   const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(ClassTy->getDecl());
2156 
2157   if (!ClassDecl->isCompleteDefinition() || !ClassDecl->isDynamicClass())
2158     return;
2159 
2160   SmallString<64> MangledName;
2161   llvm::raw_svector_ostream Out(MangledName);
2162   CGM.getCXXABI().getMangleContext().mangleCXXRTTI(T.getUnqualifiedType(),
2163                                                    Out);
2164 
2165   // Blacklist based on the mangled type.
2166   if (CGM.getContext().getSanitizerBlacklist().isBlacklistedType(Out.str()))
2167     return;
2168 
2169   if (!SanOpts.has(SanitizerKind::CFICastStrict))
2170     ClassDecl = LeastDerivedClassWithSameLayout(ClassDecl);
2171 
  llvm::BasicBlock *ContBlock = nullptr;
2173 
2174   if (MayBeNull) {
2175     llvm::Value *DerivedNotNull =
2176         Builder.CreateIsNotNull(Derived, "cast.nonnull");
2177 
2178     llvm::BasicBlock *CheckBlock = createBasicBlock("cast.check");
2179     ContBlock = createBasicBlock("cast.cont");
2180 
2181     Builder.CreateCondBr(DerivedNotNull, CheckBlock, ContBlock);
2182 
2183     EmitBlock(CheckBlock);
2184   }
2185 
2186   llvm::Value *VTable = GetVTablePtr(Derived, Int8PtrTy);
2187   EmitVTablePtrCheck(ClassDecl, VTable);
2188 
2189   if (MayBeNull) {
2190     Builder.CreateBr(ContBlock);
2191     EmitBlock(ContBlock);
2192   }
2193 }
2194 
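// The check emitted here is, roughly:
//
//   %ok = llvm.bitset.test(bitcast vtable to i8*, !"<mangled bitset name>")
//   br %ok, label %vtable.check.cont, label %vtable.check.trap
// vtable.check.trap:
//   llvm.trap(); unreachable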
2195 void CodeGenFunction::EmitVTablePtrCheck(const CXXRecordDecl *RD,
2196                                          llvm::Value *VTable) {
2197   // FIXME: Add blacklisting scheme.
2198   if (RD->isInStdNamespace())
2199     return;
2200 
2201   std::string OutName;
2202   llvm::raw_string_ostream Out(OutName);
2203   CGM.getCXXABI().getMangleContext().mangleCXXVTableBitSet(RD, Out);
2204 
2205   llvm::Value *BitSetName = llvm::MetadataAsValue::get(
2206       getLLVMContext(), llvm::MDString::get(getLLVMContext(), Out.str()));
2207 
2208   llvm::Value *BitSetTest = Builder.CreateCall2(
2209       CGM.getIntrinsic(llvm::Intrinsic::bitset_test),
2210       Builder.CreateBitCast(VTable, CGM.Int8PtrTy), BitSetName);
2211 
2212   llvm::BasicBlock *ContBlock = createBasicBlock("vtable.check.cont");
2213   llvm::BasicBlock *TrapBlock = createBasicBlock("vtable.check.trap");
2214 
2215   Builder.CreateCondBr(BitSetTest, ContBlock, TrapBlock);
2216 
2217   EmitBlock(TrapBlock);
2218   Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::trap));
2219   Builder.CreateUnreachable();
2220 
2221   EmitBlock(ContBlock);
2222 }
2223 
2224 // FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
2225 // quite what we want.
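// For example, each of the following ultimately yields the DeclRefExpr
// for 'x':
//
//   x
//   (x)
//   __extension__ x
//   (__extension__ (x))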
2226 static const Expr *skipNoOpCastsAndParens(const Expr *E) {
2227   while (true) {
2228     if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
2229       E = PE->getSubExpr();
2230       continue;
2231     }
2232 
2233     if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
2234       if (CE->getCastKind() == CK_NoOp) {
2235         E = CE->getSubExpr();
2236         continue;
2237       }
2238     }
2239     if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
2240       if (UO->getOpcode() == UO_Extension) {
2241         E = UO->getSubExpr();
2242         continue;
2243       }
2244     }
2245     return E;
2246   }
2247 }
2248 
2249 bool
2250 CodeGenFunction::CanDevirtualizeMemberFunctionCall(const Expr *Base,
2251                                                    const CXXMethodDecl *MD) {
2252   // When building with -fapple-kext, all calls must go through the vtable since
2253   // the kernel linker can do runtime patching of vtables.
2254   if (getLangOpts().AppleKext)
2255     return false;
2256 
2257   // If the most derived class is marked final, we know that no subclass can
2258   // override this member function and so we can devirtualize it. For example:
2259   //
  // struct A { virtual void f(); };
2261   // struct B final : A { };
2262   //
2263   // void f(B *b) {
2264   //   b->f();
2265   // }
2266   //
2267   const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType();
2268   if (MostDerivedClassDecl->hasAttr<FinalAttr>())
2269     return true;
2270 
  // If the member function itself is marked 'final', we know that it can't
  // be overridden, so we can devirtualize calls to it.
2273   if (MD->hasAttr<FinalAttr>())
2274     return true;
2275 
  // Similarly, if the method's class is marked 'final', no class can derive
  // from it, so the member function can't be overridden and the call can be
  // devirtualized.
2278   if (MD->getParent()->hasAttr<FinalAttr>())
2279     return true;
2280 
2281   Base = skipNoOpCastsAndParens(Base);
2282   if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
2283     if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
      // This is a variable of record type: its dynamic type is known
      // exactly, so the call can be devirtualized.
2285       return VD->getType()->isRecordType();
2286     }
2287 
2288     return false;
2289   }
2290 
2291   // We can devirtualize calls on an object accessed by a class member access
2292   // expression, since by C++11 [basic.life]p6 we know that it can't refer to
2293   // a derived class object constructed in the same location.
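  // For example:
  //
  //   struct Wrapper { A a; };
  //   void g(Wrapper &w) { w.a.f(); }  // 'w.a' is exactly an A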
2294   if (const MemberExpr *ME = dyn_cast<MemberExpr>(Base))
2295     if (const ValueDecl *VD = dyn_cast<ValueDecl>(ME->getMemberDecl()))
2296       return VD->getType()->isRecordType();
2297 
2298   // We can always devirtualize calls on temporary object expressions.
2299   if (isa<CXXConstructExpr>(Base))
2300     return true;
2301 
2302   // And calls on bound temporaries.
2303   if (isa<CXXBindTemporaryExpr>(Base))
2304     return true;
2305 
2306   // Check if this is a call expr that returns a record type.
2307   if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
2308     return CE->getCallReturnType(getContext())->isRecordType();
2309 
2310   // We can't devirtualize the call.
2311   return false;
2312 }
2313 
2314 void CodeGenFunction::EmitForwardingCallToLambda(
2315                                       const CXXMethodDecl *callOperator,
2316                                       CallArgList &callArgs) {
2317   // Get the address of the call operator.
2318   const CGFunctionInfo &calleeFnInfo =
2319     CGM.getTypes().arrangeCXXMethodDeclaration(callOperator);
2320   llvm::Value *callee =
2321     CGM.GetAddrOfFunction(GlobalDecl(callOperator),
2322                           CGM.getTypes().GetFunctionType(calleeFnInfo));
2323 
2324   // Prepare the return slot.
2325   const FunctionProtoType *FPT =
2326     callOperator->getType()->castAs<FunctionProtoType>();
2327   QualType resultType = FPT->getReturnType();
2328   ReturnValueSlot returnSlot;
2329   if (!resultType->isVoidType() &&
2330       calleeFnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
2331       !hasScalarEvaluationKind(calleeFnInfo.getReturnType()))
2332     returnSlot = ReturnValueSlot(ReturnValue, resultType.isVolatileQualified());
2333 
2334   // We don't need to separately arrange the call arguments because
2335   // the call can't be variadic anyway --- it's impossible to forward
2336   // variadic arguments.
2337 
2338   // Now emit our call.
2339   RValue RV = EmitCall(calleeFnInfo, callee, returnSlot,
2340                        callArgs, callOperator);
2341 
2342   // If necessary, copy the returned value into the slot.
2343   if (!resultType->isVoidType() && returnSlot.isNull())
2344     EmitReturnOfRValue(RV, resultType);
2345   else
2346     EmitBranchThroughCleanup(ReturnBlock);
2347 }
2348 
2349 void CodeGenFunction::EmitLambdaBlockInvokeBody() {
2350   const BlockDecl *BD = BlockInfo->getBlockDecl();
2351   const VarDecl *variable = BD->capture_begin()->getVariable();
2352   const CXXRecordDecl *Lambda = variable->getType()->getAsCXXRecordDecl();
2353 
  // Start building the arguments for the forwarding call.
  CallArgList CallArgs;

  QualType ThisType =
      getContext().getPointerType(getContext().getRecordType(Lambda));
2358   llvm::Value *ThisPtr = GetAddrOfBlockDecl(variable, false);
2359   CallArgs.add(RValue::get(ThisPtr), ThisType);
2360 
2361   // Add the rest of the parameters.
2362   for (auto param : BD->params())
2363     EmitDelegateCallArg(CallArgs, param, param->getLocStart());
2364 
2365   assert(!Lambda->isGenericLambda() &&
2366             "generic lambda interconversion to block not implemented");
2367   EmitForwardingCallToLambda(Lambda->getLambdaCallOperator(), CallArgs);
2368 }
2369 
2370 void CodeGenFunction::EmitLambdaToBlockPointerBody(FunctionArgList &Args) {
2371   if (cast<CXXMethodDecl>(CurCodeDecl)->isVariadic()) {
2372     // FIXME: Making this work correctly is nasty because it requires either
2373     // cloning the body of the call operator or making the call operator forward.
2374     CGM.ErrorUnsupported(CurCodeDecl, "lambda conversion to variadic function");
2375     return;
2376   }
2377 
2378   EmitFunctionBody(Args, cast<FunctionDecl>(CurGD.getDecl())->getBody());
2379 }
2380 
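/// Emit the body of a lambda's delegating invoker (the static function that
/// backs conversion to a function pointer) by forwarding to the lambda's
/// call operator, e.g. for:
///
///   auto l = [](int n) { return n + 1; };
///   int (*fp)(int) = l;   // fp points at the invoker
///
/// The lambda is necessarily captureless, so 'this' is never used and an
/// undef value suffices for it.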
2381 void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
2382   const CXXRecordDecl *Lambda = MD->getParent();
2383 
  // Start building the arguments for the forwarding call.
  CallArgList CallArgs;

  QualType ThisType =
      getContext().getPointerType(getContext().getRecordType(Lambda));
  llvm::Value *ThisPtr =
      llvm::UndefValue::get(getTypes().ConvertType(ThisType));
2389   CallArgs.add(RValue::get(ThisPtr), ThisType);
2390 
2391   // Add the rest of the parameters.
2392   for (auto Param : MD->params())
2393     EmitDelegateCallArg(CallArgs, Param, Param->getLocStart());
2394 
2395   const CXXMethodDecl *CallOp = Lambda->getLambdaCallOperator();
2396   // For a generic lambda, find the corresponding call operator specialization
2397   // to which the call to the static-invoker shall be forwarded.
2398   if (Lambda->isGenericLambda()) {
2399     assert(MD->isFunctionTemplateSpecialization());
2400     const TemplateArgumentList *TAL = MD->getTemplateSpecializationArgs();
    FunctionTemplateDecl *CallOpTemplate =
        CallOp->getDescribedFunctionTemplate();
2402     void *InsertPos = nullptr;
2403     FunctionDecl *CorrespondingCallOpSpecialization =
2404         CallOpTemplate->findSpecialization(TAL->asArray(), InsertPos);
2405     assert(CorrespondingCallOpSpecialization);
2406     CallOp = cast<CXXMethodDecl>(CorrespondingCallOpSpecialization);
2407   }
2408   EmitForwardingCallToLambda(CallOp, CallArgs);
2409 }
2410 
2411 void CodeGenFunction::EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD) {
2412   if (MD->isVariadic()) {
2413     // FIXME: Making this work correctly is nasty because it requires either
2414     // cloning the body of the call operator or making the call operator forward.
2415     CGM.ErrorUnsupported(MD, "lambda conversion to variadic function");
2416     return;
2417   }
2418 
2419   EmitLambdaDelegatingInvokeBody(MD);
2420 }
2421