//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Aggregate Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                        Aggregate Expression Emitter
//===----------------------------------------------------------------------===//

namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  AggValueSlot Dest;
  bool IsResultUnused;

  AggValueSlot EnsureSlot(QualType T) {
    if (!Dest.isIgnored()) return Dest;
    return CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }
  void EnsureDest(QualType T) {
    if (!Dest.isIgnored()) return;
    Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }

  // Calls `Fn` with a valid return value slot, potentially creating a temporary
  // to do so. If a temporary is created, an appropriate copy into `Dest` will
  // be emitted, as will lifetime markers.
  //
  // The given function should take a ReturnValueSlot, and return an RValue that
  // points to said slot.
  void withReturnValueSlot(const Expr *E,
                           llvm::function_ref<RValue(ReturnValueSlot)> Fn);

public:
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
    : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
    IsResultUnused(IsResultUnused) { }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//
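  //
  // (Illustrative: given `struct S f(void);` and a use like `S s = f();`,
  // the call is emitted either directly into `s`'s slot, or into a
  // lifetime-marked temporary that is then copied into `s`.)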
  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents an lvalue, this method emits the address of the lvalue,
  /// then copies the result into DestPtr.
  void EmitAggLoadOfLValue(const Expr *E);

  enum ExprValueKind {
    EVK_RValue,
    EVK_NonRValue
  };

  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
  /// SrcValueKind is EVK_RValue if the source comes from an RValue.
  void EmitFinalDestCopy(QualType type, const LValue &src,
                         ExprValueKind SrcValueKind = EVK_NonRValue);
  void EmitFinalDestCopy(QualType type, RValue src);
  void EmitCopy(QualType type, const AggValueSlot &dest,
                const AggValueSlot &src);

  void EmitMoveFromReturnSlot(const Expr *E, RValue Src);

  void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
                     QualType ArrayQTy, InitListExpr *E);

  AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
    if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
      return AggValueSlot::NeedsGCBarriers;
    return AggValueSlot::DoesNotNeedGCBarriers;
  }

  bool TypeRequiresGCollection(QualType T);

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  void Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    StmtVisitor<AggExprEmitter>::Visit(E);
  }

  void VisitStmt(Stmt *S) {
    CGF.ErrorUnsupported(S, "aggregate expression");
  }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    Visit(GE->getResultExpr());
  }
  void VisitCoawaitExpr(CoawaitExpr *E) {
    CGF.EmitCoawaitExpr(*E, Dest, IsResultUnused);
  }
  void VisitCoyieldExpr(CoyieldExpr *E) {
    CGF.EmitCoyieldExpr(*E, Dest, IsResultUnused);
  }
  void VisitUnaryCoawait(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }

  // l-values.
  void VisitDeclRefExpr(DeclRefExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  // Operators.
  void VisitCastExpr(CastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
  void VisitChooseExpr(const ChooseExpr *CE);
  void VisitInitListExpr(InitListExpr *E);
  void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
                              llvm::Value *outerBegin = nullptr);
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    Visit(DAE->getExpr());
  }
  void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF);
    Visit(DIE->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
  void VisitLambdaExpr(LambdaExpr *E);
  void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
  void VisitExprWithCleanups(ExprWithCleanups *E);
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
  void VisitOpaqueValueExpr(OpaqueValueExpr *E);

  void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    if (E->isGLValue()) {
      LValue LV = CGF.EmitPseudoObjectLValue(E);
      return EmitFinalDestCopy(E->getType(), LV);
    }

    CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
  }

  void VisitVAArgExpr(VAArgExpr *E);

  void EmitInitializationToLValue(Expr *E, LValue Address);
  void EmitNullInitializationToLValue(LValue Address);
  //  case Expr::ChooseExprClass:
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
  void VisitAtomicExpr(AtomicExpr *E) {
    RValue Res = CGF.EmitAtomicExpr(E);
    EmitFinalDestCopy(E->getType(), Res);
  }
};
} // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// represents an lvalue, this method emits the address of the lvalue,
/// then copies the result into DestPtr.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
  LValue LV = CGF.EmitLValue(E);

  // If the type of the l-value is atomic, then do an atomic load.
  if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) {
    CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
    return;
  }

  EmitFinalDestCopy(E->getType(), LV);
}

/// \brief True if the given aggregate type requires special GC API calls.
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
  // Only record types have members that might require garbage collection.
  const RecordType *RecordTy = T->getAs<RecordType>();
  if (!RecordTy) return false;

  // Don't mess with non-trivial C++ types.
  RecordDecl *Record = RecordTy->getDecl();
  if (isa<CXXRecordDecl>(Record) &&
      (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
       !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    return false;

  // Check whether the type has an object member.
  return Record->hasObjectMember();
}

void AggExprEmitter::withReturnValueSlot(
    const Expr *E, llvm::function_ref<RValue(ReturnValueSlot)> EmitCall) {
  QualType RetTy = E->getType();
  bool RequiresDestruction =
      Dest.isIgnored() &&
      RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct;

  // If it makes no observable difference, save a memcpy + temporary.
  //
  // We need to always provide our own temporary if destruction is required.
  // Otherwise, EmitCall will emit its own, notice that it's "unused", and end
  // its lifetime before we have the chance to emit a proper destructor call.
  bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() ||
                 (RequiresDestruction && !Dest.getAddress().isValid());

  Address RetAddr = Address::invalid();

  EHScopeStack::stable_iterator LifetimeEndBlock;
  llvm::Value *LifetimeSizePtr = nullptr;
  llvm::IntrinsicInst *LifetimeStartInst = nullptr;
  if (!UseTemp) {
    RetAddr = Dest.getAddress();
  } else {
    RetAddr = CGF.CreateMemTemp(RetTy);
    uint64_t Size =
        CGF.CGM.getDataLayout().getTypeAllocSize(CGF.ConvertTypeForMem(RetTy));
    LifetimeSizePtr = CGF.EmitLifetimeStart(Size, RetAddr.getPointer());
    if (LifetimeSizePtr) {
      LifetimeStartInst =
          cast<llvm::IntrinsicInst>(std::prev(Builder.GetInsertPoint()));
      assert(LifetimeStartInst->getIntrinsicID() ==
                 llvm::Intrinsic::lifetime_start &&
             "Last insertion wasn't a lifetime.start?");

      CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>(
          NormalEHLifetimeMarker, RetAddr, LifetimeSizePtr);
      LifetimeEndBlock = CGF.EHStack.stable_begin();
    }
  }

  RValue Src =
      EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused));

  if (RequiresDestruction)
    CGF.pushDestroy(RetTy.isDestructedType(), Src.getAggregateAddress(), RetTy);

  if (!UseTemp)
    return;

  assert(Dest.getPointer() != Src.getAggregatePointer());
  EmitFinalDestCopy(E->getType(), Src);

  if (!RequiresDestruction && LifetimeStartInst) {
    // If there's no dtor to run, the copy was the last use of our temporary.
    // Since we're not guaranteed to be in an ExprWithCleanups, clean up
    // eagerly.
    CGF.DeactivateCleanupBlock(LifetimeEndBlock, LifetimeStartInst);
    CGF.EmitLifetimeEnd(LifetimeSizePtr, RetAddr.getPointer());
  }
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
  assert(src.isAggregate() && "value must be aggregate value!");
  LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
  EmitFinalDestCopy(type, srcLV, EVK_RValue);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src,
                                       ExprValueKind SrcValueKind) {
  // If Dest is ignored, then we're evaluating an aggregate expression
  // in a context that doesn't care about the result.  Note that loads
  // from volatile l-values force the existence of a non-ignored
  // destination.
  if (Dest.isIgnored())
    return;

  // Copy non-trivial C structs here.
  LValue DstLV = CGF.MakeAddrLValue(
      Dest.getAddress(), Dest.isVolatile() ? type.withVolatile() : type);

  if (SrcValueKind == EVK_RValue) {
    if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) {
      if (Dest.isPotentiallyAliased())
        CGF.callCStructMoveAssignmentOperator(DstLV, src);
      else
        CGF.callCStructMoveConstructor(DstLV, src);
      return;
    }
  } else {
    if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
      if (Dest.isPotentiallyAliased())
        CGF.callCStructCopyAssignmentOperator(DstLV, src);
      else
        CGF.callCStructCopyConstructor(DstLV, src);
      return;
    }
  }

  AggValueSlot srcAgg =
    AggValueSlot::forLValue(src, AggValueSlot::IsDestructed,
                            needsGC(type), AggValueSlot::IsAliased,
                            AggValueSlot::MayOverlap);
  EmitCopy(type, Dest, srcAgg);
}

/// Perform a copy from the source into the destination.
///
/// \param type - the type of the aggregate being copied; qualifiers are
///   ignored
void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
                              const AggValueSlot &src) {
  if (dest.requiresGCollection()) {
    CharUnits sz = dest.getPreferredSize(CGF.getContext(), type);
    llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
                                                      dest.getAddress(),
                                                      src.getAddress(),
                                                      size);
    return;
  }

  // If the result of the assignment is used, copy the LHS there also.
  // It's volatile if either side is.  Use the minimum alignment of
  // the two sides.
  LValue DestLV = CGF.MakeAddrLValue(dest.getAddress(), type);
  LValue SrcLV = CGF.MakeAddrLValue(src.getAddress(), type);
  CGF.EmitAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(),
                        dest.isVolatile() || src.isVolatile());
}

/// \brief Emit the initializer for a std::initializer_list initialized with a
/// real initializer list.
void
AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
  // Emit an array containing the elements.  The array is externally destructed
  // if the std::initializer_list object is.
  ASTContext &Ctx = CGF.getContext();
  LValue Array = CGF.EmitLValue(E->getSubExpr());
  assert(Array.isSimple() && "initializer_list array not a simple lvalue");
  Address ArrayPtr = Array.getAddress();

  const ConstantArrayType *ArrayType =
      Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
  assert(ArrayType && "std::initializer_list constructed from non-array");

  // FIXME: Perform the checks on the field types in SemaInit.
  RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
  RecordDecl::field_iterator Field = Record->field_begin();
  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  // Start pointer.
  if (!Field->getType()->isPointerType() ||
      !Ctx.hasSameType(Field->getType()->getPointeeType(),
                       ArrayType->getElementType())) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  AggValueSlot Dest = EnsureSlot(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
  LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
  llvm::Value *IdxStart[] = { Zero, Zero };
  llvm::Value *ArrayStart =
      Builder.CreateInBoundsGEP(ArrayPtr.getPointer(), IdxStart, "arraystart");
  CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
  ++Field;

  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  llvm::Value *Size = Builder.getInt(ArrayType->getSize());
  LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  if (Field->getType()->isPointerType() &&
      Ctx.hasSameType(Field->getType()->getPointeeType(),
                      ArrayType->getElementType())) {
    // End pointer.
    llvm::Value *IdxEnd[] = { Zero, Size };
    llvm::Value *ArrayEnd =
        Builder.CreateInBoundsGEP(ArrayPtr.getPointer(), IdxEnd, "arrayend");
    CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
  } else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
    // Length.
    CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
  } else {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }
}

/// \brief Determine if E is a trivial array filler, that is, one that is
/// equivalent to zero-initialization.
static bool isTrivialFiller(Expr *E) {
  if (!E)
    return true;

  if (isa<ImplicitValueInitExpr>(E))
    return true;

  if (auto *ILE = dyn_cast<InitListExpr>(E)) {
    if (ILE->getNumInits())
      return false;
    return isTrivialFiller(ILE->getArrayFiller());
  }

  if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
    return Cons->getConstructor()->isDefaultConstructor() &&
           Cons->getConstructor()->isTrivial();

  // FIXME: Are there other cases where we can avoid emitting an initializer?
  return false;
}

/// \brief Emit initialization of an array from an initializer list.
void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
                                   QualType ArrayQTy, InitListExpr *E) {
  uint64_t NumInitElements = E->getNumInits();

  uint64_t NumArrayElements = AType->getNumElements();
  assert(NumInitElements <= NumArrayElements);

  QualType elementType =
      CGF.getContext().getAsArrayType(ArrayQTy)->getElementType();

  // DestPtr is an array*.  Construct an elementType* by drilling
  // down a level.
  llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
  llvm::Value *indices[] = { zero, zero };
  llvm::Value *begin =
    Builder.CreateInBoundsGEP(DestPtr.getPointer(), indices, "arrayinit.begin");

  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
  CharUnits elementAlign =
    DestPtr.getAlignment().alignmentOfArrayElement(elementSize);

  // Consider initializing the array by copying from a global. For this to be
  // more efficient than per-element initialization, the size of the elements
  // with explicit initializers should be large enough.
  if (NumInitElements * elementSize.getQuantity() > 16 &&
      elementType.isTriviallyCopyableType(CGF.getContext())) {
    CodeGen::CodeGenModule &CGM = CGF.CGM;
    ConstantEmitter Emitter(CGM);
    LangAS AS = ArrayQTy.getAddressSpace();
    if (llvm::Constant *C = Emitter.tryEmitForInitializer(E, AS, ArrayQTy)) {
      auto GV = new llvm::GlobalVariable(
          CGM.getModule(), C->getType(),
          CGM.isTypeConstant(ArrayQTy, /* ExcludeCtorDtor= */ true),
          llvm::GlobalValue::PrivateLinkage, C, "constinit",
          /* InsertBefore= */ nullptr, llvm::GlobalVariable::NotThreadLocal,
          CGM.getContext().getTargetAddressSpace(AS));
      Emitter.finalize(GV);
      CharUnits Align = CGM.getContext().getTypeAlignInChars(ArrayQTy);
      GV->setAlignment(Align.getQuantity());
      EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GV, ArrayQTy, Align));
      return;
    }
  }

  // Exception safety requires us to destroy all the
  // already-constructed members if an initializer throws.
  // For that, we'll need an EH cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  Address endOfInit = Address::invalid();
  EHScopeStack::stable_iterator cleanup;
  llvm::Instruction *cleanupDominator = nullptr;
  if (CGF.needsEHCleanup(dtorKind)) {
    // In principle we could tell the cleanup where we are more
    // directly, but the control flow can get so varied here that it
    // would actually be quite complex.  Therefore we go through an
    // alloca.
    endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
                                     "arrayinit.endOfInit");
    cleanupDominator = Builder.CreateStore(begin, endOfInit);
    CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                         elementAlign,
                                         CGF.getDestroyer(dtorKind));
    cleanup = CGF.EHStack.stable_begin();

  // Otherwise, remember that we didn't need a cleanup.
  } else {
    dtorKind = QualType::DK_none;
  }

  llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

  // The 'current element to initialize'.  The invariants on this
  // variable are complicated.  Essentially, after each iteration of
  // the loop, it points to the last initialized element, except
  // that it points to the beginning of the array before any
  // elements have been initialized.
  llvm::Value *element = begin;

  // Emit the explicit initializers.
  for (uint64_t i = 0; i != NumInitElements; ++i) {
    // Advance to the next element.
    if (i > 0) {
      element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");

      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    LValue elementLV =
      CGF.MakeAddrLValue(Address(element, elementAlign), elementType);
    EmitInitializationToLValue(E->getInit(i), elementLV);
  }

  // Check whether there's a non-trivial array-fill expression.
  Expr *filler = E->getArrayFiller();
  bool hasTrivialFiller = isTrivialFiller(filler);

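/// (Illustrative: under Objective-C GC, a struct such as
/// `struct S { __strong id obj; };` has an object member, so block copies
/// of it must go through the collector's barrier-aware memmove.)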
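/// (Illustrative: for `std::initializer_list<int> il = {1, 2, 3};`, the
/// backing array is emitted first, and the list's start pointer plus either
/// an end pointer or a length field are then filled in below.)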
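/// (Illustrative: in `int a[8] = {1, 2};`, the filler for the six remaining
/// elements is trivial, so they can be left to zero-initialization.)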
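  // (Illustrative: `int a[6] = {1, 2, 3, 4, 5, 6};`, assuming a 4-byte int,
  // has 24 bytes of trivially-copyable explicit initializers, so it can
  // become a copy from a private constant global rather than six stores.)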
  // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression.  We can skip this if we're
  // emitting to zeroed memory.
  if (NumInitElements != NumArrayElements &&
      !(Dest.isZeroed() && hasTrivialFiller &&
        CGF.getTypes().isZeroInitializable(elementType))) {

    // Use an actual loop.  This is basically
    //   do { *array++ = filler; } while (array != end);

    // Advance to the start of the rest of the array.
    if (NumInitElements) {
      element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    // Compute the end of the array.
    llvm::Value *end = Builder.CreateInBoundsGEP(begin,
                      llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
                                                 "arrayinit.end");

    llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
    llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

    // Jump into the body.
    CGF.EmitBlock(bodyBB);
    llvm::PHINode *currentElement =
      Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
    currentElement->addIncoming(element, entryBB);

    // Emit the actual filler expression.
    {
      // C++1z [class.temporary]p5:
      //   when a default constructor is called to initialize an element of
      //   an array with no corresponding initializer [...] the destruction of
      //   every temporary created in a default argument is sequenced before
      //   the construction of the next array element, if any
      CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
      LValue elementLV =
        CGF.MakeAddrLValue(Address(currentElement, elementAlign), elementType);
      if (filler)
        EmitInitializationToLValue(filler, elementLV);
      else
        EmitNullInitializationToLValue(elementLV);
    }

    // Move on to the next element.
    llvm::Value *nextElement =
      Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");

    // Tell the EH cleanup that we finished with the last element.
    if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);

    // Leave the loop if we're done.
    llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
                                             "arrayinit.done");
    llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
    Builder.CreateCondBr(done, endBB, bodyBB);
    currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

    CGF.EmitBlock(endBB);
  }

  // Leave the partial-array cleanup if we entered one.
  if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
}

//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
  Visit(E->GetTemporaryExpr());
}

void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
  // If this is a unique OVE, just visit its source expression.
  if (e->isUnique())
    Visit(e->getSourceExpr());
  else
    EmitFinalDestCopy(e->getType(), CGF.getOrCreateOpaqueLValueMapping(e));
}

void
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
  if (Dest.isPotentiallyAliased() &&
      E->getType().isPODType(CGF.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    EmitAggLoadOfLValue(E);
    return;
  }

  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitAggExpr(E->getInitializer(), Slot);
}

/// Attempt to look through various unimportant expressions to find a
/// cast of the given kind.
static Expr *findPeephole(Expr *op, CastKind kind) {
  while (true) {
    op = op->IgnoreParens();
    if (CastExpr *castE = dyn_cast<CastExpr>(op)) {
      if (castE->getCastKind() == kind)
        return castE->getSubExpr();
      if (castE->getCastKind() == CK_NoOp)
        continue;
    }
    return nullptr;
  }
}

void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
    CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
  switch (E->getCastKind()) {
  case CK_Dynamic: {
    // FIXME: Can this actually happen? We have no test coverage for it.
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
                                      CodeGenFunction::TCK_Load);
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (!Dest.isIgnored())
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    // Evaluate even if the destination is ignored.
    if (Dest.isIgnored()) {
      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
                      /*ignoreResult=*/true);
      break;
    }

    // GCC union extension
    QualType Ty = E->getSubExpr()->getType();
    Address CastPtr =
      Builder.CreateElementBitCast(Dest.getAddress(), CGF.ConvertType(Ty));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty));
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
                "should have been unpacked before we got here");
  }

  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic: {
    bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);

    // Determine the atomic and value types.
    QualType atomicType = E->getSubExpr()->getType();
    QualType valueType = E->getType();
    if (isToAtomic) std::swap(atomicType, valueType);

    assert(atomicType->isAtomicType());
    assert(CGF.getContext().hasSameUnqualifiedType(valueType,
                          atomicType->castAs<AtomicType>()->getValueType()));

    // Just recurse normally if we're ignoring the result or the
    // atomic type doesn't change representation.
    if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
      return Visit(E->getSubExpr());
    }

    CastKind peepholeTarget =
      (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);

    // These two cases are reverses of each other; try to peephole them.
    if (Expr *op = findPeephole(E->getSubExpr(), peepholeTarget)) {
      assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
                                                     E->getType()) &&
           "peephole significantly changed types?");
      return Visit(op);
    }

    // If we're converting an r-value of non-atomic type to an r-value
    // of atomic type, just emit directly into the relevant sub-object.
    if (isToAtomic) {
      AggValueSlot valueDest = Dest;
      if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
        // Zero-initialize.  (Strictly speaking, we only need to initialize
        // the padding at the end, but this is simpler.)
        if (!Dest.isZeroed())
          CGF.EmitNullInitialization(Dest.getAddress(), atomicType);

        // Build a GEP to refer to the subobject.
        Address valueAddr =
            CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0,
                                        CharUnits());
        valueDest = AggValueSlot::forAddr(valueAddr,
                                          valueDest.getQualifiers(),
                                          valueDest.isExternallyDestructed(),
                                          valueDest.requiresGCollection(),
                                          valueDest.isPotentiallyAliased(),
                                          AggValueSlot::DoesNotOverlap,
                                          AggValueSlot::IsZeroed);
      }

      CGF.EmitAggExpr(E->getSubExpr(), valueDest);
      return;
    }

    // Otherwise, we're converting an atomic type to a non-atomic type.
    // Make an atomic temporary, emit into that, and then copy the value out.
    AggValueSlot atomicSlot =
      CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
    CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);

    Address valueAddr =
      Builder.CreateStructGEP(atomicSlot.getAddress(), 0, CharUnits());
    RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
    return EmitFinalDestCopy(valueType, rvalue);
  }

  case CK_LValueToRValue:
    // If we're loading from a volatile type, force the destination
    // into existence.
    if (E->getSubExpr()->getType().isVolatileQualified()) {
      EnsureDest(E->getType());
      return Visit(E->getSubExpr());
    }

    LLVM_FALLTHROUGH;

  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");

  case CK_Dependent:
  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_BuiltinFnToFnPtr:
  case CK_ZeroToOCLEvent:
  case CK_ZeroToOCLQueue:
  case CK_AddressSpaceConversion:
  case CK_IntToOCLSampler:
    llvm_unreachable("cast kind invalid for aggregate types");
  }
}

void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
  if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
    EmitAggLoadOfLValue(E);
    return;
  }

  withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
    return CGF.EmitCallExpr(E, Slot);
  });
}

void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
    return CGF.EmitObjCMessageExpr(E, Slot);
  });
}

void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  Visit(E->getRHS());
}

void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}

void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
  if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
    VisitPointerToDataMemberBinaryOperator(E);
  else
    CGF.ErrorUnsupported(E, "aggregate binary expression");
}

void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
                                                    const BinaryOperator *E) {
  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
  EmitFinalDestCopy(E->getType(), LV);
}

/// Is the value of the given expression possibly a reference to or
/// into a __block variable?
static bool isBlockVarRef(const Expr *E) {
  // Make sure we look through parens.
  E = E->IgnoreParens();

  // Check for a direct reference to a __block variable.
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
    const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
    return (var && var->hasAttr<BlocksAttr>());
  }

  // More complicated stuff.

  // Binary operators.
  if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
    // For an assignment or pointer-to-member operation, just care
    // about the LHS.
    if (op->isAssignmentOp() || op->isPtrMemOp())
      return isBlockVarRef(op->getLHS());

    // For a comma, just care about the RHS.
    if (op->getOpcode() == BO_Comma)
      return isBlockVarRef(op->getRHS());

    // FIXME: pointer arithmetic?
    return false;

  // Check both sides of a conditional operator.
  } else if (const AbstractConditionalOperator *op
               = dyn_cast<AbstractConditionalOperator>(E)) {
    return isBlockVarRef(op->getTrueExpr())
        || isBlockVarRef(op->getFalseExpr());

  // OVEs are required to support BinaryConditionalOperators.
  } else if (const OpaqueValueExpr *op
               = dyn_cast<OpaqueValueExpr>(E)) {
    if (const Expr *src = op->getSourceExpr())
      return isBlockVarRef(src);

  // Casts are necessary to get things like (*(int*)&var) = foo().
  // We don't really care about the kind of cast here, except
  // we don't want to look through l2r casts, because it's okay
  // to get the *value* in a __block variable.
  } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
    if (cast->getCastKind() == CK_LValueToRValue)
      return false;
    return isBlockVarRef(cast->getSubExpr());

  // Handle unary operators.  Again, just aggressively look through
  // it, ignoring the operation.
  } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
    return isBlockVarRef(uop->getSubExpr());

  // Look into the base of a field access.
  } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
    return isBlockVarRef(mem->getBase());

  // Look into the base of a subscript.
  } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
    return isBlockVarRef(sub->getBase());
  }

  return false;
}

void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");

  // If the LHS might be a __block variable, and the RHS can
  // potentially cause a block copy, we need to evaluate the RHS first
  // so that the assignment goes the right place.
  // This is pretty semantically fragile.
  if (isBlockVarRef(E->getLHS()) &&
      E->getRHS()->HasSideEffects(CGF.getContext())) {
    // Ensure that we have a destination, and evaluate the RHS into that.
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());

    // Now emit the LHS and copy into it.
    LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // That copy is an atomic copy if the LHS is atomic.
    if (LHS.getType()->isAtomicType() ||
        CGF.LValueIsSuitableForInlineAtomic(LHS)) {
      CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
      return;
    }

    EmitCopy(E->getLHS()->getType(),
             AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                                     needsGC(E->getLHS()->getType()),
                                     AggValueSlot::IsAliased,
                                     AggValueSlot::MayOverlap),
             Dest);
    return;
  }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // If we have an atomic type, evaluate into the destination and then
  // do an atomic copy.
  if (LHS.getType()->isAtomicType() ||
      CGF.LValueIsSuitableForInlineAtomic(LHS)) {
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());
    CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
    return;
  }

  // Codegen the RHS so that it stores directly into the LHS.
  AggValueSlot LHSSlot =
    AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                            needsGC(E->getLHS()->getType()),
                            AggValueSlot::IsAliased,
                            AggValueSlot::MayOverlap);
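/// (Illustrative: this is used below to collapse patterns such as
/// `NonAtomicToAtomic(AtomicToNonAtomic(x))` back to `x`, looking through
/// parentheses and no-op casts along the way.)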
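    // (Illustrative: given `union U { int i; float f; };`, the GCC cast
    // `(union U)someInt` initializes the `int` member of the union in place.)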
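/// (Illustrative: in `__block S s; s = makeS();`, evaluating the RHS may
/// trigger a block copy that moves `s` to the heap, so the RHS has to be
/// evaluated before the address of `s` is computed; see VisitBinAssign.)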
  // A non-volatile aggregate destination might have a volatile member.
  if (!LHSSlot.isVolatile() &&
      CGF.hasVolatileMember(E->getLHS()->getType()))
    LHSSlot.setVolatile(true);

  CGF.EmitAggExpr(E->getRHS(), LHSSlot);

  // Copy into the destination if the assignment isn't ignored.
  EmitFinalDestCopy(E->getType(), LHS);
}

void AggExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
                           CGF.getProfileCount(E));

  // Save whether the destination's lifetime is externally managed.
  bool isExternallyDestructed = Dest.isExternallyDestructed();

  eval.begin(CGF);
  CGF.EmitBlock(LHSBlock);
  CGF.incrementProfileCounter(E);
  Visit(E->getTrueExpr());
  eval.end(CGF);

  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
  CGF.Builder.CreateBr(ContBlock);

  // If the result of an agg expression is unused, then the emission
  // of the LHS might need to create a destination slot.  That's fine
  // with us, and we can safely emit the RHS into the same slot, but
  // we shouldn't claim that it's already being destructed.
  Dest.setExternallyDestructed(isExternallyDestructed);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Visit(E->getFalseExpr());
  eval.end(CGF);

  CGF.EmitBlock(ContBlock);
}

void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
  Visit(CE->getChosenSubExpr());
}

void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  Address ArgValue = Address::invalid();
  Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);

  // If EmitVAArg fails, emit an error.
  if (!ArgPtr.isValid()) {
    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
    return;
  }

  EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}

void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
  // Ensure that we have a slot, but if we already do, remember
  // whether it was externally destructed.
  bool wasExternallyDestructed = Dest.isExternallyDestructed();
  EnsureDest(E->getType());

  // We're going to push a destructor if there isn't already one.
  Dest.setExternallyDestructed();

  Visit(E->getSubExpr());

  // Push that destructor we promised.
  if (!wasExternallyDestructed)
    CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
}

void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitCXXConstructExpr(E, Slot);
}

void AggExprEmitter::VisitCXXInheritedCtorInitExpr(
    const CXXInheritedCtorInitExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitInheritedCXXConstructorCall(
      E->getConstructor(), E->constructsVBase(), Slot.getAddress(),
      E->inheritedFromVBase(), E);
}

void
AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitLambdaExpr(E, Slot);
}

void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
  CGF.enterFullExpression(E);
  CodeGenFunction::RunCleanupsScope cleanups(CGF);
  Visit(E->getSubExpr());
}

void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}

void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}

/// isSimpleZero - If emitting this value will obviously just cause a store of
/// zero to memory, return true.  This can return false if uncertain, so it just
/// handles simple cases.
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0
  if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
    return IL->getValue() == 0;
  // +0.0
  if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
    return FL->getValue().isPosZero();
  // int()
  if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
      CGF.getTypes().isZeroInitializable(E->getType()))
    return true;
  // (int*)0 - Null pointer expressions.
  if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
    return ICE->getCastKind() == CK_NullToPointer &&
        CGF.getTypes().isPointerZeroInitializable(E->getType());
  // '\0'
  if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
    return CL->getValue() == 0;

  // Otherwise, hard case: conservatively return false.
  return false;
}


void
AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
  QualType type = LV.getType();
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zero'd memory location is a noop.
    return;
  } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
    return EmitNullInitializationToLValue(LV);
  } else if (isa<NoInitExpr>(E)) {
    // Do nothing.
    return;
  } else if (type->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E);
    return CGF.EmitStoreThroughLValue(RV, LV);
  }

  switch (CGF.getEvaluationKind(type)) {
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
                                               AggValueSlot::IsDestructed,
                                      AggValueSlot::DoesNotNeedGCBarriers,
                                               AggValueSlot::IsNotAliased,
                                               AggValueSlot::MayOverlap,
                                               Dest.isZeroed()));
    return;
  case TEK_Scalar:
    if (LV.isSimple()) {
      CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
    } else {
      CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
    }
    return;
  }
  llvm_unreachable("bad evaluation kind");
}

void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
  QualType type = lv.getType();

  // If the destination slot is already zeroed out before the aggregate is
  // copied into it, we don't have to emit any zeros here.
  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
    return;

  if (CGF.hasScalarEvaluationKind(type)) {
    // For non-aggregates, we can store the appropriate null constant.
    llvm::Value *null = CGF.CGM.EmitNullConstant(type);
    // Note that the following is not equivalent to
    // EmitStoreThroughBitfieldLValue for ARC types.
    if (lv.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
    } else {
      assert(lv.isSimple());
      CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
    }
  } else {
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
  }
}

void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#if 0
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
  // (Length of globals? Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
    llvm::GlobalVariable* GV =
    new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                             llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(E->getType(), CGF.MakeAddrLValue(GV, E->getType()));
    return;
  }
#endif
  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  if (E->isTransparent())
    return Visit(E->getInit(0));

  AggValueSlot Dest = EnsureSlot(E->getType());

  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());

  // Handle initialization of an array.
  if (E->getType()->isArrayType()) {
    auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
    EmitArrayInit(Dest.getAddress(), AType, E->getType(), E);
    return;
  }

  assert(E->getType()->isRecordType() && "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = E->getNumInits();
  RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();

  // We'll need to enter cleanup scopes in case any of the element
  // initializers throws an exception.
  SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
  llvm::Instruction *cleanupDominator = nullptr;

  unsigned curInitIndex = 0;

  // Emit initialization of base classes.
  if (auto *CXXRD = dyn_cast<CXXRecordDecl>(record)) {
    assert(E->getNumInits() >= CXXRD->getNumBases() &&
           "missing initializer for base class");
    for (auto &Base : CXXRD->bases()) {
      assert(!Base.isVirtual() && "should not see vbases here");
      auto *BaseRD = Base.getType()->getAsCXXRecordDecl();
      Address V = CGF.GetAddressOfDirectBaseInCompleteClass(
          Dest.getAddress(), CXXRD, BaseRD,
          /*isBaseVirtual*/ false);
      AggValueSlot AggSlot = AggValueSlot::forAddr(
          V, Qualifiers(),
          AggValueSlot::IsDestructed,
          AggValueSlot::DoesNotNeedGCBarriers,
          AggValueSlot::IsNotAliased,
          CGF.overlapForBaseInit(CXXRD, BaseRD, Base.isVirtual()));
      CGF.EmitAggExpr(E->getInit(curInitIndex++), AggSlot);

      if (QualType::DestructionKind dtorKind =
              Base.getType().isDestructedType()) {
        CGF.pushDestroy(dtorKind, V, Base.getType());
        cleanups.push_back(CGF.EHStack.stable_begin());
      }
    }
  }

  // Prepare a 'this' for CXXDefaultInitExprs.
  CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress());

  if (record->isUnion()) {
    // Only initialize one field of a union. The field itself is
    // specified by the initializer list.
    if (!E->getInitializedFieldInUnion()) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
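  // (Illustrative: `struct P { int x, y; } p = {1};` stores 1 to `p.x` via
  // the per-field loop below and null-initializes `p.y`.)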
      // Make sure that it's really empty and not a failure of
      // semantic analysis.
      for (const auto *Field : record->fields())
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = E->getInitializedFieldInUnion();

    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
    if (NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(0), FieldLoc);
    } else {
      // Default-initialize to null.
      EmitNullInitializationToLValue(FieldLoc);
    }

    return;
  }

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  for (const auto *field : record->fields()) {
    // We're done once we hit the flexible array member.
    if (field->getType()->isIncompleteArrayType())
      break;

    // Always skip anonymous bitfields.
    if (field->isUnnamedBitfield())
      continue;

    // We're done if we reach the end of the explicit initializers, we
    // have a zeroed object, and the rest of the fields are
    // zero-initializable.
    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
        CGF.getTypes().isZeroInitializable(E->getType()))
      break;


    LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
    // We never generate write-barriers for initialized fields.
1361     LV.setNonGC(true);
1362 
1363     if (curInitIndex < NumInitElements) {
1364       // Store the initializer into the field.
1365       EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
1366     } else {
1367       // We're out of initializers; default-initialize to null
1368       EmitNullInitializationToLValue(LV);
1369     }
1370 
1371     // Push a destructor if necessary.
1372     // FIXME: if we have an array of structures, all explicitly
1373     // initialized, we can end up pushing a linear number of cleanups.
1374     bool pushedCleanup = false;
1375     if (QualType::DestructionKind dtorKind
1376           = field->getType().isDestructedType()) {
1377       assert(LV.isSimple());
1378       if (CGF.needsEHCleanup(dtorKind)) {
1379         if (!cleanupDominator)
1380           cleanupDominator = CGF.Builder.CreateAlignedLoad(
1381               CGF.Int8Ty,
1382               llvm::Constant::getNullValue(CGF.Int8PtrTy),
1383               CharUnits::One()); // placeholder
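        // The placeholder load is never meaningfully executed; it only gives
        // DeactivateCleanupBlock a dominating instruction to key off, and is
        // erased once all the cleanups have been deactivated below.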

        CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
                        CGF.getDestroyer(dtorKind), false);
        cleanups.push_back(CGF.EHStack.stable_begin());
        pushedCleanup = true;
      }
    }

    // If the GEP didn't get used because of a dead zero init or something
    // else, clean it up for -O0 builds and general tidiness.
    if (!pushedCleanup && LV.isSimple())
      if (llvm::GetElementPtrInst *GEP =
            dyn_cast<llvm::GetElementPtrInst>(LV.getPointer()))
        if (GEP->use_empty())
          GEP->eraseFromParent();
  }

  // Deactivate all the partial cleanups in reverse order, which
  // generally means popping them.
  for (unsigned i = cleanups.size(); i != 0; --i)
    CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);

  // Destroy the placeholder if we made one.
  if (cleanupDominator)
    cleanupDominator->eraseFromParent();
}

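// An ArrayInitLoopExpr represents an implicit elementwise initialization of
// an array. Illustrative sources that produce one include the array-member
// copy in an implicitly-defined copy constructor and a by-value lambda
// capture of an array:
//
//   int a[8];
//   auto l = [a] { return a[0]; };   // 'a' is copied element by element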
void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
                                            llvm::Value *outerBegin) {
  // Emit the common subexpression.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E->getCommonExpr());

  Address destPtr = EnsureSlot(E->getType()).getAddress();
  uint64_t numElements = E->getArraySize().getZExtValue();

  if (!numElements)
    return;

  // destPtr is an array*. Construct an elementType* by drilling down a level.
  llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
  llvm::Value *indices[] = {zero, zero};
  llvm::Value *begin = Builder.CreateInBoundsGEP(destPtr.getPointer(), indices,
                                                 "arrayinit.begin");

  // Prepare to special-case multidimensional array initialization: we avoid
  // emitting multiple destructor loops in that case.
  if (!outerBegin)
    outerBegin = begin;
  ArrayInitLoopExpr *InnerLoop = dyn_cast<ArrayInitLoopExpr>(E->getSubExpr());

  QualType elementType =
      CGF.getContext().getAsArrayType(E->getType())->getElementType();
  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
  CharUnits elementAlign =
      destPtr.getAlignment().alignmentOfArrayElement(elementSize);

  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

  // Jump into the body.
  CGF.EmitBlock(bodyBB);
  llvm::PHINode *index =
      Builder.CreatePHI(zero->getType(), 2, "arrayinit.index");
  index->addIncoming(zero, entryBB);
  llvm::Value *element = Builder.CreateInBoundsGEP(begin, index);

  // Prepare for a cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  EHScopeStack::stable_iterator cleanup;
  if (CGF.needsEHCleanup(dtorKind) && !InnerLoop) {
    if (outerBegin->getType() != element->getType())
      outerBegin = Builder.CreateBitCast(outerBegin, element->getType());
    CGF.pushRegularPartialArrayCleanup(outerBegin, element, elementType,
                                       elementAlign,
                                       CGF.getDestroyer(dtorKind));
    cleanup = CGF.EHStack.stable_begin();
  } else {
    dtorKind = QualType::DK_none;
  }

  // Emit the actual filler expression.
  {
    // Temporaries created in an array initialization loop are destroyed
    // at the end of each iteration.
    CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
    CodeGenFunction::ArrayInitLoopExprScope Scope(CGF, index);
    LValue elementLV =
        CGF.MakeAddrLValue(Address(element, elementAlign), elementType);

    if (InnerLoop) {
      // If the subexpression is an ArrayInitLoopExpr, share its cleanup.
      auto elementSlot = AggValueSlot::forLValue(
          elementLV, AggValueSlot::IsDestructed,
          AggValueSlot::DoesNotNeedGCBarriers,
          AggValueSlot::IsNotAliased,
          AggValueSlot::DoesNotOverlap);
      AggExprEmitter(CGF, elementSlot, false)
          .VisitArrayInitLoopExpr(InnerLoop, outerBegin);
    } else
      EmitInitializationToLValue(E->getSubExpr(), elementLV);
  }

  // Move on to the next element.
  llvm::Value *nextIndex = Builder.CreateNUWAdd(
      index, llvm::ConstantInt::get(CGF.SizeTy, 1), "arrayinit.next");
  index->addIncoming(nextIndex, Builder.GetInsertBlock());

  // Leave the loop if we're done.
  llvm::Value *done = Builder.CreateICmpEQ(
      nextIndex, llvm::ConstantInt::get(CGF.SizeTy, numElements),
      "arrayinit.done");
  llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
  Builder.CreateCondBr(done, endBB, bodyBB);

  CGF.EmitBlock(endBB);

  // Leave the partial-array cleanup if we entered one.
  if (dtorKind)
    CGF.DeactivateCleanupBlock(cleanup, index);
}

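// A DesignatedInitUpdateExpr updates part of an earlier initializer with a
// later designated initializer while preserving the earlier side effects.
// An illustrative C source (a sketch; the exact AST shape may vary):
//
//   struct S { int a, b; };
//   struct S s = { f(), .a = 1 };   // 'f()' is kept, then '.a' is rewritten
//
// It is emitted by storing the base initializer first and then
// re-initializing the updated fields on top of it.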
void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
  AggValueSlot Dest = EnsureSlot(E->getType());

  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
  EmitInitializationToLValue(E->getBase(), DestLV);
  VisitInitListExpr(E->getUpdater());
}

//===----------------------------------------------------------------------===//
//                        Entry Points into this File
//===----------------------------------------------------------------------===//

/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
/// non-zero bytes that will be stored when outputting the initializer for the
/// specified initializer expression.
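/// For example (illustrative), 'struct { int a; int pad[31]; } x = { 5 };'
/// stores only four non-zero bytes even though the object is 128 bytes wide.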
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0 and 0.0 won't require any non-zero stores!
  if (isSimpleZero(E, CGF)) return CharUnits::Zero();

  // If this is an initlist expr, sum up the sizes of the (present)
  // elements.  If this is something weird, assume the whole thing is non-zero.
  const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
  if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
    return CGF.getContext().getTypeSizeInChars(E->getType());

  // InitListExprs for structs have to be handled carefully.  If there are
  // reference members, we need to consider the size of the reference, not the
  // referencee.  InitListExprs for unions and arrays can't have references.
  if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
    if (!RT->isUnionType()) {
      RecordDecl *SD = RT->getDecl();
      CharUnits NumNonZeroBytes = CharUnits::Zero();

      unsigned ILEElement = 0;
      if (auto *CXXRD = dyn_cast<CXXRecordDecl>(SD))
        while (ILEElement != CXXRD->getNumBases())
          NumNonZeroBytes +=
              GetNumNonZeroBytesInInit(ILE->getInit(ILEElement++), CGF);
      for (const auto *Field : SD->fields()) {
        // We're done once we hit the flexible array member or run out of
        // InitListExpr elements.
        if (Field->getType()->isIncompleteArrayType() ||
            ILEElement == ILE->getNumInits())
          break;
        if (Field->isUnnamedBitfield())
          continue;

        const Expr *E = ILE->getInit(ILEElement++);

        // Reference values are always non-null and have the width of a pointer.
        if (Field->getType()->isReferenceType())
          NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
              CGF.getTarget().getPointerWidth(0));
        else
          NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
      }

      return NumNonZeroBytes;
    }
  }

  CharUnits NumNonZeroBytes = CharUnits::Zero();
  for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
    NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
  return NumNonZeroBytes;
}

/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
/// zeros in it, emit a memset and avoid storing the individual zeros.
///
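/// For example (illustrative), 'int arr[128] = { 1 };' is better emitted as
/// one memset of the whole array plus a single store of 1 than as 128
/// individual element stores.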
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
                                     CodeGenFunction &CGF) {
  // If the slot is already known to be zeroed, nothing to do.  Don't mess with
  // volatile stores.
  if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())
    return;

  // C++ objects with a user-declared constructor don't need zeroing.
  if (CGF.getLangOpts().CPlusPlus)
    if (const RecordType *RT = CGF.getContext()
                       .getBaseElementType(E->getType())->getAs<RecordType>()) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
      if (RD->hasUserDeclaredConstructor())
        return;
    }

  // If the type is 16 bytes or smaller, prefer individual stores over memset.
  CharUnits Size = Slot.getPreferredSize(CGF.getContext(), E->getType());
  if (Size <= CharUnits::fromQuantity(16))
    return;

  // Check to see if over 3/4 of the initializer is known to be zero.  If so,
  // we prefer to emit a memset plus individual stores for the rest.
  CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
  if (NumNonZeroBytes*4 > Size)
    return;

  // Okay, it seems like a good idea to use an initial memset, emit the call.
  llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());

  Address Loc = Slot.getAddress();
  Loc = CGF.Builder.CreateElementBitCast(Loc, CGF.Int8Ty);
  CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);

  // Tell the AggExprEmitter that the slot is known zero.
  Slot.setZeroed();
}

/// EmitAggExpr - Emit the computation of the specified expression of aggregate
/// type.  The result is computed into the given slot, which may be ignored if
/// the value of the aggregate expression is not needed.
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
  assert(E && hasAggregateEvaluationKind(E->getType()) &&
         "Invalid aggregate expression to emit");
  assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
         "slot has bits but no address");

  // Optimize the slot if possible.
  CheckAggExprForMemSetUse(Slot, E, *this);

  AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr*>(E));
}

LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
  assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
  Address Temp = CreateMemTemp(E->getType());
  LValue LV = MakeAddrLValue(Temp, E->getType());
  EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsNotAliased,
                                         AggValueSlot::DoesNotOverlap));
  return LV;
}

AggValueSlot::Overlap_t CodeGenFunction::overlapForBaseInit(
    const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual) {
  // Virtual bases are initialized first, in address order, so there's never
  // any overlap during their initialization.
  //
  // FIXME: Under P0840, this is no longer true: the tail padding of a vbase
  // of a field could be reused by a vbase of a containing class.
  if (IsVirtual)
    return AggValueSlot::DoesNotOverlap;

  // If the base class is laid out entirely within the nvsize of the derived
  // class, its tail padding cannot yet be initialized, so we can issue
  // stores at the full width of the base class.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
  if (Layout.getBaseClassOffset(BaseRD) +
          getContext().getASTRecordLayout(BaseRD).getSize() <=
      Layout.getNonVirtualSize())
    return AggValueSlot::DoesNotOverlap;

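  // Illustrative hazard: given a non-POD 'struct B { int i; char c; };' and
  // 'struct D : B { char d; };', the ABI may place 'd' in B's tail padding,
  // so a full-width store to the B subobject of a D could clobber 'd'.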
  // The tail padding may contain values we need to preserve.
  return AggValueSlot::MayOverlap;
}

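// Emit an aggregate copy, normally as a memcpy (or a GC-aware memmove under
// Objective-C GC), sized to avoid clobbering tail padding when the
// destination may overlap another object, and decorated with TBAA metadata.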
void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
                                        AggValueSlot::Overlap_t MayOverlap,
                                        bool isVolatile) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  Address DestPtr = Dest.getAddress();
  Address SrcPtr = Src.getAddress();

  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
      assert((Record->hasTrivialCopyConstructor() ||
              Record->hasTrivialCopyAssignment() ||
              Record->hasTrivialMoveConstructor() ||
              Record->hasTrivialMoveAssignment() ||
              Record->isUnion()) &&
             "Trying to aggregate-copy a type without a trivial copy/move "
             "constructor or assignment operator");
      // Ignore empty classes in C++.
      if (Record->isEmpty())
        return;
    }
  }

  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in any way the storage of the first
  // object, then the overlap shall be exact and the two objects shall have
  // qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely.  If there is a libc that does not
  // safely handle this, we can add a target hook.

  // Get data size info for this aggregate. Don't copy the tail padding if this
  // might be a potentially-overlapping subobject, since the tail padding might
  // be occupied by a different object. Otherwise, copying it is fine.
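  // (Illustrative: on a typical ABI, 'struct B { int i; char c; };' has a
  //  data size of 5 bytes but sizeof(B) == 8; a potentially-overlapping copy
  //  must touch only the first 5.)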
  std::pair<CharUnits, CharUnits> TypeInfo;
  if (MayOverlap)
    TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
  else
    TypeInfo = getContext().getTypeInfoInChars(Ty);

  llvm::Value *SizeVal = nullptr;
  if (TypeInfo.first.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
            getContext().getAsArrayType(Ty))) {
      QualType BaseEltTy;
      SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
      TypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
      assert(!TypeInfo.first.isZero());
      SizeVal = Builder.CreateNUWMul(
          SizeVal,
          llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity()));
    }
  }
  if (!SizeVal) {
    SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity());
  }

  // FIXME: If we have a volatile struct, the optimizer can remove what might
  // appear to be `extra' memory ops:
  //
  // volatile struct { int i; } a, b;
  //
  // int main() {
  //   a = b;
  //   a = b;
  // }
  //
  // we need to use a different call here.  We use isVolatile to indicate when
  // either the source or the destination is volatile.

  DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
  SrcPtr = Builder.CreateElementBitCast(SrcPtr, Int8Ty);

  // Don't do any of the memmove_collectable tests if GC isn't set.
  if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
    // fall through
  } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
    RecordDecl *Record = RecordTy->getDecl();
    if (Record->hasObjectMember()) {
      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                    SizeVal);
      return;
    }
  } else if (Ty->isArrayType()) {
    QualType BaseType = getContext().getBaseElementType(Ty);
    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
      if (RecordTy->getDecl()->hasObjectMember()) {
        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                      SizeVal);
        return;
      }
    }
  }

  auto Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile);

  // Determine the metadata to describe the position of any padding in this
  // memcpy, as well as the TBAA tags for the members of the struct, in case
  // the optimizer wishes to expand it into scalar memory operations.
  if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty))
    Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (CGM.getCodeGenOpts().NewStructPathTBAA) {
    TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForMemoryTransfer(
        Dest.getTBAAInfo(), Src.getTBAAInfo());
    CGM.DecorateInstructionWithTBAA(Inst, TBAAInfo);
  }
}