1 //===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code to emit Aggregate Expr nodes as LLVM code.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CodeGenFunction.h"
15 #include "CGCXXABI.h"
16 #include "CGObjCRuntime.h"
17 #include "CodeGenModule.h"
18 #include "ConstantEmitter.h"
19 #include "clang/AST/ASTContext.h"
20 #include "clang/AST/DeclCXX.h"
21 #include "clang/AST/DeclTemplate.h"
22 #include "clang/AST/StmtVisitor.h"
23 #include "llvm/IR/Constants.h"
24 #include "llvm/IR/Function.h"
25 #include "llvm/IR/GlobalVariable.h"
26 #include "llvm/IR/Intrinsics.h"
27 #include "llvm/IR/IntrinsicInst.h"
28 using namespace clang;
29 using namespace CodeGen;
30 
31 //===----------------------------------------------------------------------===//
32 //                        Aggregate Expression Emitter
33 //===----------------------------------------------------------------------===//
34 
35 namespace  {
36 class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
37   CodeGenFunction &CGF;
38   CGBuilderTy &Builder;
39   AggValueSlot Dest;
40   bool IsResultUnused;
41 
42   AggValueSlot EnsureSlot(QualType T) {
43     if (!Dest.isIgnored()) return Dest;
44     return CGF.CreateAggTemp(T, "agg.tmp.ensured");
45   }
46   void EnsureDest(QualType T) {
47     if (!Dest.isIgnored()) return;
48     Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
49   }
50 
51   // Calls `Fn` with a valid return value slot, potentially creating a temporary
52   // to do so. If a temporary is created, an appropriate copy into `Dest` will
53   // be emitted, as will lifetime markers.
54   //
55   // The given function should take a ReturnValueSlot, and return an RValue that
56   // points to said slot.
57   void withReturnValueSlot(const Expr *E,
58                            llvm::function_ref<RValue(ReturnValueSlot)> Fn);
59 
60 public:
61   AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
62     : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
63     IsResultUnused(IsResultUnused) { }
64 
65   //===--------------------------------------------------------------------===//
66   //                               Utilities
67   //===--------------------------------------------------------------------===//
68 
69   /// EmitAggLoadOfLValue - Given an expression with aggregate type that
70   /// represents a value lvalue, this method emits the address of the lvalue,
71   /// then loads the result into DestPtr.
72   void EmitAggLoadOfLValue(const Expr *E);
73 
74   enum ExprValueKind {
75     EVK_RValue,
76     EVK_NonRValue
77   };
78 
79   /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
80   /// SrcIsRValue is true if source comes from an RValue.
81   void EmitFinalDestCopy(QualType type, const LValue &src,
82                          ExprValueKind SrcValueKind = EVK_NonRValue);
83   void EmitFinalDestCopy(QualType type, RValue src);
84   void EmitCopy(QualType type, const AggValueSlot &dest,
85                 const AggValueSlot &src);
86 
87   void EmitMoveFromReturnSlot(const Expr *E, RValue Src);
88 
89   void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
90                      QualType ArrayQTy, InitListExpr *E);
91 
92   AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
93     if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
94       return AggValueSlot::NeedsGCBarriers;
95     return AggValueSlot::DoesNotNeedGCBarriers;
96   }
97 
98   bool TypeRequiresGCollection(QualType T);
99 
100   //===--------------------------------------------------------------------===//
101   //                            Visitor Methods
102   //===--------------------------------------------------------------------===//
103 
104   void Visit(Expr *E) {
105     ApplyDebugLocation DL(CGF, E);
106     StmtVisitor<AggExprEmitter>::Visit(E);
107   }
108 
109   void VisitStmt(Stmt *S) {
110     CGF.ErrorUnsupported(S, "aggregate expression");
111   }
112   void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
113   void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
114     Visit(GE->getResultExpr());
115   }
116   void VisitCoawaitExpr(CoawaitExpr *E) {
117     CGF.EmitCoawaitExpr(*E, Dest, IsResultUnused);
118   }
119   void VisitCoyieldExpr(CoyieldExpr *E) {
120     CGF.EmitCoyieldExpr(*E, Dest, IsResultUnused);
121   }
122   void VisitUnaryCoawait(UnaryOperator *E) { Visit(E->getSubExpr()); }
123   void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
124   void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
125     return Visit(E->getReplacement());
126   }
127 
128   void VisitConstantExpr(ConstantExpr *E) {
129     return Visit(E->getSubExpr());
130   }
131 
132   // l-values.
133   void VisitDeclRefExpr(DeclRefExpr *E) { EmitAggLoadOfLValue(E); }
134   void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
135   void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
136   void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
137   void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
138   void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
139     EmitAggLoadOfLValue(E);
140   }
141   void VisitPredefinedExpr(const PredefinedExpr *E) {
142     EmitAggLoadOfLValue(E);
143   }
144 
145   // Operators.
146   void VisitCastExpr(CastExpr *E);
147   void VisitCallExpr(const CallExpr *E);
148   void VisitStmtExpr(const StmtExpr *E);
149   void VisitBinaryOperator(const BinaryOperator *BO);
150   void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
151   void VisitBinAssign(const BinaryOperator *E);
152   void VisitBinComma(const BinaryOperator *E);
153   void VisitBinCmp(const BinaryOperator *E);
154 
155   void VisitObjCMessageExpr(ObjCMessageExpr *E);
156   void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
157     EmitAggLoadOfLValue(E);
158   }
159 
160   void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
161   void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
162   void VisitChooseExpr(const ChooseExpr *CE);
163   void VisitInitListExpr(InitListExpr *E);
164   void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
165                               llvm::Value *outerBegin = nullptr);
166   void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
167   void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
168   void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
169     Visit(DAE->getExpr());
170   }
171   void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
172     CodeGenFunction::CXXDefaultInitExprScope Scope(CGF);
173     Visit(DIE->getExpr());
174   }
175   void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
176   void VisitCXXConstructExpr(const CXXConstructExpr *E);
177   void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
178   void VisitLambdaExpr(LambdaExpr *E);
179   void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
180   void VisitExprWithCleanups(ExprWithCleanups *E);
181   void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
182   void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
183   void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
184   void VisitOpaqueValueExpr(OpaqueValueExpr *E);
185 
186   void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
187     if (E->isGLValue()) {
188       LValue LV = CGF.EmitPseudoObjectLValue(E);
189       return EmitFinalDestCopy(E->getType(), LV);
190     }
191 
192     CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
193   }
194 
195   void VisitVAArgExpr(VAArgExpr *E);
196 
197   void EmitInitializationToLValue(Expr *E, LValue Address);
198   void EmitNullInitializationToLValue(LValue Address);
199   //  case Expr::ChooseExprClass:
200   void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
201   void VisitAtomicExpr(AtomicExpr *E) {
202     RValue Res = CGF.EmitAtomicExpr(E);
203     EmitFinalDestCopy(E->getType(), Res);
204   }
205 };
206 }  // end anonymous namespace.
207 
208 //===----------------------------------------------------------------------===//
209 //                                Utilities
210 //===----------------------------------------------------------------------===//
211 
212 /// EmitAggLoadOfLValue - Given an expression with aggregate type that
213 /// represents a value lvalue, this method emits the address of the lvalue,
214 /// then loads the result into DestPtr.
215 void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
216   LValue LV = CGF.EmitLValue(E);
217 
218   // If the type of the l-value is atomic, then do an atomic load.
219   if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) {
220     CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
221     return;
222   }
223 
224   EmitFinalDestCopy(E->getType(), LV);
225 }
226 
227 /// True if the given aggregate type requires special GC API calls.
228 bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
229   // Only record types have members that might require garbage collection.
230   const RecordType *RecordTy = T->getAs<RecordType>();
231   if (!RecordTy) return false;
232 
233   // Don't mess with non-trivial C++ types.
234   RecordDecl *Record = RecordTy->getDecl();
235   if (isa<CXXRecordDecl>(Record) &&
236       (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
237        !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
238     return false;
239 
240   // Check whether the type has an object member.
241   return Record->hasObjectMember();
242 }
243 
244 void AggExprEmitter::withReturnValueSlot(
245     const Expr *E, llvm::function_ref<RValue(ReturnValueSlot)> EmitCall) {
246   QualType RetTy = E->getType();
247   bool RequiresDestruction =
248       Dest.isIgnored() &&
249       RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct;
250 
251   // If it makes no observable difference, save a memcpy + temporary.
252   //
253   // We need to always provide our own temporary if destruction is required.
254   // Otherwise, EmitCall will emit its own, notice that it's "unused", and end
255   // its lifetime before we have the chance to emit a proper destructor call.
256   bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() ||
257                  (RequiresDestruction && !Dest.getAddress().isValid());
258 
259   Address RetAddr = Address::invalid();
260   Address RetAllocaAddr = Address::invalid();
261 
262   EHScopeStack::stable_iterator LifetimeEndBlock;
263   llvm::Value *LifetimeSizePtr = nullptr;
264   llvm::IntrinsicInst *LifetimeStartInst = nullptr;
265   if (!UseTemp) {
266     RetAddr = Dest.getAddress();
267   } else {
268     RetAddr = CGF.CreateMemTemp(RetTy, "tmp", &RetAllocaAddr);
269     uint64_t Size =
270         CGF.CGM.getDataLayout().getTypeAllocSize(CGF.ConvertTypeForMem(RetTy));
271     LifetimeSizePtr = CGF.EmitLifetimeStart(Size, RetAllocaAddr.getPointer());
272     if (LifetimeSizePtr) {
273       LifetimeStartInst =
274           cast<llvm::IntrinsicInst>(std::prev(Builder.GetInsertPoint()));
275       assert(LifetimeStartInst->getIntrinsicID() ==
276                  llvm::Intrinsic::lifetime_start &&
277              "Last insertion wasn't a lifetime.start?");
278 
279       CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>(
280           NormalEHLifetimeMarker, RetAllocaAddr, LifetimeSizePtr);
281       LifetimeEndBlock = CGF.EHStack.stable_begin();
282     }
283   }
284 
285   RValue Src =
286       EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused));
287 
288   if (RequiresDestruction)
289     CGF.pushDestroy(RetTy.isDestructedType(), Src.getAggregateAddress(), RetTy);
290 
291   if (!UseTemp)
292     return;
293 
294   assert(Dest.getPointer() != Src.getAggregatePointer());
295   EmitFinalDestCopy(E->getType(), Src);
296 
297   if (!RequiresDestruction && LifetimeStartInst) {
298     // If there's no dtor to run, the copy was the last use of our temporary.
299     // Since we're not guaranteed to be in an ExprWithCleanups, clean up
300     // eagerly.
301     CGF.DeactivateCleanupBlock(LifetimeEndBlock, LifetimeStartInst);
302     CGF.EmitLifetimeEnd(LifetimeSizePtr, RetAllocaAddr.getPointer());
303   }
304 }
305 
306 /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
307 void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
308   assert(src.isAggregate() && "value must be aggregate value!");
309   LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
310   EmitFinalDestCopy(type, srcLV, EVK_RValue);
311 }
312 
313 /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
314 void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src,
315                                        ExprValueKind SrcValueKind) {
316   // If Dest is ignored, then we're evaluating an aggregate expression
317   // in a context that doesn't care about the result.  Note that loads
318   // from volatile l-values force the existence of a non-ignored
319   // destination.
320   if (Dest.isIgnored())
321     return;
322 
323   // Copy non-trivial C structs here.
324   LValue DstLV = CGF.MakeAddrLValue(
325       Dest.getAddress(), Dest.isVolatile() ? type.withVolatile() : type);
326 
327   if (SrcValueKind == EVK_RValue) {
328     if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) {
329       if (Dest.isPotentiallyAliased())
330         CGF.callCStructMoveAssignmentOperator(DstLV, src);
331       else
332         CGF.callCStructMoveConstructor(DstLV, src);
333       return;
334     }
335   } else {
336     if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
337       if (Dest.isPotentiallyAliased())
338         CGF.callCStructCopyAssignmentOperator(DstLV, src);
339       else
340         CGF.callCStructCopyConstructor(DstLV, src);
341       return;
342     }
343   }
344 
345   AggValueSlot srcAgg =
346     AggValueSlot::forLValue(src, AggValueSlot::IsDestructed,
347                             needsGC(type), AggValueSlot::IsAliased,
348                             AggValueSlot::MayOverlap);
349   EmitCopy(type, Dest, srcAgg);
350 }
351 
352 /// Perform a copy from the source into the destination.
353 ///
354 /// \param type - the type of the aggregate being copied; qualifiers are
355 ///   ignored
356 void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
357                               const AggValueSlot &src) {
358   if (dest.requiresGCollection()) {
359     CharUnits sz = dest.getPreferredSize(CGF.getContext(), type);
360     llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
361     CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
362                                                       dest.getAddress(),
363                                                       src.getAddress(),
364                                                       size);
365     return;
366   }
367 
368   // If the result of the assignment is used, copy the LHS there also.
369   // It's volatile if either side is.  Use the minimum alignment of
370   // the two sides.
371   LValue DestLV = CGF.MakeAddrLValue(dest.getAddress(), type);
372   LValue SrcLV = CGF.MakeAddrLValue(src.getAddress(), type);
373   CGF.EmitAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(),
374                         dest.isVolatile() || src.isVolatile());
375 }
376 
377 /// Emit the initializer for a std::initializer_list initialized with a
378 /// real initializer list.
379 void
380 AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
381   // Emit an array containing the elements.  The array is externally destructed
382   // if the std::initializer_list object is.
383   ASTContext &Ctx = CGF.getContext();
384   LValue Array = CGF.EmitLValue(E->getSubExpr());
385   assert(Array.isSimple() && "initializer_list array not a simple lvalue");
386   Address ArrayPtr = Array.getAddress();
387 
388   const ConstantArrayType *ArrayType =
389       Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
390   assert(ArrayType && "std::initializer_list constructed from non-array");
391 
392   // FIXME: Perform the checks on the field types in SemaInit.
393   RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
394   RecordDecl::field_iterator Field = Record->field_begin();
395   if (Field == Record->field_end()) {
396     CGF.ErrorUnsupported(E, "weird std::initializer_list");
397     return;
398   }
399 
400   // Start pointer.
401   if (!Field->getType()->isPointerType() ||
402       !Ctx.hasSameType(Field->getType()->getPointeeType(),
403                        ArrayType->getElementType())) {
404     CGF.ErrorUnsupported(E, "weird std::initializer_list");
405     return;
406   }
407 
408   AggValueSlot Dest = EnsureSlot(E->getType());
409   LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
410   LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
411   llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
412   llvm::Value *IdxStart[] = { Zero, Zero };
413   llvm::Value *ArrayStart =
414       Builder.CreateInBoundsGEP(ArrayPtr.getPointer(), IdxStart, "arraystart");
415   CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
416   ++Field;
417 
418   if (Field == Record->field_end()) {
419     CGF.ErrorUnsupported(E, "weird std::initializer_list");
420     return;
421   }
422 
423   llvm::Value *Size = Builder.getInt(ArrayType->getSize());
424   LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
425   if (Field->getType()->isPointerType() &&
426       Ctx.hasSameType(Field->getType()->getPointeeType(),
427                       ArrayType->getElementType())) {
428     // End pointer.
429     llvm::Value *IdxEnd[] = { Zero, Size };
430     llvm::Value *ArrayEnd =
431         Builder.CreateInBoundsGEP(ArrayPtr.getPointer(), IdxEnd, "arrayend");
432     CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
433   } else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
434     // Length.
435     CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
436   } else {
437     CGF.ErrorUnsupported(E, "weird std::initializer_list");
438     return;
439   }
440 }
441 
442 /// Determine if E is a trivial array filler, that is, one that is
443 /// equivalent to zero-initialization.
444 static bool isTrivialFiller(Expr *E) {
445   if (!E)
446     return true;
447 
448   if (isa<ImplicitValueInitExpr>(E))
449     return true;
450 
451   if (auto *ILE = dyn_cast<InitListExpr>(E)) {
452     if (ILE->getNumInits())
453       return false;
454     return isTrivialFiller(ILE->getArrayFiller());
455   }
456 
457   if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
458     return Cons->getConstructor()->isDefaultConstructor() &&
459            Cons->getConstructor()->isTrivial();
460 
461   // FIXME: Are there other cases where we can avoid emitting an initializer?
462   return false;
463 }
464 
465 /// Emit initialization of an array from an initializer list.
466 void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
467                                    QualType ArrayQTy, InitListExpr *E) {
468   uint64_t NumInitElements = E->getNumInits();
469 
470   uint64_t NumArrayElements = AType->getNumElements();
471   assert(NumInitElements <= NumArrayElements);
472 
473   QualType elementType =
474       CGF.getContext().getAsArrayType(ArrayQTy)->getElementType();
475 
476   // DestPtr is an array*.  Construct an elementType* by drilling
477   // down a level.
478   llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
479   llvm::Value *indices[] = { zero, zero };
480   llvm::Value *begin =
481     Builder.CreateInBoundsGEP(DestPtr.getPointer(), indices, "arrayinit.begin");
482 
483   CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
484   CharUnits elementAlign =
485     DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
486 
487   // Consider initializing the array by copying from a global. For this to be
488   // more efficient than per-element initialization, the size of the elements
489   // with explicit initializers should be large enough.
490   if (NumInitElements * elementSize.getQuantity() > 16 &&
491       elementType.isTriviallyCopyableType(CGF.getContext())) {
492     CodeGen::CodeGenModule &CGM = CGF.CGM;
493     ConstantEmitter Emitter(CGM);
494     LangAS AS = ArrayQTy.getAddressSpace();
495     if (llvm::Constant *C = Emitter.tryEmitForInitializer(E, AS, ArrayQTy)) {
496       auto GV = new llvm::GlobalVariable(
497           CGM.getModule(), C->getType(),
498           CGM.isTypeConstant(ArrayQTy, /* ExcludeCtorDtor= */ true),
499           llvm::GlobalValue::PrivateLinkage, C, "constinit",
500           /* InsertBefore= */ nullptr, llvm::GlobalVariable::NotThreadLocal,
501           CGM.getContext().getTargetAddressSpace(AS));
502       Emitter.finalize(GV);
503       CharUnits Align = CGM.getContext().getTypeAlignInChars(ArrayQTy);
504       GV->setAlignment(Align.getQuantity());
505       EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GV, ArrayQTy, Align));
506       return;
507     }
508   }
509 
510   // Exception safety requires us to destroy all the
511   // already-constructed members if an initializer throws.
512   // For that, we'll need an EH cleanup.
513   QualType::DestructionKind dtorKind = elementType.isDestructedType();
514   Address endOfInit = Address::invalid();
515   EHScopeStack::stable_iterator cleanup;
516   llvm::Instruction *cleanupDominator = nullptr;
517   if (CGF.needsEHCleanup(dtorKind)) {
518     // In principle we could tell the cleanup where we are more
519     // directly, but the control flow can get so varied here that it
520     // would actually be quite complex.  Therefore we go through an
521     // alloca.
522     endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
523                                      "arrayinit.endOfInit");
524     cleanupDominator = Builder.CreateStore(begin, endOfInit);
525     CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
526                                          elementAlign,
527                                          CGF.getDestroyer(dtorKind));
528     cleanup = CGF.EHStack.stable_begin();
529 
530   // Otherwise, remember that we didn't need a cleanup.
531   } else {
532     dtorKind = QualType::DK_none;
533   }
534 
535   llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);
536 
537   // The 'current element to initialize'.  The invariants on this
538   // variable are complicated.  Essentially, after each iteration of
539   // the loop, it points to the last initialized element, except
540   // that it points to the beginning of the array before any
541   // elements have been initialized.
542   llvm::Value *element = begin;
543 
544   // Emit the explicit initializers.
545   for (uint64_t i = 0; i != NumInitElements; ++i) {
546     // Advance to the next element.
547     if (i > 0) {
548       element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");
549 
550       // Tell the cleanup that it needs to destroy up to this
551       // element.  TODO: some of these stores can be trivially
552       // observed to be unnecessary.
553       if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
554     }
555 
556     LValue elementLV =
557       CGF.MakeAddrLValue(Address(element, elementAlign), elementType);
558     EmitInitializationToLValue(E->getInit(i), elementLV);
559   }
560 
561   // Check whether there's a non-trivial array-fill expression.
562   Expr *filler = E->getArrayFiller();
563   bool hasTrivialFiller = isTrivialFiller(filler);
564 
565   // Any remaining elements need to be zero-initialized, possibly
566   // using the filler expression.  We can skip this if the we're
567   // emitting to zeroed memory.
568   if (NumInitElements != NumArrayElements &&
569       !(Dest.isZeroed() && hasTrivialFiller &&
570         CGF.getTypes().isZeroInitializable(elementType))) {
571 
572     // Use an actual loop.  This is basically
573     //   do { *array++ = filler; } while (array != end);
574 
575     // Advance to the start of the rest of the array.
576     if (NumInitElements) {
577       element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
578       if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
579     }
580 
581     // Compute the end of the array.
582     llvm::Value *end = Builder.CreateInBoundsGEP(begin,
583                       llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
584                                                  "arrayinit.end");
585 
586     llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
587     llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
588 
589     // Jump into the body.
590     CGF.EmitBlock(bodyBB);
591     llvm::PHINode *currentElement =
592       Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
593     currentElement->addIncoming(element, entryBB);
594 
595     // Emit the actual filler expression.
596     {
597       // C++1z [class.temporary]p5:
598       //   when a default constructor is called to initialize an element of
599       //   an array with no corresponding initializer [...] the destruction of
600       //   every temporary created in a default argument is sequenced before
601       //   the construction of the next array element, if any
602       CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
603       LValue elementLV =
604         CGF.MakeAddrLValue(Address(currentElement, elementAlign), elementType);
605       if (filler)
606         EmitInitializationToLValue(filler, elementLV);
607       else
608         EmitNullInitializationToLValue(elementLV);
609     }
610 
611     // Move on to the next element.
612     llvm::Value *nextElement =
613       Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");
614 
615     // Tell the EH cleanup that we finished with the last element.
616     if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);
617 
618     // Leave the loop if we're done.
619     llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
620                                              "arrayinit.done");
621     llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
622     Builder.CreateCondBr(done, endBB, bodyBB);
623     currentElement->addIncoming(nextElement, Builder.GetInsertBlock());
624 
625     CGF.EmitBlock(endBB);
626   }
627 
628   // Leave the partial-array cleanup if we entered one.
629   if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
630 }
631 
632 //===----------------------------------------------------------------------===//
633 //                            Visitor Methods
634 //===----------------------------------------------------------------------===//
635 
636 void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
637   Visit(E->GetTemporaryExpr());
638 }
639 
640 void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
641   // If this is a unique OVE, just visit its source expression.
642   if (e->isUnique())
643     Visit(e->getSourceExpr());
644   else
645     EmitFinalDestCopy(e->getType(), CGF.getOrCreateOpaqueLValueMapping(e));
646 }
647 
648 void
649 AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
650   if (Dest.isPotentiallyAliased() &&
651       E->getType().isPODType(CGF.getContext())) {
652     // For a POD type, just emit a load of the lvalue + a copy, because our
653     // compound literal might alias the destination.
654     EmitAggLoadOfLValue(E);
655     return;
656   }
657 
658   AggValueSlot Slot = EnsureSlot(E->getType());
659   CGF.EmitAggExpr(E->getInitializer(), Slot);
660 }
661 
662 /// Attempt to look through various unimportant expressions to find a
663 /// cast of the given kind.
664 static Expr *findPeephole(Expr *op, CastKind kind) {
665   while (true) {
666     op = op->IgnoreParens();
667     if (CastExpr *castE = dyn_cast<CastExpr>(op)) {
668       if (castE->getCastKind() == kind)
669         return castE->getSubExpr();
670       if (castE->getCastKind() == CK_NoOp)
671         continue;
672     }
673     return nullptr;
674   }
675 }
676 
677 void AggExprEmitter::VisitCastExpr(CastExpr *E) {
678   if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
679     CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
680   switch (E->getCastKind()) {
681   case CK_Dynamic: {
682     // FIXME: Can this actually happen? We have no test coverage for it.
683     assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
684     LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
685                                       CodeGenFunction::TCK_Load);
686     // FIXME: Do we also need to handle property references here?
687     if (LV.isSimple())
688       CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
689     else
690       CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");
691 
692     if (!Dest.isIgnored())
693       CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
694     break;
695   }
696 
697   case CK_ToUnion: {
698     // Evaluate even if the destination is ignored.
699     if (Dest.isIgnored()) {
700       CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
701                       /*ignoreResult=*/true);
702       break;
703     }
704 
705     // GCC union extension
706     QualType Ty = E->getSubExpr()->getType();
707     Address CastPtr =
708       Builder.CreateElementBitCast(Dest.getAddress(), CGF.ConvertType(Ty));
709     EmitInitializationToLValue(E->getSubExpr(),
710                                CGF.MakeAddrLValue(CastPtr, Ty));
711     break;
712   }
713 
714   case CK_DerivedToBase:
715   case CK_BaseToDerived:
716   case CK_UncheckedDerivedToBase: {
717     llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
718                 "should have been unpacked before we got here");
719   }
720 
721   case CK_NonAtomicToAtomic:
722   case CK_AtomicToNonAtomic: {
723     bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);
724 
725     // Determine the atomic and value types.
726     QualType atomicType = E->getSubExpr()->getType();
727     QualType valueType = E->getType();
728     if (isToAtomic) std::swap(atomicType, valueType);
729 
730     assert(atomicType->isAtomicType());
731     assert(CGF.getContext().hasSameUnqualifiedType(valueType,
732                           atomicType->castAs<AtomicType>()->getValueType()));
733 
734     // Just recurse normally if we're ignoring the result or the
735     // atomic type doesn't change representation.
736     if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
737       return Visit(E->getSubExpr());
738     }
739 
740     CastKind peepholeTarget =
741       (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);
742 
743     // These two cases are reverses of each other; try to peephole them.
744     if (Expr *op = findPeephole(E->getSubExpr(), peepholeTarget)) {
745       assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
746                                                      E->getType()) &&
747            "peephole significantly changed types?");
748       return Visit(op);
749     }
750 
751     // If we're converting an r-value of non-atomic type to an r-value
752     // of atomic type, just emit directly into the relevant sub-object.
753     if (isToAtomic) {
754       AggValueSlot valueDest = Dest;
755       if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
756         // Zero-initialize.  (Strictly speaking, we only need to initialize
757         // the padding at the end, but this is simpler.)
758         if (!Dest.isZeroed())
759           CGF.EmitNullInitialization(Dest.getAddress(), atomicType);
760 
761         // Build a GEP to refer to the subobject.
762         Address valueAddr =
763             CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0,
764                                         CharUnits());
765         valueDest = AggValueSlot::forAddr(valueAddr,
766                                           valueDest.getQualifiers(),
767                                           valueDest.isExternallyDestructed(),
768                                           valueDest.requiresGCollection(),
769                                           valueDest.isPotentiallyAliased(),
770                                           AggValueSlot::DoesNotOverlap,
771                                           AggValueSlot::IsZeroed);
772       }
773 
774       CGF.EmitAggExpr(E->getSubExpr(), valueDest);
775       return;
776     }
777 
778     // Otherwise, we're converting an atomic type to a non-atomic type.
779     // Make an atomic temporary, emit into that, and then copy the value out.
780     AggValueSlot atomicSlot =
781       CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
782     CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);
783 
784     Address valueAddr =
785       Builder.CreateStructGEP(atomicSlot.getAddress(), 0, CharUnits());
786     RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
787     return EmitFinalDestCopy(valueType, rvalue);
788   }
789 
790   case CK_LValueToRValue:
791     // If we're loading from a volatile type, force the destination
792     // into existence.
793     if (E->getSubExpr()->getType().isVolatileQualified()) {
794       EnsureDest(E->getType());
795       return Visit(E->getSubExpr());
796     }
797 
798     LLVM_FALLTHROUGH;
799 
800   case CK_NoOp:
801   case CK_UserDefinedConversion:
802   case CK_ConstructorConversion:
803     assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
804                                                    E->getType()) &&
805            "Implicit cast types must be compatible");
806     Visit(E->getSubExpr());
807     break;
808 
809   case CK_LValueBitCast:
810     llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
811 
812   case CK_Dependent:
813   case CK_BitCast:
814   case CK_ArrayToPointerDecay:
815   case CK_FunctionToPointerDecay:
816   case CK_NullToPointer:
817   case CK_NullToMemberPointer:
818   case CK_BaseToDerivedMemberPointer:
819   case CK_DerivedToBaseMemberPointer:
820   case CK_MemberPointerToBoolean:
821   case CK_ReinterpretMemberPointer:
822   case CK_IntegralToPointer:
823   case CK_PointerToIntegral:
824   case CK_PointerToBoolean:
825   case CK_ToVoid:
826   case CK_VectorSplat:
827   case CK_IntegralCast:
828   case CK_BooleanToSignedIntegral:
829   case CK_IntegralToBoolean:
830   case CK_IntegralToFloating:
831   case CK_FloatingToIntegral:
832   case CK_FloatingToBoolean:
833   case CK_FloatingCast:
834   case CK_CPointerToObjCPointerCast:
835   case CK_BlockPointerToObjCPointerCast:
836   case CK_AnyPointerToBlockPointerCast:
837   case CK_ObjCObjectLValueCast:
838   case CK_FloatingRealToComplex:
839   case CK_FloatingComplexToReal:
840   case CK_FloatingComplexToBoolean:
841   case CK_FloatingComplexCast:
842   case CK_FloatingComplexToIntegralComplex:
843   case CK_IntegralRealToComplex:
844   case CK_IntegralComplexToReal:
845   case CK_IntegralComplexToBoolean:
846   case CK_IntegralComplexCast:
847   case CK_IntegralComplexToFloatingComplex:
848   case CK_ARCProduceObject:
849   case CK_ARCConsumeObject:
850   case CK_ARCReclaimReturnedObject:
851   case CK_ARCExtendBlockObject:
852   case CK_CopyAndAutoreleaseBlockObject:
853   case CK_BuiltinFnToFnPtr:
854   case CK_ZeroToOCLOpaqueType:
855   case CK_AddressSpaceConversion:
856   case CK_IntToOCLSampler:
857   case CK_FixedPointCast:
858   case CK_FixedPointToBoolean:
859     llvm_unreachable("cast kind invalid for aggregate types");
860   }
861 }
862 
863 void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
864   if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
865     EmitAggLoadOfLValue(E);
866     return;
867   }
868 
869   withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
870     return CGF.EmitCallExpr(E, Slot);
871   });
872 }
873 
874 void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
875   withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
876     return CGF.EmitObjCMessageExpr(E, Slot);
877   });
878 }
879 
880 void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
881   CGF.EmitIgnoredExpr(E->getLHS());
882   Visit(E->getRHS());
883 }
884 
885 void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
886   CodeGenFunction::StmtExprEvaluation eval(CGF);
887   CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
888 }
889 
890 enum CompareKind {
891   CK_Less,
892   CK_Greater,
893   CK_Equal,
894 };
895 
896 static llvm::Value *EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF,
897                                 const BinaryOperator *E, llvm::Value *LHS,
898                                 llvm::Value *RHS, CompareKind Kind,
899                                 const char *NameSuffix = "") {
900   QualType ArgTy = E->getLHS()->getType();
901   if (const ComplexType *CT = ArgTy->getAs<ComplexType>())
902     ArgTy = CT->getElementType();
903 
904   if (const auto *MPT = ArgTy->getAs<MemberPointerType>()) {
905     assert(Kind == CK_Equal &&
906            "member pointers may only be compared for equality");
907     return CGF.CGM.getCXXABI().EmitMemberPointerComparison(
908         CGF, LHS, RHS, MPT, /*IsInequality*/ false);
909   }
910 
911   // Compute the comparison instructions for the specified comparison kind.
912   struct CmpInstInfo {
913     const char *Name;
914     llvm::CmpInst::Predicate FCmp;
915     llvm::CmpInst::Predicate SCmp;
916     llvm::CmpInst::Predicate UCmp;
917   };
918   CmpInstInfo InstInfo = [&]() -> CmpInstInfo {
919     using FI = llvm::FCmpInst;
920     using II = llvm::ICmpInst;
921     switch (Kind) {
922     case CK_Less:
923       return {"cmp.lt", FI::FCMP_OLT, II::ICMP_SLT, II::ICMP_ULT};
924     case CK_Greater:
925       return {"cmp.gt", FI::FCMP_OGT, II::ICMP_SGT, II::ICMP_UGT};
926     case CK_Equal:
927       return {"cmp.eq", FI::FCMP_OEQ, II::ICMP_EQ, II::ICMP_EQ};
928     }
929     llvm_unreachable("Unrecognised CompareKind enum");
930   }();
931 
932   if (ArgTy->hasFloatingRepresentation())
933     return Builder.CreateFCmp(InstInfo.FCmp, LHS, RHS,
934                               llvm::Twine(InstInfo.Name) + NameSuffix);
935   if (ArgTy->isIntegralOrEnumerationType() || ArgTy->isPointerType()) {
936     auto Inst =
937         ArgTy->hasSignedIntegerRepresentation() ? InstInfo.SCmp : InstInfo.UCmp;
938     return Builder.CreateICmp(Inst, LHS, RHS,
939                               llvm::Twine(InstInfo.Name) + NameSuffix);
940   }
941 
942   llvm_unreachable("unsupported aggregate binary expression should have "
943                    "already been handled");
944 }
945 
946 void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) {
947   using llvm::BasicBlock;
948   using llvm::PHINode;
949   using llvm::Value;
950   assert(CGF.getContext().hasSameType(E->getLHS()->getType(),
951                                       E->getRHS()->getType()));
952   const ComparisonCategoryInfo &CmpInfo =
953       CGF.getContext().CompCategories.getInfoForType(E->getType());
954   assert(CmpInfo.Record->isTriviallyCopyable() &&
955          "cannot copy non-trivially copyable aggregate");
956 
957   QualType ArgTy = E->getLHS()->getType();
958 
959   // TODO: Handle comparing these types.
960   if (ArgTy->isVectorType())
961     return CGF.ErrorUnsupported(
962         E, "aggregate three-way comparison with vector arguments");
963   if (!ArgTy->isIntegralOrEnumerationType() && !ArgTy->isRealFloatingType() &&
964       !ArgTy->isNullPtrType() && !ArgTy->isPointerType() &&
965       !ArgTy->isMemberPointerType() && !ArgTy->isAnyComplexType()) {
966     return CGF.ErrorUnsupported(E, "aggregate three-way comparison");
967   }
968   bool IsComplex = ArgTy->isAnyComplexType();
969 
970   // Evaluate the operands to the expression and extract their values.
971   auto EmitOperand = [&](Expr *E) -> std::pair<Value *, Value *> {
972     RValue RV = CGF.EmitAnyExpr(E);
973     if (RV.isScalar())
974       return {RV.getScalarVal(), nullptr};
975     if (RV.isAggregate())
976       return {RV.getAggregatePointer(), nullptr};
977     assert(RV.isComplex());
978     return RV.getComplexVal();
979   };
980   auto LHSValues = EmitOperand(E->getLHS()),
981        RHSValues = EmitOperand(E->getRHS());
982 
983   auto EmitCmp = [&](CompareKind K) {
984     Value *Cmp = EmitCompare(Builder, CGF, E, LHSValues.first, RHSValues.first,
985                              K, IsComplex ? ".r" : "");
986     if (!IsComplex)
987       return Cmp;
988     assert(K == CompareKind::CK_Equal);
989     Value *CmpImag = EmitCompare(Builder, CGF, E, LHSValues.second,
990                                  RHSValues.second, K, ".i");
991     return Builder.CreateAnd(Cmp, CmpImag, "and.eq");
992   };
993   auto EmitCmpRes = [&](const ComparisonCategoryInfo::ValueInfo *VInfo) {
994     return Builder.getInt(VInfo->getIntValue());
995   };
996 
997   Value *Select;
998   if (ArgTy->isNullPtrType()) {
999     Select = EmitCmpRes(CmpInfo.getEqualOrEquiv());
1000   } else if (CmpInfo.isEquality()) {
1001     Select = Builder.CreateSelect(
1002         EmitCmp(CK_Equal), EmitCmpRes(CmpInfo.getEqualOrEquiv()),
1003         EmitCmpRes(CmpInfo.getNonequalOrNonequiv()), "sel.eq");
1004   } else if (!CmpInfo.isPartial()) {
1005     Value *SelectOne =
1006         Builder.CreateSelect(EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()),
1007                              EmitCmpRes(CmpInfo.getGreater()), "sel.lt");
1008     Select = Builder.CreateSelect(EmitCmp(CK_Equal),
1009                                   EmitCmpRes(CmpInfo.getEqualOrEquiv()),
1010                                   SelectOne, "sel.eq");
1011   } else {
1012     Value *SelectEq = Builder.CreateSelect(
1013         EmitCmp(CK_Equal), EmitCmpRes(CmpInfo.getEqualOrEquiv()),
1014         EmitCmpRes(CmpInfo.getUnordered()), "sel.eq");
1015     Value *SelectGT = Builder.CreateSelect(EmitCmp(CK_Greater),
1016                                            EmitCmpRes(CmpInfo.getGreater()),
1017                                            SelectEq, "sel.gt");
1018     Select = Builder.CreateSelect(
1019         EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()), SelectGT, "sel.lt");
1020   }
1021   // Create the return value in the destination slot.
1022   EnsureDest(E->getType());
1023   LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
1024 
1025   // Emit the address of the first (and only) field in the comparison category
1026   // type, and initialize it from the constant integer value selected above.
1027   LValue FieldLV = CGF.EmitLValueForFieldInitialization(
1028       DestLV, *CmpInfo.Record->field_begin());
1029   CGF.EmitStoreThroughLValue(RValue::get(Select), FieldLV, /*IsInit*/ true);
1030 
1031   // All done! The result is in the Dest slot.
1032 }
1033 
1034 void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
1035   if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
1036     VisitPointerToDataMemberBinaryOperator(E);
1037   else
1038     CGF.ErrorUnsupported(E, "aggregate binary expression");
1039 }
1040 
1041 void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
1042                                                     const BinaryOperator *E) {
1043   LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
1044   EmitFinalDestCopy(E->getType(), LV);
1045 }
1046 
1047 /// Is the value of the given expression possibly a reference to or
1048 /// into a __block variable?
1049 static bool isBlockVarRef(const Expr *E) {
1050   // Make sure we look through parens.
1051   E = E->IgnoreParens();
1052 
1053   // Check for a direct reference to a __block variable.
1054   if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
1055     const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
1056     return (var && var->hasAttr<BlocksAttr>());
1057   }
1058 
1059   // More complicated stuff.
1060 
1061   // Binary operators.
1062   if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
1063     // For an assignment or pointer-to-member operation, just care
1064     // about the LHS.
1065     if (op->isAssignmentOp() || op->isPtrMemOp())
1066       return isBlockVarRef(op->getLHS());
1067 
1068     // For a comma, just care about the RHS.
1069     if (op->getOpcode() == BO_Comma)
1070       return isBlockVarRef(op->getRHS());
1071 
1072     // FIXME: pointer arithmetic?
1073     return false;
1074 
1075   // Check both sides of a conditional operator.
1076   } else if (const AbstractConditionalOperator *op
1077                = dyn_cast<AbstractConditionalOperator>(E)) {
1078     return isBlockVarRef(op->getTrueExpr())
1079         || isBlockVarRef(op->getFalseExpr());
1080 
1081   // OVEs are required to support BinaryConditionalOperators.
1082   } else if (const OpaqueValueExpr *op
1083                = dyn_cast<OpaqueValueExpr>(E)) {
1084     if (const Expr *src = op->getSourceExpr())
1085       return isBlockVarRef(src);
1086 
1087   // Casts are necessary to get things like (*(int*)&var) = foo().
1088   // We don't really care about the kind of cast here, except
1089   // we don't want to look through l2r casts, because it's okay
1090   // to get the *value* in a __block variable.
1091   } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
1092     if (cast->getCastKind() == CK_LValueToRValue)
1093       return false;
1094     return isBlockVarRef(cast->getSubExpr());
1095 
1096   // Handle unary operators.  Again, just aggressively look through
1097   // it, ignoring the operation.
1098   } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
1099     return isBlockVarRef(uop->getSubExpr());
1100 
1101   // Look into the base of a field access.
1102   } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
1103     return isBlockVarRef(mem->getBase());
1104 
1105   // Look into the base of a subscript.
1106   } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
1107     return isBlockVarRef(sub->getBase());
1108   }
1109 
1110   return false;
1111 }
1112 
1113 void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
1114   // For an assignment to work, the value on the right has
1115   // to be compatible with the value on the left.
1116   assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
1117                                                  E->getRHS()->getType())
1118          && "Invalid assignment");
1119 
1120   // If the LHS might be a __block variable, and the RHS can
1121   // potentially cause a block copy, we need to evaluate the RHS first
1122   // so that the assignment goes the right place.
1123   // This is pretty semantically fragile.
1124   if (isBlockVarRef(E->getLHS()) &&
1125       E->getRHS()->HasSideEffects(CGF.getContext())) {
1126     // Ensure that we have a destination, and evaluate the RHS into that.
1127     EnsureDest(E->getRHS()->getType());
1128     Visit(E->getRHS());
1129 
1130     // Now emit the LHS and copy into it.
1131     LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
1132 
1133     // That copy is an atomic copy if the LHS is atomic.
1134     if (LHS.getType()->isAtomicType() ||
1135         CGF.LValueIsSuitableForInlineAtomic(LHS)) {
1136       CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
1137       return;
1138     }
1139 
1140     EmitCopy(E->getLHS()->getType(),
1141              AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
1142                                      needsGC(E->getLHS()->getType()),
1143                                      AggValueSlot::IsAliased,
1144                                      AggValueSlot::MayOverlap),
1145              Dest);
1146     return;
1147   }
1148 
1149   LValue LHS = CGF.EmitLValue(E->getLHS());
1150 
1151   // If we have an atomic type, evaluate into the destination and then
1152   // do an atomic copy.
1153   if (LHS.getType()->isAtomicType() ||
1154       CGF.LValueIsSuitableForInlineAtomic(LHS)) {
1155     EnsureDest(E->getRHS()->getType());
1156     Visit(E->getRHS());
1157     CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
1158     return;
1159   }
1160 
1161   // Codegen the RHS so that it stores directly into the LHS.
1162   AggValueSlot LHSSlot =
1163     AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
1164                             needsGC(E->getLHS()->getType()),
1165                             AggValueSlot::IsAliased,
1166                             AggValueSlot::MayOverlap);
1167   // A non-volatile aggregate destination might have volatile member.
1168   if (!LHSSlot.isVolatile() &&
1169       CGF.hasVolatileMember(E->getLHS()->getType()))
1170     LHSSlot.setVolatile(true);
1171 
1172   CGF.EmitAggExpr(E->getRHS(), LHSSlot);
1173 
1174   // Copy into the destination if the assignment isn't ignored.
1175   EmitFinalDestCopy(E->getType(), LHS);
1176 }
1177 
1178 void AggExprEmitter::
1179 VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
1180   llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
1181   llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
1182   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
1183 
1184   // Bind the common expression if necessary.
1185   CodeGenFunction::OpaqueValueMapping binding(CGF, E);
1186 
1187   CodeGenFunction::ConditionalEvaluation eval(CGF);
1188   CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
1189                            CGF.getProfileCount(E));
1190 
1191   // Save whether the destination's lifetime is externally managed.
1192   bool isExternallyDestructed = Dest.isExternallyDestructed();
1193 
1194   eval.begin(CGF);
1195   CGF.EmitBlock(LHSBlock);
1196   CGF.incrementProfileCounter(E);
1197   Visit(E->getTrueExpr());
1198   eval.end(CGF);
1199 
1200   assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
1201   CGF.Builder.CreateBr(ContBlock);
1202 
1203   // If the result of an agg expression is unused, then the emission
1204   // of the LHS might need to create a destination slot.  That's fine
1205   // with us, and we can safely emit the RHS into the same slot, but
1206   // we shouldn't claim that it's already being destructed.
1207   Dest.setExternallyDestructed(isExternallyDestructed);
1208 
1209   eval.begin(CGF);
1210   CGF.EmitBlock(RHSBlock);
1211   Visit(E->getFalseExpr());
1212   eval.end(CGF);
1213 
1214   CGF.EmitBlock(ContBlock);
1215 }
1216 
1217 void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
1218   Visit(CE->getChosenSubExpr());
1219 }
1220 
1221 void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
1222   Address ArgValue = Address::invalid();
1223   Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);
1224 
1225   // If EmitVAArg fails, emit an error.
1226   if (!ArgPtr.isValid()) {
1227     CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
1228     return;
1229   }
1230 
1231   EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType()));
1232 }
1233 
1234 void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
1235   // Ensure that we have a slot, but if we already do, remember
1236   // whether it was externally destructed.
1237   bool wasExternallyDestructed = Dest.isExternallyDestructed();
1238   EnsureDest(E->getType());
1239 
1240   // We're going to push a destructor if there isn't already one.
1241   Dest.setExternallyDestructed();
1242 
1243   Visit(E->getSubExpr());
1244 
1245   // Push that destructor we promised.
1246   if (!wasExternallyDestructed)
1247     CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
1248 }
1249 
1250 void
1251 AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
1252   AggValueSlot Slot = EnsureSlot(E->getType());
1253   CGF.EmitCXXConstructExpr(E, Slot);
1254 }
1255 
1256 void AggExprEmitter::VisitCXXInheritedCtorInitExpr(
1257     const CXXInheritedCtorInitExpr *E) {
1258   AggValueSlot Slot = EnsureSlot(E->getType());
1259   CGF.EmitInheritedCXXConstructorCall(
1260       E->getConstructor(), E->constructsVBase(), Slot.getAddress(),
1261       E->inheritedFromVBase(), E);
1262 }
1263 
1264 void
1265 AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
1266   AggValueSlot Slot = EnsureSlot(E->getType());
1267   LValue SlotLV = CGF.MakeAddrLValue(Slot.getAddress(), E->getType());
1268 
1269   // We'll need to enter cleanup scopes in case any of the element
1270   // initializers throws an exception.
1271   SmallVector<EHScopeStack::stable_iterator, 16> Cleanups;
1272   llvm::Instruction *CleanupDominator = nullptr;
1273 
1274   CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
1275   for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(),
1276                                                e = E->capture_init_end();
1277        i != e; ++i, ++CurField) {
1278     // Emit initialization
1279     LValue LV = CGF.EmitLValueForFieldInitialization(SlotLV, *CurField);
1280     if (CurField->hasCapturedVLAType()) {
1281       CGF.EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
1282       continue;
1283     }
1284 
1285     EmitInitializationToLValue(*i, LV);
1286 
1287     // Push a destructor if necessary.
1288     if (QualType::DestructionKind DtorKind =
1289             CurField->getType().isDestructedType()) {
1290       assert(LV.isSimple());
1291       if (CGF.needsEHCleanup(DtorKind)) {
1292         if (!CleanupDominator)
1293           CleanupDominator = CGF.Builder.CreateAlignedLoad(
1294               CGF.Int8Ty,
1295               llvm::Constant::getNullValue(CGF.Int8PtrTy),
1296               CharUnits::One()); // placeholder
1297 
1298         CGF.pushDestroy(EHCleanup, LV.getAddress(), CurField->getType(),
1299                         CGF.getDestroyer(DtorKind), false);
1300         Cleanups.push_back(CGF.EHStack.stable_begin());
1301       }
1302     }
1303   }
1304 
1305   // Deactivate all the partial cleanups in reverse order, which
1306   // generally means popping them.
1307   for (unsigned i = Cleanups.size(); i != 0; --i)
1308     CGF.DeactivateCleanupBlock(Cleanups[i-1], CleanupDominator);
1309 
1310   // Destroy the placeholder if we made one.
1311   if (CleanupDominator)
1312     CleanupDominator->eraseFromParent();
1313 }
1314 
1315 void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
1316   CGF.enterFullExpression(E);
1317   CodeGenFunction::RunCleanupsScope cleanups(CGF);
1318   Visit(E->getSubExpr());
1319 }
1320 
1321 void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
1322   QualType T = E->getType();
1323   AggValueSlot Slot = EnsureSlot(T);
1324   EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
1325 }
1326 
1327 void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
1328   QualType T = E->getType();
1329   AggValueSlot Slot = EnsureSlot(T);
1330   EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
1331 }
1332 
1333 /// isSimpleZero - If emitting this value will obviously just cause a store of
1334 /// zero to memory, return true.  This can return false if uncertain, so it just
1335 /// handles simple cases.
1336 static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
1337   E = E->IgnoreParens();
1338 
1339   // 0
1340   if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
1341     return IL->getValue() == 0;
1342   // +0.0
1343   if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
1344     return FL->getValue().isPosZero();
1345   // int()
1346   if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
1347       CGF.getTypes().isZeroInitializable(E->getType()))
1348     return true;
1349   // (int*)0 - Null pointer expressions.
1350   if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
1351     return ICE->getCastKind() == CK_NullToPointer &&
1352         CGF.getTypes().isPointerZeroInitializable(E->getType());
1353   // '\0'
1354   if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
1355     return CL->getValue() == 0;
1356 
1357   // Otherwise, hard case: conservatively return false.
1358   return false;
1359 }
1360 
1361 
1362 void
1363 AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
1364   QualType type = LV.getType();
1365   // FIXME: Ignore result?
1366   // FIXME: Are initializers affected by volatile?
1367   if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
1368     // Storing "i32 0" to a zero'd memory location is a noop.
1369     return;
1370   } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
1371     return EmitNullInitializationToLValue(LV);
1372   } else if (isa<NoInitExpr>(E)) {
1373     // Do nothing.
1374     return;
1375   } else if (type->isReferenceType()) {
1376     RValue RV = CGF.EmitReferenceBindingToExpr(E);
1377     return CGF.EmitStoreThroughLValue(RV, LV);
1378   }
1379 
1380   switch (CGF.getEvaluationKind(type)) {
1381   case TEK_Complex:
1382     CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
1383     return;
1384   case TEK_Aggregate:
1385     CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
1386                                                AggValueSlot::IsDestructed,
1387                                       AggValueSlot::DoesNotNeedGCBarriers,
1388                                                AggValueSlot::IsNotAliased,
1389                                                AggValueSlot::MayOverlap,
1390                                                Dest.isZeroed()));
1391     return;
1392   case TEK_Scalar:
1393     if (LV.isSimple()) {
1394       CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
1395     } else {
1396       CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
1397     }
1398     return;
1399   }
1400   llvm_unreachable("bad evaluation kind");
1401 }
1402 
1403 void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
1404   QualType type = lv.getType();
1405 
1406   // If the destination slot is already zeroed out before the aggregate is
1407   // copied into it, we don't have to emit any zeros here.
1408   if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
1409     return;
1410 
1411   if (CGF.hasScalarEvaluationKind(type)) {
1412     // For non-aggregates, we can store the appropriate null constant.
1413     llvm::Value *null = CGF.CGM.EmitNullConstant(type);
1414     // Note that the following is not equivalent to
1415     // EmitStoreThroughBitfieldLValue for ARC types.
1416     if (lv.isBitField()) {
1417       CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
1418     } else {
1419       assert(lv.isSimple());
1420       CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
1421     }
1422   } else {
1423     // There's a potential optimization opportunity in combining
1424     // memsets; that would be easy for arrays, but relatively
1425     // difficult for structures with the current code.
1426     CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
1427   }
1428 }
1429 
1430 void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
1431 #if 0
1432   // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
1433   // (Length of globals? Chunks of zeroed-out space?).
1434   //
1435   // If we can, prefer a copy from a global; this is a lot less code for long
1436   // globals, and it's easier for the current optimizers to analyze.
1437   if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
1438     llvm::GlobalVariable* GV =
1439     new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
1440                              llvm::GlobalValue::InternalLinkage, C, "");
1441     EmitFinalDestCopy(E->getType(), CGF.MakeAddrLValue(GV, E->getType()));
1442     return;
1443   }
1444 #endif
1445   if (E->hadArrayRangeDesignator())
1446     CGF.ErrorUnsupported(E, "GNU array range designator extension");
1447 
1448   if (E->isTransparent())
1449     return Visit(E->getInit(0));
1450 
1451   AggValueSlot Dest = EnsureSlot(E->getType());
1452 
1453   LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
1454 
1455   // Handle initialization of an array.
1456   if (E->getType()->isArrayType()) {
1457     auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
1458     EmitArrayInit(Dest.getAddress(), AType, E->getType(), E);
1459     return;
1460   }
1461 
1462   assert(E->getType()->isRecordType() && "Only support structs/unions here!");
1463 
1464   // Do struct initialization; this code just sets each individual member
1465   // to the approprate value.  This makes bitfield support automatic;
1466   // the disadvantage is that the generated code is more difficult for
1467   // the optimizer, especially with bitfields.
1468   unsigned NumInitElements = E->getNumInits();
1469   RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();
1470 
1471   // We'll need to enter cleanup scopes in case any of the element
1472   // initializers throws an exception.
1473   SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
1474   llvm::Instruction *cleanupDominator = nullptr;
1475 
1476   unsigned curInitIndex = 0;
1477 
1478   // Emit initialization of base classes.
1479   if (auto *CXXRD = dyn_cast<CXXRecordDecl>(record)) {
1480     assert(E->getNumInits() >= CXXRD->getNumBases() &&
1481            "missing initializer for base class");
1482     for (auto &Base : CXXRD->bases()) {
1483       assert(!Base.isVirtual() && "should not see vbases here");
1484       auto *BaseRD = Base.getType()->getAsCXXRecordDecl();
1485       Address V = CGF.GetAddressOfDirectBaseInCompleteClass(
1486           Dest.getAddress(), CXXRD, BaseRD,
1487           /*isBaseVirtual*/ false);
1488       AggValueSlot AggSlot = AggValueSlot::forAddr(
1489           V, Qualifiers(),
1490           AggValueSlot::IsDestructed,
1491           AggValueSlot::DoesNotNeedGCBarriers,
1492           AggValueSlot::IsNotAliased,
1493           CGF.overlapForBaseInit(CXXRD, BaseRD, Base.isVirtual()));
1494       CGF.EmitAggExpr(E->getInit(curInitIndex++), AggSlot);
1495 
1496       if (QualType::DestructionKind dtorKind =
1497               Base.getType().isDestructedType()) {
1498         CGF.pushDestroy(dtorKind, V, Base.getType());
1499         cleanups.push_back(CGF.EHStack.stable_begin());
1500       }
1501     }
1502   }
1503 
1504   // Prepare a 'this' for CXXDefaultInitExprs.
1505   CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress());
1506 
1507   if (record->isUnion()) {
1508     // Only initialize one field of a union. The field itself is
1509     // specified by the initializer list.
1510     if (!E->getInitializedFieldInUnion()) {
1511       // Empty union; we have nothing to do.
1512 
1513 #ifndef NDEBUG
1514       // Make sure that it's really an empty and not a failure of
1515       // semantic analysis.
1516       for (const auto *Field : record->fields())
1517         assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
1518 #endif
1519       return;
1520     }
1521 
1522     // FIXME: volatility
1523     FieldDecl *Field = E->getInitializedFieldInUnion();
1524 
1525     LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
1526     if (NumInitElements) {
1527       // Store the initializer into the field
1528       EmitInitializationToLValue(E->getInit(0), FieldLoc);
1529     } else {
1530       // Default-initialize to null.
1531       EmitNullInitializationToLValue(FieldLoc);
1532     }
1533 
1534     return;
1535   }
1536 
1537   // Here we iterate over the fields; this makes it simpler to both
1538   // default-initialize fields and skip over unnamed fields.
1539   for (const auto *field : record->fields()) {
1540     // We're done once we hit the flexible array member.
1541     if (field->getType()->isIncompleteArrayType())
1542       break;
1543 
1544     // Always skip anonymous bitfields.
1545     if (field->isUnnamedBitfield())
1546       continue;
1547 
1548     // We're done if we reach the end of the explicit initializers, we
1549     // have a zeroed object, and the rest of the fields are
1550     // zero-initializable.
1551     if (curInitIndex == NumInitElements && Dest.isZeroed() &&
1552         CGF.getTypes().isZeroInitializable(E->getType()))
1553       break;
1554 
1555 
1556     LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
1557     // We never generate write-barries for initialized fields.
1558     LV.setNonGC(true);
1559 
1560     if (curInitIndex < NumInitElements) {
1561       // Store the initializer into the field.
1562       EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
1563     } else {
1564       // We're out of initializers; default-initialize to null
1565       EmitNullInitializationToLValue(LV);
1566     }
1567 
1568     // Push a destructor if necessary.
1569     // FIXME: if we have an array of structures, all explicitly
1570     // initialized, we can end up pushing a linear number of cleanups.
1571     bool pushedCleanup = false;
1572     if (QualType::DestructionKind dtorKind
1573           = field->getType().isDestructedType()) {
1574       assert(LV.isSimple());
1575       if (CGF.needsEHCleanup(dtorKind)) {
1576         if (!cleanupDominator)
1577           cleanupDominator = CGF.Builder.CreateAlignedLoad(
1578               CGF.Int8Ty,
1579               llvm::Constant::getNullValue(CGF.Int8PtrTy),
1580               CharUnits::One()); // placeholder
1581 
1582         CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
1583                         CGF.getDestroyer(dtorKind), false);
1584         cleanups.push_back(CGF.EHStack.stable_begin());
1585         pushedCleanup = true;
1586       }
1587     }
1588 
1589     // If the GEP didn't get used because of a dead zero init or something
1590     // else, clean it up for -O0 builds and general tidiness.
1591     if (!pushedCleanup && LV.isSimple())
1592       if (llvm::GetElementPtrInst *GEP =
1593             dyn_cast<llvm::GetElementPtrInst>(LV.getPointer()))
1594         if (GEP->use_empty())
1595           GEP->eraseFromParent();
1596   }
1597 
1598   // Deactivate all the partial cleanups in reverse order, which
1599   // generally means popping them.
1600   for (unsigned i = cleanups.size(); i != 0; --i)
1601     CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);
1602 
1603   // Destroy the placeholder if we made one.
1604   if (cleanupDominator)
1605     cleanupDominator->eraseFromParent();
1606 }
1607 
1608 void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
1609                                             llvm::Value *outerBegin) {
1610   // Emit the common subexpression.
1611   CodeGenFunction::OpaqueValueMapping binding(CGF, E->getCommonExpr());
1612 
1613   Address destPtr = EnsureSlot(E->getType()).getAddress();
1614   uint64_t numElements = E->getArraySize().getZExtValue();
1615 
1616   if (!numElements)
1617     return;
1618 
1619   // destPtr is an array*. Construct an elementType* by drilling down a level.
1620   llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
1621   llvm::Value *indices[] = {zero, zero};
1622   llvm::Value *begin = Builder.CreateInBoundsGEP(destPtr.getPointer(), indices,
1623                                                  "arrayinit.begin");
1624 
1625   // Prepare to special-case multidimensional array initialization: we avoid
1626   // emitting multiple destructor loops in that case.
1627   if (!outerBegin)
1628     outerBegin = begin;
1629   ArrayInitLoopExpr *InnerLoop = dyn_cast<ArrayInitLoopExpr>(E->getSubExpr());
1630 
1631   QualType elementType =
1632       CGF.getContext().getAsArrayType(E->getType())->getElementType();
1633   CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
1634   CharUnits elementAlign =
1635       destPtr.getAlignment().alignmentOfArrayElement(elementSize);
1636 
1637   llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
1638   llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
1639 
1640   // Jump into the body.
1641   CGF.EmitBlock(bodyBB);
1642   llvm::PHINode *index =
1643       Builder.CreatePHI(zero->getType(), 2, "arrayinit.index");
1644   index->addIncoming(zero, entryBB);
1645   llvm::Value *element = Builder.CreateInBoundsGEP(begin, index);
1646 
1647   // Prepare for a cleanup.
1648   QualType::DestructionKind dtorKind = elementType.isDestructedType();
1649   EHScopeStack::stable_iterator cleanup;
1650   if (CGF.needsEHCleanup(dtorKind) && !InnerLoop) {
1651     if (outerBegin->getType() != element->getType())
1652       outerBegin = Builder.CreateBitCast(outerBegin, element->getType());
1653     CGF.pushRegularPartialArrayCleanup(outerBegin, element, elementType,
1654                                        elementAlign,
1655                                        CGF.getDestroyer(dtorKind));
1656     cleanup = CGF.EHStack.stable_begin();
1657   } else {
1658     dtorKind = QualType::DK_none;
1659   }
1660 
1661   // Emit the actual filler expression.
1662   {
1663     // Temporaries created in an array initialization loop are destroyed
1664     // at the end of each iteration.
1665     CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
1666     CodeGenFunction::ArrayInitLoopExprScope Scope(CGF, index);
1667     LValue elementLV =
1668         CGF.MakeAddrLValue(Address(element, elementAlign), elementType);
1669 
1670     if (InnerLoop) {
1671       // If the subexpression is an ArrayInitLoopExpr, share its cleanup.
1672       auto elementSlot = AggValueSlot::forLValue(
1673           elementLV, AggValueSlot::IsDestructed,
1674           AggValueSlot::DoesNotNeedGCBarriers,
1675           AggValueSlot::IsNotAliased,
1676           AggValueSlot::DoesNotOverlap);
1677       AggExprEmitter(CGF, elementSlot, false)
1678           .VisitArrayInitLoopExpr(InnerLoop, outerBegin);
1679     } else
1680       EmitInitializationToLValue(E->getSubExpr(), elementLV);
1681   }
1682 
1683   // Move on to the next element.
1684   llvm::Value *nextIndex = Builder.CreateNUWAdd(
1685       index, llvm::ConstantInt::get(CGF.SizeTy, 1), "arrayinit.next");
1686   index->addIncoming(nextIndex, Builder.GetInsertBlock());
1687 
1688   // Leave the loop if we're done.
1689   llvm::Value *done = Builder.CreateICmpEQ(
1690       nextIndex, llvm::ConstantInt::get(CGF.SizeTy, numElements),
1691       "arrayinit.done");
1692   llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
1693   Builder.CreateCondBr(done, endBB, bodyBB);
1694 
1695   CGF.EmitBlock(endBB);
1696 
1697   // Leave the partial-array cleanup if we entered one.
1698   if (dtorKind)
1699     CGF.DeactivateCleanupBlock(cleanup, index);
1700 }
1701 
1702 void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
1703   AggValueSlot Dest = EnsureSlot(E->getType());
1704 
1705   LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
1706   EmitInitializationToLValue(E->getBase(), DestLV);
1707   VisitInitListExpr(E->getUpdater());
1708 }
1709 
1710 //===----------------------------------------------------------------------===//
1711 //                        Entry Points into this File
1712 //===----------------------------------------------------------------------===//
1713 
1714 /// GetNumNonZeroBytesInInit - Get an approximate count of the number of
1715 /// non-zero bytes that will be stored when outputting the initializer for the
1716 /// specified initializer expression.
1717 static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
1718   E = E->IgnoreParens();
1719 
1720   // 0 and 0.0 won't require any non-zero stores!
1721   if (isSimpleZero(E, CGF)) return CharUnits::Zero();
1722 
1723   // If this is an initlist expr, sum up the size of sizes of the (present)
1724   // elements.  If this is something weird, assume the whole thing is non-zero.
1725   const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
1726   while (ILE && ILE->isTransparent())
1727     ILE = dyn_cast<InitListExpr>(ILE->getInit(0));
1728   if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
1729     return CGF.getContext().getTypeSizeInChars(E->getType());
1730 
1731   // InitListExprs for structs have to be handled carefully.  If there are
1732   // reference members, we need to consider the size of the reference, not the
1733   // referencee.  InitListExprs for unions and arrays can't have references.
1734   if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
1735     if (!RT->isUnionType()) {
1736       RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
1737       CharUnits NumNonZeroBytes = CharUnits::Zero();
1738 
1739       unsigned ILEElement = 0;
1740       if (auto *CXXRD = dyn_cast<CXXRecordDecl>(SD))
1741         while (ILEElement != CXXRD->getNumBases())
1742           NumNonZeroBytes +=
1743               GetNumNonZeroBytesInInit(ILE->getInit(ILEElement++), CGF);
1744       for (const auto *Field : SD->fields()) {
1745         // We're done once we hit the flexible array member or run out of
1746         // InitListExpr elements.
1747         if (Field->getType()->isIncompleteArrayType() ||
1748             ILEElement == ILE->getNumInits())
1749           break;
1750         if (Field->isUnnamedBitfield())
1751           continue;
1752 
1753         const Expr *E = ILE->getInit(ILEElement++);
1754 
1755         // Reference values are always non-null and have the width of a pointer.
1756         if (Field->getType()->isReferenceType())
1757           NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
1758               CGF.getTarget().getPointerWidth(0));
1759         else
1760           NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
1761       }
1762 
1763       return NumNonZeroBytes;
1764     }
1765   }
1766 
1767 
1768   CharUnits NumNonZeroBytes = CharUnits::Zero();
1769   for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
1770     NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
1771   return NumNonZeroBytes;
1772 }
1773 
1774 /// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
1775 /// zeros in it, emit a memset and avoid storing the individual zeros.
1776 ///
1777 static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
1778                                      CodeGenFunction &CGF) {
1779   // If the slot is already known to be zeroed, nothing to do.  Don't mess with
1780   // volatile stores.
1781   if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())
1782     return;
1783 
1784   // C++ objects with a user-declared constructor don't need zero'ing.
1785   if (CGF.getLangOpts().CPlusPlus)
1786     if (const RecordType *RT = CGF.getContext()
1787                        .getBaseElementType(E->getType())->getAs<RecordType>()) {
1788       const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1789       if (RD->hasUserDeclaredConstructor())
1790         return;
1791     }
1792 
1793   // If the type is 16-bytes or smaller, prefer individual stores over memset.
1794   CharUnits Size = Slot.getPreferredSize(CGF.getContext(), E->getType());
1795   if (Size <= CharUnits::fromQuantity(16))
1796     return;
1797 
1798   // Check to see if over 3/4 of the initializer are known to be zero.  If so,
1799   // we prefer to emit memset + individual stores for the rest.
1800   CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
1801   if (NumNonZeroBytes*4 > Size)
1802     return;
1803 
1804   // Okay, it seems like a good idea to use an initial memset, emit the call.
1805   llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());
1806 
1807   Address Loc = Slot.getAddress();
1808   Loc = CGF.Builder.CreateElementBitCast(Loc, CGF.Int8Ty);
1809   CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);
1810 
1811   // Tell the AggExprEmitter that the slot is known zero.
1812   Slot.setZeroed();
1813 }
1814 
1815 
1816 
1817 
1818 /// EmitAggExpr - Emit the computation of the specified expression of aggregate
1819 /// type.  The result is computed into DestPtr.  Note that if DestPtr is null,
1820 /// the value of the aggregate expression is not needed.  If VolatileDest is
1821 /// true, DestPtr cannot be 0.
1822 void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
1823   assert(E && hasAggregateEvaluationKind(E->getType()) &&
1824          "Invalid aggregate expression to emit");
1825   assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
1826          "slot has bits but no address");
1827 
1828   // Optimize the slot if possible.
1829   CheckAggExprForMemSetUse(Slot, E, *this);
1830 
1831   AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr*>(E));
1832 }
1833 
1834 LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
1835   assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
1836   Address Temp = CreateMemTemp(E->getType());
1837   LValue LV = MakeAddrLValue(Temp, E->getType());
1838   EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
1839                                          AggValueSlot::DoesNotNeedGCBarriers,
1840                                          AggValueSlot::IsNotAliased,
1841                                          AggValueSlot::DoesNotOverlap));
1842   return LV;
1843 }
1844 
1845 AggValueSlot::Overlap_t CodeGenFunction::overlapForBaseInit(
1846     const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual) {
1847   // Virtual bases are initialized first, in address order, so there's never
1848   // any overlap during their initialization.
1849   //
1850   // FIXME: Under P0840, this is no longer true: the tail padding of a vbase
1851   // of a field could be reused by a vbase of a containing class.
1852   if (IsVirtual)
1853     return AggValueSlot::DoesNotOverlap;
1854 
1855   // If the base class is laid out entirely within the nvsize of the derived
1856   // class, its tail padding cannot yet be initialized, so we can issue
1857   // stores at the full width of the base class.
1858   const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
1859   if (Layout.getBaseClassOffset(BaseRD) +
1860           getContext().getASTRecordLayout(BaseRD).getSize() <=
1861       Layout.getNonVirtualSize())
1862     return AggValueSlot::DoesNotOverlap;
1863 
1864   // The tail padding may contain values we need to preserve.
1865   return AggValueSlot::MayOverlap;
1866 }
1867 
1868 void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
1869                                         AggValueSlot::Overlap_t MayOverlap,
1870                                         bool isVolatile) {
1871   assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
1872 
1873   Address DestPtr = Dest.getAddress();
1874   Address SrcPtr = Src.getAddress();
1875 
1876   if (getLangOpts().CPlusPlus) {
1877     if (const RecordType *RT = Ty->getAs<RecordType>()) {
1878       CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
1879       assert((Record->hasTrivialCopyConstructor() ||
1880               Record->hasTrivialCopyAssignment() ||
1881               Record->hasTrivialMoveConstructor() ||
1882               Record->hasTrivialMoveAssignment() ||
1883               Record->isUnion()) &&
1884              "Trying to aggregate-copy a type without a trivial copy/move "
1885              "constructor or assignment operator");
1886       // Ignore empty classes in C++.
1887       if (Record->isEmpty())
1888         return;
1889     }
1890   }
1891 
1892   // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
1893   // C99 6.5.16.1p3, which states "If the value being stored in an object is
1894   // read from another object that overlaps in anyway the storage of the first
1895   // object, then the overlap shall be exact and the two objects shall have
1896   // qualified or unqualified versions of a compatible type."
1897   //
1898   // memcpy is not defined if the source and destination pointers are exactly
1899   // equal, but other compilers do this optimization, and almost every memcpy
1900   // implementation handles this case safely.  If there is a libc that does not
1901   // safely handle this, we can add a target hook.
1902 
1903   // Get data size info for this aggregate. Don't copy the tail padding if this
1904   // might be a potentially-overlapping subobject, since the tail padding might
1905   // be occupied by a different object. Otherwise, copying it is fine.
1906   std::pair<CharUnits, CharUnits> TypeInfo;
1907   if (MayOverlap)
1908     TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
1909   else
1910     TypeInfo = getContext().getTypeInfoInChars(Ty);
1911 
1912   llvm::Value *SizeVal = nullptr;
1913   if (TypeInfo.first.isZero()) {
1914     // But note that getTypeInfo returns 0 for a VLA.
1915     if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
1916             getContext().getAsArrayType(Ty))) {
1917       QualType BaseEltTy;
1918       SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
1919       TypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
1920       assert(!TypeInfo.first.isZero());
1921       SizeVal = Builder.CreateNUWMul(
1922           SizeVal,
1923           llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity()));
1924     }
1925   }
1926   if (!SizeVal) {
1927     SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity());
1928   }
1929 
1930   // FIXME: If we have a volatile struct, the optimizer can remove what might
1931   // appear to be `extra' memory ops:
1932   //
1933   // volatile struct { int i; } a, b;
1934   //
1935   // int main() {
1936   //   a = b;
1937   //   a = b;
1938   // }
1939   //
1940   // we need to use a different call here.  We use isVolatile to indicate when
1941   // either the source or the destination is volatile.
1942 
1943   DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
1944   SrcPtr = Builder.CreateElementBitCast(SrcPtr, Int8Ty);
1945 
1946   // Don't do any of the memmove_collectable tests if GC isn't set.
1947   if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
1948     // fall through
1949   } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
1950     RecordDecl *Record = RecordTy->getDecl();
1951     if (Record->hasObjectMember()) {
1952       CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
1953                                                     SizeVal);
1954       return;
1955     }
1956   } else if (Ty->isArrayType()) {
1957     QualType BaseType = getContext().getBaseElementType(Ty);
1958     if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
1959       if (RecordTy->getDecl()->hasObjectMember()) {
1960         CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
1961                                                       SizeVal);
1962         return;
1963       }
1964     }
1965   }
1966 
1967   auto Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile);
1968 
1969   // Determine the metadata to describe the position of any padding in this
1970   // memcpy, as well as the TBAA tags for the members of the struct, in case
1971   // the optimizer wishes to expand it in to scalar memory operations.
1972   if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty))
1973     Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag);
1974 
1975   if (CGM.getCodeGenOpts().NewStructPathTBAA) {
1976     TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForMemoryTransfer(
1977         Dest.getTBAAInfo(), Src.getTBAAInfo());
1978     CGM.DecorateInstructionWithTBAA(Inst, TBAAInfo);
1979   }
1980 }
1981