//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Aggregate Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                        Aggregate Expression Emitter
//===----------------------------------------------------------------------===//

namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  AggValueSlot Dest;
  bool IsResultUnused;

  AggValueSlot EnsureSlot(QualType T) {
    if (!Dest.isIgnored()) return Dest;
    return CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }
  void EnsureDest(QualType T) {
    if (!Dest.isIgnored()) return;
    Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }

  // Calls `Fn` with a valid return value slot, potentially creating a temporary
  // to do so. If a temporary is created, an appropriate copy into `Dest` will
  // be emitted.
  //
  // The given function should take a ReturnValueSlot, and return an RValue that
  // points to said slot.
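  //
  // A sketch of a caller pattern that reaches this path (hypothetical
  // struct `S` with a non-trivial C destructor):
  //   S s = getS();   // getS() returns S by value
  // Even when `Dest` is ignored, a temporary is provided here so that the
  // returned object still receives a destructor call.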
  void withReturnValueSlot(const Expr *E,
                           llvm::function_ref<RValue(ReturnValueSlot)> Fn);

public:
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
    : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
    IsResultUnused(IsResultUnused) { }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents an lvalue, this method emits the address of the lvalue,
  /// then loads the result into DestPtr.
  void EmitAggLoadOfLValue(const Expr *E);

  enum ExprValueKind {
    EVK_RValue,
    EVK_NonRValue
  };

  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
  /// SrcIsRValue is true if source comes from an RValue.
  void EmitFinalDestCopy(QualType type, const LValue &src,
                         ExprValueKind SrcValueKind = EVK_NonRValue);
  void EmitFinalDestCopy(QualType type, RValue src);
  void EmitCopy(QualType type, const AggValueSlot &dest,
                const AggValueSlot &src);

  void EmitMoveFromReturnSlot(const Expr *E, RValue Src);

  void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
                     QualType ArrayQTy, InitListExpr *E);

  AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
    if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
      return AggValueSlot::NeedsGCBarriers;
    return AggValueSlot::DoesNotNeedGCBarriers;
  }

  bool TypeRequiresGCollection(QualType T);

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  void Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    StmtVisitor<AggExprEmitter>::Visit(E);
  }

  void VisitStmt(Stmt *S) {
    CGF.ErrorUnsupported(S, "aggregate expression");
  }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    Visit(GE->getResultExpr());
  }
  void VisitCoawaitExpr(CoawaitExpr *E) {
    CGF.EmitCoawaitExpr(*E, Dest, IsResultUnused);
  }
  void VisitCoyieldExpr(CoyieldExpr *E) {
    CGF.EmitCoyieldExpr(*E, Dest, IsResultUnused);
  }
  void VisitUnaryCoawait(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }

  // l-values.
  void VisitDeclRefExpr(DeclRefExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  // Operators.
  void VisitCastExpr(CastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
  void VisitChooseExpr(const ChooseExpr *CE);
  void VisitInitListExpr(InitListExpr *E);
  void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
                              llvm::Value *outerBegin = nullptr);
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    Visit(DAE->getExpr());
  }
  void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF);
    Visit(DIE->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
  void VisitLambdaExpr(LambdaExpr *E);
  void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
  void VisitExprWithCleanups(ExprWithCleanups *E);
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
  void VisitOpaqueValueExpr(OpaqueValueExpr *E);

  void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    if (E->isGLValue()) {
      LValue LV = CGF.EmitPseudoObjectLValue(E);
      return EmitFinalDestCopy(E->getType(), LV);
    }

    CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
  }

  void VisitVAArgExpr(VAArgExpr *E);

  void EmitInitializationToLValue(Expr *E, LValue Address);
  void EmitNullInitializationToLValue(LValue Address);
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
  void VisitAtomicExpr(AtomicExpr *E) {
    RValue Res = CGF.EmitAtomicExpr(E);
    EmitFinalDestCopy(E->getType(), Res);
  }
};
}  // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// represents an lvalue, this method emits the address of the lvalue,
/// then loads the result into DestPtr.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
  LValue LV = CGF.EmitLValue(E);

  // If the type of the l-value is atomic, then do an atomic load.
  if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) {
    CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
    return;
  }

  EmitFinalDestCopy(E->getType(), LV);
}

/// \brief True if the given aggregate type requires special GC API calls.
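///
/// For instance, under Objective-C garbage collection a C struct with a
/// `__strong id` member has an object member and must be copied with the
/// GC-aware memmove (a sketch):
///   struct Pair { __strong id first; int second; };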
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
  // Only record types have members that might require garbage collection.
  const RecordType *RecordTy = T->getAs<RecordType>();
  if (!RecordTy) return false;

  // Don't mess with non-trivial C++ types.
  RecordDecl *Record = RecordTy->getDecl();
  if (isa<CXXRecordDecl>(Record) &&
      (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
       !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    return false;

  // Check whether the type has an object member.
  return Record->hasObjectMember();
}

void AggExprEmitter::withReturnValueSlot(
    const Expr *E, llvm::function_ref<RValue(ReturnValueSlot)> EmitCall) {
  QualType RetTy = E->getType();
  bool RequiresDestruction =
      Dest.isIgnored() &&
      RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct;

  // If it makes no observable difference, save a memcpy + temporary.
  //
  // We need to always provide our own temporary if destruction is required.
  // Otherwise, EmitCall will emit its own, notice that it's "unused", and end
  // its lifetime before we have the chance to emit a proper destructor call.
  bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() ||
                 (RequiresDestruction && !Dest.getAddress().isValid());

  Address RetAddr = Address::invalid();
  if (!UseTemp) {
    RetAddr = Dest.getAddress();
  } else {
    RetAddr = CGF.CreateMemTemp(RetTy);
    uint64_t Size =
        CGF.CGM.getDataLayout().getTypeAllocSize(CGF.ConvertTypeForMem(RetTy));
    if (llvm::Value *LifetimeSizePtr =
            CGF.EmitLifetimeStart(Size, RetAddr.getPointer()))
      CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>(
          NormalEHLifetimeMarker, RetAddr, LifetimeSizePtr);
  }

  RValue Src =
      EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused));

  if (RequiresDestruction)
    CGF.pushDestroy(RetTy.isDestructedType(), Src.getAggregateAddress(), RetTy);

  if (UseTemp) {
    assert(Dest.getPointer() != Src.getAggregatePointer());
    EmitFinalDestCopy(E->getType(), Src);
  }
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) {
  assert(src.isAggregate() && "value must be aggregate value!");
  LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type);
  EmitFinalDestCopy(type, srcLV, EVK_RValue);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src,
                                       ExprValueKind SrcValueKind) {
  // If Dest is ignored, then we're evaluating an aggregate expression
  // in a context that doesn't care about the result.  Note that loads
  // from volatile l-values force the existence of a non-ignored
  // destination.
  if (Dest.isIgnored())
    return;

  // Copy non-trivial C structs here.
  LValue DstLV = CGF.MakeAddrLValue(
      Dest.getAddress(), Dest.isVolatile() ? type.withVolatile() : type);

  if (SrcValueKind == EVK_RValue) {
    if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) {
      if (Dest.isPotentiallyAliased())
        CGF.callCStructMoveAssignmentOperator(DstLV, src);
      else
        CGF.callCStructMoveConstructor(DstLV, src);
      return;
    }
  } else {
    if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
      if (Dest.isPotentiallyAliased())
        CGF.callCStructCopyAssignmentOperator(DstLV, src);
      else
        CGF.callCStructCopyConstructor(DstLV, src);
      return;
    }
  }

  AggValueSlot srcAgg =
    AggValueSlot::forLValue(src, AggValueSlot::IsDestructed,
                            needsGC(type), AggValueSlot::IsAliased);
  EmitCopy(type, Dest, srcAgg);
}

/// Perform a copy from the source into the destination.
///
/// \param type - the type of the aggregate being copied; qualifiers are
///   ignored
void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
                              const AggValueSlot &src) {
  if (dest.requiresGCollection()) {
    CharUnits sz = CGF.getContext().getTypeSizeInChars(type);
    llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
                                                      dest.getAddress(),
                                                      src.getAddress(),
                                                      size);
    return;
  }

  // If the result of the assignment is used, copy the LHS there also.
  // It's volatile if either side is.  Use the minimum alignment of
  // the two sides.
  LValue DestLV = CGF.MakeAddrLValue(dest.getAddress(), type);
  LValue SrcLV = CGF.MakeAddrLValue(src.getAddress(), type);
  CGF.EmitAggregateCopy(DestLV, SrcLV, type,
                        dest.isVolatile() || src.isVolatile());
}

/// \brief Emit the initializer for a std::initializer_list initialized with a
/// real initializer list.
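///
/// A sketch of the pattern handled here:
///   std::initializer_list<int> il = {1, 2, 3};
/// The elements are emitted into a backing array; the initializer_list
/// object then receives the array's start pointer plus either an end
/// pointer or a length, depending on the library's field layout.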
void
AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
  // Emit an array containing the elements.  The array is externally destructed
  // if the std::initializer_list object is.
  ASTContext &Ctx = CGF.getContext();
  LValue Array = CGF.EmitLValue(E->getSubExpr());
  assert(Array.isSimple() && "initializer_list array not a simple lvalue");
  Address ArrayPtr = Array.getAddress();

  const ConstantArrayType *ArrayType =
      Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
  assert(ArrayType && "std::initializer_list constructed from non-array");

  // FIXME: Perform the checks on the field types in SemaInit.
  RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
  RecordDecl::field_iterator Field = Record->field_begin();
  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  // Start pointer.
  if (!Field->getType()->isPointerType() ||
      !Ctx.hasSameType(Field->getType()->getPointeeType(),
                       ArrayType->getElementType())) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  AggValueSlot Dest = EnsureSlot(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
  LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
  llvm::Value *IdxStart[] = { Zero, Zero };
  llvm::Value *ArrayStart =
      Builder.CreateInBoundsGEP(ArrayPtr.getPointer(), IdxStart, "arraystart");
  CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
  ++Field;

  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  llvm::Value *Size = Builder.getInt(ArrayType->getSize());
  LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  if (Field->getType()->isPointerType() &&
      Ctx.hasSameType(Field->getType()->getPointeeType(),
                      ArrayType->getElementType())) {
    // End pointer.
    llvm::Value *IdxEnd[] = { Zero, Size };
    llvm::Value *ArrayEnd =
        Builder.CreateInBoundsGEP(ArrayPtr.getPointer(), IdxEnd, "arrayend");
    CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
  } else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
    // Length.
    CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
  } else {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }
}

/// \brief Determine if E is a trivial array filler, that is, one that is
/// equivalent to zero-initialization.
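///
/// For example, in `int a[8] = {1, 2};` the filler for a[2..7] is an
/// ImplicitValueInitExpr, which zero-initializes and is therefore trivial.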
static bool isTrivialFiller(Expr *E) {
  if (!E)
    return true;

  if (isa<ImplicitValueInitExpr>(E))
    return true;

  if (auto *ILE = dyn_cast<InitListExpr>(E)) {
    if (ILE->getNumInits())
      return false;
    return isTrivialFiller(ILE->getArrayFiller());
  }

  if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
    return Cons->getConstructor()->isDefaultConstructor() &&
           Cons->getConstructor()->isTrivial();

  // FIXME: Are there other cases where we can avoid emitting an initializer?
  return false;
}

/// \brief Emit initialization of an array from an initializer list.
void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
                                   QualType ArrayQTy, InitListExpr *E) {
  uint64_t NumInitElements = E->getNumInits();

  uint64_t NumArrayElements = AType->getNumElements();
  assert(NumInitElements <= NumArrayElements);

  QualType elementType =
      CGF.getContext().getAsArrayType(ArrayQTy)->getElementType();

  // DestPtr is an array*.  Construct an elementType* by drilling
  // down a level.
  llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
  llvm::Value *indices[] = { zero, zero };
  llvm::Value *begin =
    Builder.CreateInBoundsGEP(DestPtr.getPointer(), indices, "arrayinit.begin");

  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
  CharUnits elementAlign =
    DestPtr.getAlignment().alignmentOfArrayElement(elementSize);

  // Consider initializing the array by copying from a global. For this to be
  // more efficient than per-element initialization, the size of the elements
  // with explicit initializers should be large enough.
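  //
  // For example, `int a[8] = {1, 2, 3, 4, 5, 6, 7, 8};` (32 bytes of
  // explicit initializers) can be lowered to a copy from a private
  // "constinit" global instead of eight separate stores.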
  if (NumInitElements * elementSize.getQuantity() > 16 &&
      elementType.isTriviallyCopyableType(CGF.getContext())) {
    CodeGen::CodeGenModule &CGM = CGF.CGM;
    ConstantEmitter Emitter(CGM);
    LangAS AS = ArrayQTy.getAddressSpace();
    if (llvm::Constant *C = Emitter.tryEmitForInitializer(E, AS, ArrayQTy)) {
      auto GV = new llvm::GlobalVariable(
          CGM.getModule(), C->getType(),
          CGM.isTypeConstant(ArrayQTy, /* ExcludeCtorDtor= */ true),
          llvm::GlobalValue::PrivateLinkage, C, "constinit",
          /* InsertBefore= */ nullptr, llvm::GlobalVariable::NotThreadLocal,
          CGM.getContext().getTargetAddressSpace(AS));
      Emitter.finalize(GV);
      CharUnits Align = CGM.getContext().getTypeAlignInChars(ArrayQTy);
      GV->setAlignment(Align.getQuantity());
      EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GV, ArrayQTy, Align));
      return;
    }
  }

  // Exception safety requires us to destroy all the
  // already-constructed members if an initializer throws.
  // For that, we'll need an EH cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  Address endOfInit = Address::invalid();
  EHScopeStack::stable_iterator cleanup;
  llvm::Instruction *cleanupDominator = nullptr;
  if (CGF.needsEHCleanup(dtorKind)) {
    // In principle we could tell the cleanup where we are more
    // directly, but the control flow can get so varied here that it
    // would actually be quite complex.  Therefore we go through an
    // alloca.
    endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(),
                                     "arrayinit.endOfInit");
    cleanupDominator = Builder.CreateStore(begin, endOfInit);
    CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                         elementAlign,
                                         CGF.getDestroyer(dtorKind));
    cleanup = CGF.EHStack.stable_begin();

  // Otherwise, remember that we didn't need a cleanup.
  } else {
    dtorKind = QualType::DK_none;
  }

  llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

  // The 'current element to initialize'.  The invariants on this
  // variable are complicated.  Essentially, after each iteration of
  // the loop, it points to the last initialized element, except
  // that it points to the beginning of the array before any
  // elements have been initialized.
  llvm::Value *element = begin;

  // Emit the explicit initializers.
  for (uint64_t i = 0; i != NumInitElements; ++i) {
    // Advance to the next element.
    if (i > 0) {
      element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");

      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    LValue elementLV =
      CGF.MakeAddrLValue(Address(element, elementAlign), elementType);
    EmitInitializationToLValue(E->getInit(i), elementLV);
  }

  // Check whether there's a non-trivial array-fill expression.
  Expr *filler = E->getArrayFiller();
  bool hasTrivialFiller = isTrivialFiller(filler);

  // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression.  We can skip this if we're
  // emitting to zeroed memory.
  if (NumInitElements != NumArrayElements &&
      !(Dest.isZeroed() && hasTrivialFiller &&
        CGF.getTypes().isZeroInitializable(elementType))) {

    // Use an actual loop.  This is basically
    //   do { *array++ = filler; } while (array != end);

    // Advance to the start of the rest of the array.
    if (NumInitElements) {
      element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
      if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
    }

    // Compute the end of the array.
    llvm::Value *end = Builder.CreateInBoundsGEP(begin,
                      llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
                                                 "arrayinit.end");

    llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
    llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

    // Jump into the body.
    CGF.EmitBlock(bodyBB);
    llvm::PHINode *currentElement =
      Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
    currentElement->addIncoming(element, entryBB);

    // Emit the actual filler expression.
    {
      // C++1z [class.temporary]p5:
      //   when a default constructor is called to initialize an element of
      //   an array with no corresponding initializer [...] the destruction of
      //   every temporary created in a default argument is sequenced before
      //   the construction of the next array element, if any
      CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
      LValue elementLV =
        CGF.MakeAddrLValue(Address(currentElement, elementAlign), elementType);
      if (filler)
        EmitInitializationToLValue(filler, elementLV);
      else
        EmitNullInitializationToLValue(elementLV);
    }

    // Move on to the next element.
    llvm::Value *nextElement =
      Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");

    // Tell the EH cleanup that we finished with the last element.
    if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);

    // Leave the loop if we're done.
    llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
                                             "arrayinit.done");
    llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
    Builder.CreateCondBr(done, endBB, bodyBB);
    currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

    CGF.EmitBlock(endBB);
  }

  // Leave the partial-array cleanup if we entered one.
  if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
}

//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
  Visit(E->GetTemporaryExpr());
}

void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
  EmitFinalDestCopy(e->getType(), CGF.getOpaqueLValueMapping(e));
}

void
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
  if (Dest.isPotentiallyAliased() &&
      E->getType().isPODType(CGF.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    EmitAggLoadOfLValue(E);
    return;
  }

  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitAggExpr(E->getInitializer(), Slot);
}

/// Attempt to look through various unimportant expressions to find a
/// cast of the given kind.
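///
/// For example, a CK_AtomicToNonAtomic cast whose operand is (behind parens
/// and no-op casts) a CK_NonAtomicToAtomic cast of the same value type
/// cancels out; VisitCastExpr below uses this to emit the inner operand
/// directly.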
static Expr *findPeephole(Expr *op, CastKind kind) {
  while (true) {
    op = op->IgnoreParens();
    if (CastExpr *castE = dyn_cast<CastExpr>(op)) {
      if (castE->getCastKind() == kind)
        return castE->getSubExpr();
      if (castE->getCastKind() == CK_NoOp)
        continue;
    }
    return nullptr;
  }
}

void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
    CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
  switch (E->getCastKind()) {
  case CK_Dynamic: {
    // FIXME: Can this actually happen? We have no test coverage for it.
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
                                      CodeGenFunction::TCK_Load);
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (!Dest.isIgnored())
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    // Evaluate even if the destination is ignored.
    if (Dest.isIgnored()) {
      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
                      /*ignoreResult=*/true);
      break;
    }

    // GCC union extension
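    // (A sketch: `union U { int i; float f; }; U u = (union U)1;`
    // initializes the `int` member that matches the operand's type.)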
    QualType Ty = E->getSubExpr()->getType();
    Address CastPtr =
      Builder.CreateElementBitCast(Dest.getAddress(), CGF.ConvertType(Ty));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty));
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
                "should have been unpacked before we got here");
  }

  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic: {
    bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);

    // Determine the atomic and value types.
    QualType atomicType = E->getSubExpr()->getType();
    QualType valueType = E->getType();
    if (isToAtomic) std::swap(atomicType, valueType);

    assert(atomicType->isAtomicType());
    assert(CGF.getContext().hasSameUnqualifiedType(valueType,
                          atomicType->castAs<AtomicType>()->getValueType()));

    // Just recurse normally if we're ignoring the result or the
    // atomic type doesn't change representation.
    if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
      return Visit(E->getSubExpr());
    }

    CastKind peepholeTarget =
      (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);

    // These two cases are reverses of each other; try to peephole them.
    if (Expr *op = findPeephole(E->getSubExpr(), peepholeTarget)) {
      assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
                                                     E->getType()) &&
           "peephole significantly changed types?");
      return Visit(op);
    }

    // If we're converting an r-value of non-atomic type to an r-value
    // of atomic type, just emit directly into the relevant sub-object.
    if (isToAtomic) {
      AggValueSlot valueDest = Dest;
      if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
        // Zero-initialize.  (Strictly speaking, we only need to initialize
        // the padding at the end, but this is simpler.)
        if (!Dest.isZeroed())
          CGF.EmitNullInitialization(Dest.getAddress(), atomicType);

        // Build a GEP to refer to the subobject.
        Address valueAddr =
            CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0,
                                        CharUnits());
        valueDest = AggValueSlot::forAddr(valueAddr,
                                          valueDest.getQualifiers(),
                                          valueDest.isExternallyDestructed(),
                                          valueDest.requiresGCollection(),
                                          valueDest.isPotentiallyAliased(),
                                          AggValueSlot::IsZeroed);
      }

      CGF.EmitAggExpr(E->getSubExpr(), valueDest);
      return;
    }

    // Otherwise, we're converting an atomic type to a non-atomic type.
    // Make an atomic temporary, emit into that, and then copy the value out.
    AggValueSlot atomicSlot =
      CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
    CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);

    Address valueAddr =
      Builder.CreateStructGEP(atomicSlot.getAddress(), 0, CharUnits());
    RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
    return EmitFinalDestCopy(valueType, rvalue);
  }

  case CK_LValueToRValue:
    // If we're loading from a volatile type, force the destination
    // into existence.
    if (E->getSubExpr()->getType().isVolatileQualified()) {
      EnsureDest(E->getType());
      return Visit(E->getSubExpr());
    }

    LLVM_FALLTHROUGH;

  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");

  case CK_Dependent:
  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_BuiltinFnToFnPtr:
  case CK_ZeroToOCLEvent:
  case CK_ZeroToOCLQueue:
  case CK_AddressSpaceConversion:
  case CK_IntToOCLSampler:
    llvm_unreachable("cast kind invalid for aggregate types");
  }
}

void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
  if (E->getCallReturnType(CGF.getContext())->isReferenceType()) {
    EmitAggLoadOfLValue(E);
    return;
  }

  withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
    return CGF.EmitCallExpr(E, Slot);
  });
}

void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
    return CGF.EmitObjCMessageExpr(E, Slot);
  });
}

void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  Visit(E->getRHS());
}

void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}

void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
  if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
    VisitPointerToDataMemberBinaryOperator(E);
  else
    CGF.ErrorUnsupported(E, "aggregate binary expression");
}

void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
                                                    const BinaryOperator *E) {
  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
  EmitFinalDestCopy(E->getType(), LV);
}

/// Is the value of the given expression possibly a reference to or
/// into a __block variable?
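///
/// A sketch of why this matters (hypothetical aggregate `Widget`):
///   __block Widget w;
///   w = makeWidget();   // makeWidget() may trigger a block copy that moves
///                       // `w` to the heap, so the RHS must run first.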
static bool isBlockVarRef(const Expr *E) {
  // Make sure we look through parens.
  E = E->IgnoreParens();

  // Check for a direct reference to a __block variable.
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
    const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
    return (var && var->hasAttr<BlocksAttr>());
  }

  // More complicated stuff.

  // Binary operators.
  if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
    // For an assignment or pointer-to-member operation, just care
    // about the LHS.
    if (op->isAssignmentOp() || op->isPtrMemOp())
      return isBlockVarRef(op->getLHS());

    // For a comma, just care about the RHS.
    if (op->getOpcode() == BO_Comma)
      return isBlockVarRef(op->getRHS());

    // FIXME: pointer arithmetic?
    return false;

  // Check both sides of a conditional operator.
  } else if (const AbstractConditionalOperator *op
               = dyn_cast<AbstractConditionalOperator>(E)) {
    return isBlockVarRef(op->getTrueExpr())
        || isBlockVarRef(op->getFalseExpr());

  // OVEs are required to support BinaryConditionalOperators.
  } else if (const OpaqueValueExpr *op
               = dyn_cast<OpaqueValueExpr>(E)) {
    if (const Expr *src = op->getSourceExpr())
      return isBlockVarRef(src);

  // Casts are necessary to get things like (*(int*)&var) = foo().
  // We don't really care about the kind of cast here, except
  // we don't want to look through l2r casts, because it's okay
  // to get the *value* in a __block variable.
  } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
    if (cast->getCastKind() == CK_LValueToRValue)
      return false;
    return isBlockVarRef(cast->getSubExpr());

  // Handle unary operators.  Again, just aggressively look through
  // it, ignoring the operation.
  } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
    return isBlockVarRef(uop->getSubExpr());

  // Look into the base of a field access.
  } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
    return isBlockVarRef(mem->getBase());

  // Look into the base of a subscript.
  } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
    return isBlockVarRef(sub->getBase());
  }

  return false;
}

void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");

  // If the LHS might be a __block variable, and the RHS can
  // potentially cause a block copy, we need to evaluate the RHS first
  // so that the assignment goes the right place.
  // This is pretty semantically fragile.
  if (isBlockVarRef(E->getLHS()) &&
      E->getRHS()->HasSideEffects(CGF.getContext())) {
    // Ensure that we have a destination, and evaluate the RHS into that.
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());

    // Now emit the LHS and copy into it.
    LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // That copy is an atomic copy if the LHS is atomic.
    if (LHS.getType()->isAtomicType() ||
        CGF.LValueIsSuitableForInlineAtomic(LHS)) {
      CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
      return;
    }

    EmitCopy(E->getLHS()->getType(),
             AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                                     needsGC(E->getLHS()->getType()),
                                     AggValueSlot::IsAliased),
             Dest);
    return;
  }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // If we have an atomic type, evaluate into the destination and then
  // do an atomic copy.
  if (LHS.getType()->isAtomicType() ||
      CGF.LValueIsSuitableForInlineAtomic(LHS)) {
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());
    CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
    return;
  }

  // Codegen the RHS so that it stores directly into the LHS.
  AggValueSlot LHSSlot =
    AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                            needsGC(E->getLHS()->getType()),
                            AggValueSlot::IsAliased);
  // A non-volatile aggregate destination might have volatile members.
  if (!LHSSlot.isVolatile() &&
      CGF.hasVolatileMember(E->getLHS()->getType()))
    LHSSlot.setVolatile(true);

  CGF.EmitAggExpr(E->getRHS(), LHSSlot);

  // Copy into the destination if the assignment isn't ignored.
  EmitFinalDestCopy(E->getType(), LHS);
}

void AggExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock,
                           CGF.getProfileCount(E));

  // Save whether the destination's lifetime is externally managed.
  bool isExternallyDestructed = Dest.isExternallyDestructed();

  eval.begin(CGF);
  CGF.EmitBlock(LHSBlock);
  CGF.incrementProfileCounter(E);
  Visit(E->getTrueExpr());
  eval.end(CGF);

  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
  CGF.Builder.CreateBr(ContBlock);

  // If the result of an agg expression is unused, then the emission
  // of the LHS might need to create a destination slot.  That's fine
  // with us, and we can safely emit the RHS into the same slot, but
  // we shouldn't claim that it's already being destructed.
  Dest.setExternallyDestructed(isExternallyDestructed);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Visit(E->getFalseExpr());
  eval.end(CGF);

  CGF.EmitBlock(ContBlock);
}

void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
  Visit(CE->getChosenSubExpr());
}

void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  Address ArgValue = Address::invalid();
  Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);

  // If EmitVAArg fails, emit an error.
  if (!ArgPtr.isValid()) {
    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
    return;
  }

  EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}

void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
  // Ensure that we have a slot, but if we already do, remember
  // whether it was externally destructed.
  bool wasExternallyDestructed = Dest.isExternallyDestructed();
  EnsureDest(E->getType());

  // We're going to push a destructor if there isn't already one.
  Dest.setExternallyDestructed();

  Visit(E->getSubExpr());

  // Push that destructor we promised.
  if (!wasExternallyDestructed)
    CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
}

void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitCXXConstructExpr(E, Slot);
}

void AggExprEmitter::VisitCXXInheritedCtorInitExpr(
    const CXXInheritedCtorInitExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitInheritedCXXConstructorCall(
      E->getConstructor(), E->constructsVBase(), Slot.getAddress(),
      E->inheritedFromVBase(), E);
}

void
AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitLambdaExpr(E, Slot);
}

void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
  CGF.enterFullExpression(E);
  CodeGenFunction::RunCleanupsScope cleanups(CGF);
  Visit(E->getSubExpr());
}

void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}

void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T));
}

/// isSimpleZero - If emitting this value will obviously just cause a store of
/// zero to memory, return true.  This can return false if uncertain, so it just
/// handles simple cases.
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0
  if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
    return IL->getValue() == 0;
  // +0.0
  if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
    return FL->getValue().isPosZero();
  // int()
  if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
      CGF.getTypes().isZeroInitializable(E->getType()))
    return true;
  // (int*)0 - Null pointer expressions.
  if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
    return ICE->getCastKind() == CK_NullToPointer &&
        CGF.getTypes().isPointerZeroInitializable(E->getType());
  // '\0'
  if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
    return CL->getValue() == 0;

  // Otherwise, hard case: conservatively return false.
  return false;
}


void
AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
  QualType type = LV.getType();
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zero'd memory location is a noop.
    return;
  } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
    return EmitNullInitializationToLValue(LV);
  } else if (isa<NoInitExpr>(E)) {
    // Do nothing.
    return;
  } else if (type->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E);
    return CGF.EmitStoreThroughLValue(RV, LV);
  }

  switch (CGF.getEvaluationKind(type)) {
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
                                               AggValueSlot::IsDestructed,
                                      AggValueSlot::DoesNotNeedGCBarriers,
                                               AggValueSlot::IsNotAliased,
                                               Dest.isZeroed()));
    return;
  case TEK_Scalar:
    if (LV.isSimple()) {
      CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
    } else {
      CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
    }
    return;
  }
  llvm_unreachable("bad evaluation kind");
}

void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
  QualType type = lv.getType();

  // If the destination slot is already zeroed out before the aggregate is
  // copied into it, we don't have to emit any zeros here.
  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
    return;

  if (CGF.hasScalarEvaluationKind(type)) {
    // For non-aggregates, we can store the appropriate null constant.
    llvm::Value *null = CGF.CGM.EmitNullConstant(type);
    // Note that the following is not equivalent to
    // EmitStoreThroughBitfieldLValue for ARC types.
    if (lv.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
    } else {
      assert(lv.isSimple());
      CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
    }
  } else {
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
  }
}

void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#if 0
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
  // (Length of globals? Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
    llvm::GlobalVariable* GV =
    new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                             llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(E->getType(), CGF.MakeAddrLValue(GV, E->getType()));
    return;
  }
#endif
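  // GNU array range designators, e.g. `int a[10] = { [0 ... 9] = 1 };`,
  // are rejected here rather than emitted.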
  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  if (E->isTransparent())
    return Visit(E->getInit(0));

  AggValueSlot Dest = EnsureSlot(E->getType());

  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());

  // Handle initialization of an array.
  if (E->getType()->isArrayType()) {
    auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
    EmitArrayInit(Dest.getAddress(), AType, E->getType(), E);
    return;
  }

  assert(E->getType()->isRecordType() && "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = E->getNumInits();
  RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();

  // We'll need to enter cleanup scopes in case any of the element
  // initializers throws an exception.
  SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
  llvm::Instruction *cleanupDominator = nullptr;

  unsigned curInitIndex = 0;

  // Emit initialization of base classes.
  if (auto *CXXRD = dyn_cast<CXXRecordDecl>(record)) {
    assert(E->getNumInits() >= CXXRD->getNumBases() &&
           "missing initializer for base class");
    for (auto &Base : CXXRD->bases()) {
      assert(!Base.isVirtual() && "should not see vbases here");
      auto *BaseRD = Base.getType()->getAsCXXRecordDecl();
      Address V = CGF.GetAddressOfDirectBaseInCompleteClass(
          Dest.getAddress(), CXXRD, BaseRD,
          /*isBaseVirtual*/ false);
      AggValueSlot AggSlot =
        AggValueSlot::forAddr(V, Qualifiers(),
                              AggValueSlot::IsDestructed,
                              AggValueSlot::DoesNotNeedGCBarriers,
                              AggValueSlot::IsNotAliased);
      CGF.EmitAggExpr(E->getInit(curInitIndex++), AggSlot);

      if (QualType::DestructionKind dtorKind =
              Base.getType().isDestructedType()) {
        CGF.pushDestroy(dtorKind, V, Base.getType());
        cleanups.push_back(CGF.EHStack.stable_begin());
      }
    }
  }

  // Prepare a 'this' for CXXDefaultInitExprs.
  CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress());

  if (record->isUnion()) {
    // Only initialize one field of a union. The field itself is
    // specified by the initializer list.
    if (!E->getInitializedFieldInUnion()) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
      // Make sure that it's really an empty union and not a failure of
      // semantic analysis.
      for (const auto *Field : record->fields())
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = E->getInitializedFieldInUnion();

    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
    if (NumInitElements) {
      // Store the initializer into the field
      EmitInitializationToLValue(E->getInit(0), FieldLoc);
    } else {
      // Default-initialize to null.
      EmitNullInitializationToLValue(FieldLoc);
    }

    return;
  }

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  for (const auto *field : record->fields()) {
    // We're done once we hit the flexible array member.
    if (field->getType()->isIncompleteArrayType())
      break;

    // Always skip anonymous bitfields.
    if (field->isUnnamedBitfield())
      continue;

    // We're done if we reach the end of the explicit initializers, we
    // have a zeroed object, and the rest of the fields are
    // zero-initializable.
    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
        CGF.getTypes().isZeroInitializable(E->getType()))
      break;

    LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
    // We never generate write-barriers for initialized fields.
    LV.setNonGC(true);

    if (curInitIndex < NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
    } else {
      // We're out of initializers; default-initialize to null
      EmitNullInitializationToLValue(LV);
    }

    // Push a destructor if necessary.
    // FIXME: if we have an array of structures, all explicitly
    // initialized, we can end up pushing a linear number of cleanups.
    bool pushedCleanup = false;
    if (QualType::DestructionKind dtorKind
          = field->getType().isDestructedType()) {
      assert(LV.isSimple());
      if (CGF.needsEHCleanup(dtorKind)) {
        if (!cleanupDominator)
          cleanupDominator = CGF.Builder.CreateAlignedLoad(
              CGF.Int8Ty,
              llvm::Constant::getNullValue(CGF.Int8PtrTy),
              CharUnits::One()); // placeholder

        CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
                        CGF.getDestroyer(dtorKind), false);
        cleanups.push_back(CGF.EHStack.stable_begin());
        pushedCleanup = true;
      }
    }

    // If the GEP didn't get used because of a dead zero init or something
    // else, clean it up for -O0 builds and general tidiness.
    if (!pushedCleanup && LV.isSimple())
      if (llvm::GetElementPtrInst *GEP =
            dyn_cast<llvm::GetElementPtrInst>(LV.getPointer()))
        if (GEP->use_empty())
          GEP->eraseFromParent();
  }

  // Deactivate all the partial cleanups in reverse order, which
  // generally means popping them.
  for (unsigned i = cleanups.size(); i != 0; --i)
    CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);

  // Destroy the placeholder if we made one.
  if (cleanupDominator)
    cleanupDominator->eraseFromParent();
}

1379 void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
1380                                             llvm::Value *outerBegin) {
1381   // Emit the common subexpression.
1382   CodeGenFunction::OpaqueValueMapping binding(CGF, E->getCommonExpr());
1383 
1384   Address destPtr = EnsureSlot(E->getType()).getAddress();
1385   uint64_t numElements = E->getArraySize().getZExtValue();
1386 
1387   if (!numElements)
1388     return;
1389 
1390   // destPtr is an array*. Construct an elementType* by drilling down a level.
1391   llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
1392   llvm::Value *indices[] = {zero, zero};
1393   llvm::Value *begin = Builder.CreateInBoundsGEP(destPtr.getPointer(), indices,
1394                                                  "arrayinit.begin");
1395 
1396   // Prepare to special-case multidimensional array initialization: we avoid
1397   // emitting multiple destructor loops in that case.
1398   if (!outerBegin)
1399     outerBegin = begin;
1400   ArrayInitLoopExpr *InnerLoop = dyn_cast<ArrayInitLoopExpr>(E->getSubExpr());
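  // For a nested initialization like 'T a[2][3]' (T with a non-trivial
  // destructor), the inner ArrayInitLoopExpr shares this loop's cleanup
  // instead of pushing its own, so unwinding mid-copy runs a single
  // destructor loop over the flat [2 x [3 x T]] region.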

  QualType elementType =
      CGF.getContext().getAsArrayType(E->getType())->getElementType();
  CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
  CharUnits elementAlign =
      destPtr.getAlignment().alignmentOfArrayElement(elementSize);

  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

  // Jump into the body.
  CGF.EmitBlock(bodyBB);
  llvm::PHINode *index =
      Builder.CreatePHI(zero->getType(), 2, "arrayinit.index");
  index->addIncoming(zero, entryBB);
  llvm::Value *element = Builder.CreateInBoundsGEP(begin, index);
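  // The code emitted below has the usual counted-loop shape (sketch; names
  // match the labels used in this function):
  //
  //   arrayinit.body:
  //     %arrayinit.index = phi i64 [ 0, %entry ], [ %arrayinit.next, ... ]
  //     %element = getelementptr inbounds T, T* %arrayinit.begin, i64 %arrayinit.index
  //     ; ...initialize %element...
  //     %arrayinit.next = add nuw i64 %arrayinit.index, 1
  //     %arrayinit.done = icmp eq i64 %arrayinit.next, <numElements>
  //     br i1 %arrayinit.done, label %arrayinit.end, label %arrayinit.body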

  // Prepare for a cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  EHScopeStack::stable_iterator cleanup;
  if (CGF.needsEHCleanup(dtorKind) && !InnerLoop) {
    if (outerBegin->getType() != element->getType())
      outerBegin = Builder.CreateBitCast(outerBegin, element->getType());
    CGF.pushRegularPartialArrayCleanup(outerBegin, element, elementType,
                                       elementAlign,
                                       CGF.getDestroyer(dtorKind));
    cleanup = CGF.EHStack.stable_begin();
  } else {
    dtorKind = QualType::DK_none;
  }
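  // If an exception escapes while an element is being filled in, the
  // partial-array cleanup destroys the fully-constructed elements in
  // [outerBegin, element) before the exception propagates.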

  // Emit the actual filler expression.
  {
    // Temporaries created in an array initialization loop are destroyed
    // at the end of each iteration.
    CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
    CodeGenFunction::ArrayInitLoopExprScope Scope(CGF, index);
    LValue elementLV =
        CGF.MakeAddrLValue(Address(element, elementAlign), elementType);

    if (InnerLoop) {
      // If the subexpression is an ArrayInitLoopExpr, share its cleanup.
      auto elementSlot = AggValueSlot::forLValue(
          elementLV, AggValueSlot::IsDestructed,
          AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased);
      AggExprEmitter(CGF, elementSlot, false)
          .VisitArrayInitLoopExpr(InnerLoop, outerBegin);
    } else
      EmitInitializationToLValue(E->getSubExpr(), elementLV);
  }

  // Move on to the next element.
  llvm::Value *nextIndex = Builder.CreateNUWAdd(
      index, llvm::ConstantInt::get(CGF.SizeTy, 1), "arrayinit.next");
  index->addIncoming(nextIndex, Builder.GetInsertBlock());

  // Leave the loop if we're done.
  llvm::Value *done = Builder.CreateICmpEQ(
      nextIndex, llvm::ConstantInt::get(CGF.SizeTy, numElements),
      "arrayinit.done");
  llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
  Builder.CreateCondBr(done, endBB, bodyBB);

  CGF.EmitBlock(endBB);

  // Leave the partial-array cleanup if we entered one.
  if (dtorKind)
    CGF.DeactivateCleanupBlock(cleanup, index);
}

void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
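  // A DesignatedInitUpdateExpr pairs a base initializer with an InitListExpr
  // "updater" that overwrites parts of it, e.g. (illustrative sketch):
  //
  //   struct A { int x; struct B b; };
  //   struct A a = { .x = 1, .b = f(), .b.y = 2 };
  //
  // Here 'b' starts as the result of f() and then has '.y' overwritten.
  // We emit the base into the destination first, then apply the updater
  // in place.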
  AggValueSlot Dest = EnsureSlot(E->getType());

  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
  EmitInitializationToLValue(E->getBase(), DestLV);
  VisitInitListExpr(E->getUpdater());
}

//===----------------------------------------------------------------------===//
//                        Entry Points into this File
//===----------------------------------------------------------------------===//

/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
/// non-zero bytes that will be stored when outputting the initializer for the
/// specified initializer expression.
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0 and 0.0 won't require any non-zero stores!
  if (isSimpleZero(E, CGF)) return CharUnits::Zero();
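  // e.g. in '{ 0, 0.0, n }' the first two elements contribute zero bytes to
  // the estimate; only 'n' counts.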

  // If this is an initlist expr, sum up the sizes of the (present)
  // elements.  If this is something weird, assume the whole thing is non-zero.
  const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
  if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
    return CGF.getContext().getTypeSizeInChars(E->getType());

  // InitListExprs for structs have to be handled carefully.  If there are
  // reference members, we need to consider the size of the reference, not the
  // referent.  InitListExprs for unions and arrays can't have references.
  if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
    if (!RT->isUnionType()) {
      RecordDecl *SD = RT->getDecl();
      CharUnits NumNonZeroBytes = CharUnits::Zero();

      unsigned ILEElement = 0;
      if (auto *CXXRD = dyn_cast<CXXRecordDecl>(SD))
        while (ILEElement != CXXRD->getNumBases())
          NumNonZeroBytes +=
              GetNumNonZeroBytesInInit(ILE->getInit(ILEElement++), CGF);
      for (const auto *Field : SD->fields()) {
        // We're done once we hit the flexible array member or run out of
        // InitListExpr elements.
        if (Field->getType()->isIncompleteArrayType() ||
            ILEElement == ILE->getNumInits())
          break;
        if (Field->isUnnamedBitfield())
          continue;

        const Expr *E = ILE->getInit(ILEElement++);

        // Reference values are always non-null and have the width of a pointer.
        if (Field->getType()->isReferenceType())
          NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
              CGF.getTarget().getPointerWidth(0));
        else
          NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
      }

      return NumNonZeroBytes;
    }
  }

  CharUnits NumNonZeroBytes = CharUnits::Zero();
  for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
    NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
  return NumNonZeroBytes;
}

/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
/// zeros in it, emit a memset and avoid storing the individual zeros.
///
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
                                     CodeGenFunction &CGF) {
  // If the slot is already known to be zeroed, nothing to do.  Don't mess with
  // volatile stores.
  if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())
    return;

  // C++ objects with a user-declared constructor don't need zeroing.
  if (CGF.getLangOpts().CPlusPlus)
    if (const RecordType *RT = CGF.getContext()
                       .getBaseElementType(E->getType())->getAs<RecordType>()) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
      if (RD->hasUserDeclaredConstructor())
        return;
    }

  // If the type is 16 bytes or smaller, prefer individual stores over memset.
  CharUnits Size = CGF.getContext().getTypeSizeInChars(E->getType());
  if (Size <= CharUnits::fromQuantity(16))
    return;

  // Check to see if over 3/4 of the initializer is known to be zero.  If so,
  // we prefer to emit a memset plus individual stores for the rest.
  CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
  if (NumNonZeroBytes*4 > Size)
    return;
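  // Worked example (sketch): 'struct { int x; char buf[1000]; } s = { 7 };'
  // has roughly 4 non-zero bytes out of 1004, well under the 3/4 threshold,
  // so we memset the whole slot and then store only 'x'.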

  // Okay, it seems like a good idea to use an initial memset; emit the call.
  llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());

  Address Loc = Slot.getAddress();
  Loc = CGF.Builder.CreateElementBitCast(Loc, CGF.Int8Ty);
  CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);

  // Tell the AggExprEmitter that the slot is known zero.
  Slot.setZeroed();
}

/// EmitAggExpr - Emit the computation of the specified expression of aggregate
/// type.  The result is computed into the given slot.  Note that if the slot
/// is ignored, the value of the aggregate expression is not needed; otherwise
/// the slot must have a valid address.
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
  assert(E && hasAggregateEvaluationKind(E->getType()) &&
         "Invalid aggregate expression to emit");
  assert((Slot.getAddress().isValid() || Slot.isIgnored()) &&
         "slot has bits but no address");

  // Optimize the slot if possible.
  CheckAggExprForMemSetUse(Slot, E, *this);

  AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr*>(E));
}

LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
  assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
  Address Temp = CreateMemTemp(E->getType());
  LValue LV = MakeAddrLValue(Temp, E->getType());
  EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsNotAliased));
  return LV;
}

void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src,
                                        QualType Ty, bool isVolatile,
                                        bool isAssignment) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  Address DestPtr = Dest.getAddress();
  Address SrcPtr = Src.getAddress();

  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
      assert((Record->hasTrivialCopyConstructor() ||
              Record->hasTrivialCopyAssignment() ||
              Record->hasTrivialMoveConstructor() ||
              Record->hasTrivialMoveAssignment() ||
              Record->isUnion()) &&
             "Trying to aggregate-copy a type without a trivial copy/move "
             "constructor or assignment operator");
      // Ignore empty classes in C++.
      if (Record->isEmpty())
        return;
    }
  }

  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in any way the storage of the first
  // object, then the overlap shall be exact and the two objects shall have
  // qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely.  If there is a libc that does not
  // safely handle this, we can add a target hook.
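  // For example, a self-assignment 's = s;' produces a memcpy whose source
  // and destination are exactly equal; in practice this is benign.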

  // Get data size info for this aggregate. If this is an assignment,
  // don't copy the tail padding, because we might be assigning into a
  // base subobject where the tail padding is claimed.  Otherwise,
  // copying it is fine.
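  // e.g. (sketch):
  //
  //   struct A { int i; char c; };   // 3 bytes of tail padding
  //   struct B : A { char d; };      // 'd' may live in A's tail padding
  //
  // Assigning to the A subobject of a B must not overwrite 'd', so an
  // assignment copies only A's data size, not its full size.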
  std::pair<CharUnits, CharUnits> TypeInfo;
  if (isAssignment)
    TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
  else
    TypeInfo = getContext().getTypeInfoInChars(Ty);

  llvm::Value *SizeVal = nullptr;
  if (TypeInfo.first.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (auto *VAT = dyn_cast_or_null<VariableArrayType>(
            getContext().getAsArrayType(Ty))) {
      QualType BaseEltTy;
      SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr);
      TypeInfo = getContext().getTypeInfoDataSizeInChars(BaseEltTy);
      std::pair<CharUnits, CharUnits> LastElementTypeInfo;
      if (!isAssignment)
        LastElementTypeInfo = getContext().getTypeInfoInChars(BaseEltTy);
      assert(!TypeInfo.first.isZero());
      SizeVal = Builder.CreateNUWMul(
          SizeVal,
          llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity()));
      if (!isAssignment) {
        SizeVal = Builder.CreateNUWSub(
            SizeVal,
            llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity()));
        SizeVal = Builder.CreateNUWAdd(
            SizeVal, llvm::ConstantInt::get(
                         SizeTy, LastElementTypeInfo.first.getQuantity()));
      }
    }
  }
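  // In other words, for a VLA of N elements the computed size is
  // N * dataSize(elt) for assignments, and for full copies it is
  // (N - 1) * dataSize(elt) + fullSize(elt), so only the last element's
  // tail padding is included.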
  if (!SizeVal) {
    SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity());
  }

  // FIXME: If we have a volatile struct, the optimizer can remove what might
  // appear to be `extra' memory ops:
  //
  // volatile struct { int i; } a, b;
  //
  // int main() {
  //   a = b;
  //   a = b;
  // }
  //
  // we need to use a different call here.  We use isVolatile to indicate when
  // either the source or the destination is volatile.

  DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
  SrcPtr = Builder.CreateElementBitCast(SrcPtr, Int8Ty);

  // Don't do any of the memmove_collectable tests if GC isn't set.
  if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
    // fall through
  } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
    RecordDecl *Record = RecordTy->getDecl();
    if (Record->hasObjectMember()) {
      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                    SizeVal);
      return;
    }
  } else if (Ty->isArrayType()) {
    QualType BaseType = getContext().getBaseElementType(Ty);
    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
      if (RecordTy->getDecl()->hasObjectMember()) {
        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                      SizeVal);
        return;
      }
    }
  }

  auto Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile);

  // Determine the metadata to describe the position of any padding in this
  // memcpy, as well as the TBAA tags for the members of the struct, in case
  // the optimizer wishes to expand it into scalar memory operations.
  if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty))
    Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag);
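  // (!tbaa.struct is a list of (offset, size, TBAA tag) triples describing
  // the copied bytes, so a pass that splits the memcpy into per-member loads
  // and stores can attach the right !tbaa tag to each one.)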

  if (CGM.getCodeGenOpts().NewStructPathTBAA) {
    TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForMemoryTransfer(
        Dest.getTBAAInfo(), Src.getTBAAInfo());
    CGM.DecorateInstructionWithTBAA(Inst, TBAAInfo);
  }
}