1 //===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code to emit Aggregate Expr nodes as LLVM code.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CodeGenFunction.h"
15 #include "CGObjCRuntime.h"
16 #include "CodeGenModule.h"
17 #include "clang/AST/ASTContext.h"
18 #include "clang/AST/DeclCXX.h"
19 #include "clang/AST/DeclTemplate.h"
20 #include "clang/AST/StmtVisitor.h"
21 #include "llvm/IR/Constants.h"
22 #include "llvm/IR/Function.h"
23 #include "llvm/IR/GlobalVariable.h"
24 #include "llvm/IR/Intrinsics.h"
25 using namespace clang;
26 using namespace CodeGen;
27 
28 //===----------------------------------------------------------------------===//
29 //                        Aggregate Expression Emitter
30 //===----------------------------------------------------------------------===//
31 
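// A padded atomic type is represented as a struct of the form
// { ValueType, Padding }, so the value sub-object of such a slot is
// addressed by a GEP with two zero indices, roughly:
//   %value = getelementptr inbounds %PaddedTy* %atomic, i32 0, i32 0
// Walking back through that GEP recovers the address of the whole
// padded atomic object.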
32 llvm::Value *AggValueSlot::getPaddedAtomicAddr() const {
33   assert(isValueOfAtomic());
34   llvm::GEPOperator *op = cast<llvm::GEPOperator>(getAddr());
35   assert(op->getNumIndices() == 2);
36   assert(op->hasAllZeroIndices());
37   return op->getPointerOperand();
38 }
39 
namespace {
41 class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
42   CodeGenFunction &CGF;
43   CGBuilderTy &Builder;
44   AggValueSlot Dest;
45 
46   /// We want to use 'dest' as the return slot except under two
47   /// conditions:
48   ///   - The destination slot requires garbage collection, so we
49   ///     need to use the GC API.
50   ///   - The destination slot is potentially aliased.
51   bool shouldUseDestForReturnSlot() const {
52     return !(Dest.requiresGCollection() || Dest.isPotentiallyAliased());
53   }
54 
55   ReturnValueSlot getReturnValueSlot() const {
56     if (!shouldUseDestForReturnSlot())
57       return ReturnValueSlot();
58 
59     return ReturnValueSlot(Dest.getAddr(), Dest.isVolatile());
60   }
61 
62   AggValueSlot EnsureSlot(QualType T) {
63     if (!Dest.isIgnored()) return Dest;
64     return CGF.CreateAggTemp(T, "agg.tmp.ensured");
65   }
66   void EnsureDest(QualType T) {
67     if (!Dest.isIgnored()) return;
68     Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
69   }
70 
71 public:
72   AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest)
73     : CGF(cgf), Builder(CGF.Builder), Dest(Dest) {
74   }
75 
76   //===--------------------------------------------------------------------===//
77   //                               Utilities
78   //===--------------------------------------------------------------------===//
79 
80   /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents an lvalue, this method emits the address of the lvalue,
82   /// then loads the result into DestPtr.
83   void EmitAggLoadOfLValue(const Expr *E);
84 
85   /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
86   void EmitFinalDestCopy(QualType type, const LValue &src);
87   void EmitFinalDestCopy(QualType type, RValue src,
88                          CharUnits srcAlignment = CharUnits::Zero());
89   void EmitCopy(QualType type, const AggValueSlot &dest,
90                 const AggValueSlot &src);
91 
92   void EmitMoveFromReturnSlot(const Expr *E, RValue Src);
93 
94   void EmitStdInitializerList(llvm::Value *DestPtr, InitListExpr *InitList);
95   void EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
96                      QualType elementType, InitListExpr *E);
97 
98   AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
99     if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
100       return AggValueSlot::NeedsGCBarriers;
101     return AggValueSlot::DoesNotNeedGCBarriers;
102   }
103 
104   bool TypeRequiresGCollection(QualType T);
105 
106   //===--------------------------------------------------------------------===//
107   //                            Visitor Methods
108   //===--------------------------------------------------------------------===//
109 
110   void VisitStmt(Stmt *S) {
111     CGF.ErrorUnsupported(S, "aggregate expression");
112   }
113   void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
114   void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
115     Visit(GE->getResultExpr());
116   }
117   void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
118   void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
119     return Visit(E->getReplacement());
120   }
121 
122   // l-values.
123   void VisitDeclRefExpr(DeclRefExpr *E) {
124     // For aggregates, we should always be able to emit the variable
125     // as an l-value unless it's a reference.  This is due to the fact
126     // that we can't actually ever see a normal l2r conversion on an
127     // aggregate in C++, and in C there's no language standard
128     // actively preventing us from listing variables in the captures
129     // list of a block.
130     if (E->getDecl()->getType()->isReferenceType()) {
131       if (CodeGenFunction::ConstantEmission result
132             = CGF.tryEmitAsConstant(E)) {
133         EmitFinalDestCopy(E->getType(), result.getReferenceLValue(CGF, E));
134         return;
135       }
136     }
137 
138     EmitAggLoadOfLValue(E);
139   }
140 
141   void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
142   void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
143   void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
144   void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
145   void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
146     EmitAggLoadOfLValue(E);
147   }
148   void VisitPredefinedExpr(const PredefinedExpr *E) {
149     EmitAggLoadOfLValue(E);
150   }
151 
152   // Operators.
153   void VisitCastExpr(CastExpr *E);
154   void VisitCallExpr(const CallExpr *E);
155   void VisitStmtExpr(const StmtExpr *E);
156   void VisitBinaryOperator(const BinaryOperator *BO);
157   void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
158   void VisitBinAssign(const BinaryOperator *E);
159   void VisitBinComma(const BinaryOperator *E);
160 
161   void VisitObjCMessageExpr(ObjCMessageExpr *E);
162   void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
163     EmitAggLoadOfLValue(E);
164   }
165 
166   void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
167   void VisitChooseExpr(const ChooseExpr *CE);
168   void VisitInitListExpr(InitListExpr *E);
169   void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
170   void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
171     Visit(DAE->getExpr());
172   }
173   void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
174   void VisitCXXConstructExpr(const CXXConstructExpr *E);
175   void VisitLambdaExpr(LambdaExpr *E);
176   void VisitExprWithCleanups(ExprWithCleanups *E);
177   void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
178   void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
179   void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
180   void VisitOpaqueValueExpr(OpaqueValueExpr *E);
181 
182   void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
183     if (E->isGLValue()) {
184       LValue LV = CGF.EmitPseudoObjectLValue(E);
185       return EmitFinalDestCopy(E->getType(), LV);
186     }
187 
188     CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
189   }
190 
191   void VisitVAArgExpr(VAArgExpr *E);
192 
193   void EmitInitializationToLValue(Expr *E, LValue Address);
194   void EmitNullInitializationToLValue(LValue Address);
196   void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
197   void VisitAtomicExpr(AtomicExpr *E) {
198     CGF.EmitAtomicExpr(E, EnsureSlot(E->getType()).getAddr());
199   }
200 };
201 
202 /// A helper class for emitting expressions into the value sub-object
203 /// of a padded atomic type.
204 class ValueDestForAtomic {
205   AggValueSlot Dest;
206 public:
207   ValueDestForAtomic(CodeGenFunction &CGF, AggValueSlot dest, QualType type)
208     : Dest(dest) {
209     assert(!Dest.isValueOfAtomic());
210     if (!Dest.isIgnored() && CGF.CGM.isPaddedAtomicType(type)) {
211       llvm::Value *valueAddr = CGF.Builder.CreateStructGEP(Dest.getAddr(), 0);
212       Dest = AggValueSlot::forAddr(valueAddr,
213                                    Dest.getAlignment(),
214                                    Dest.getQualifiers(),
215                                    Dest.isExternallyDestructed(),
216                                    Dest.requiresGCollection(),
217                                    Dest.isPotentiallyAliased(),
218                                    Dest.isZeroed(),
219                                    AggValueSlot::IsValueOfAtomic);
220     }
221   }
222 
223   const AggValueSlot &getDest() const { return Dest; }
224 
225   ~ValueDestForAtomic() {
226     // Kill the GEP if we made one and it didn't end up used.
227     if (Dest.isValueOfAtomic()) {
228       llvm::Instruction *addr = cast<llvm::GetElementPtrInst>(Dest.getAddr());
229       if (addr->use_empty()) addr->eraseFromParent();
230     }
231   }
232 };
233 }  // end anonymous namespace.
234 
235 //===----------------------------------------------------------------------===//
236 //                                Utilities
237 //===----------------------------------------------------------------------===//
238 
239 /// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// represents an lvalue, this method emits the address of the lvalue,
241 /// then loads the result into DestPtr.
242 void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
243   LValue LV = CGF.EmitLValue(E);
244 
245   // If the type of the l-value is atomic, then do an atomic load.
246   if (LV.getType()->isAtomicType()) {
247     ValueDestForAtomic valueDest(CGF, Dest, LV.getType());
248     CGF.EmitAtomicLoad(LV, valueDest.getDest());
249     return;
250   }
251 
252   EmitFinalDestCopy(E->getType(), LV);
253 }
254 
255 /// \brief True if the given aggregate type requires special GC API calls.
256 bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
257   // Only record types have members that might require garbage collection.
258   const RecordType *RecordTy = T->getAs<RecordType>();
259   if (!RecordTy) return false;
260 
261   // Don't mess with non-trivial C++ types.
262   RecordDecl *Record = RecordTy->getDecl();
263   if (isa<CXXRecordDecl>(Record) &&
264       (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
265        !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
266     return false;
267 
268   // Check whether the type has an object member.
269   return Record->hasObjectMember();
270 }
271 
272 /// \brief Perform the final move to DestPtr if for some reason
273 /// getReturnValueSlot() didn't use it directly.
274 ///
275 /// The idea is that you do something like this:
276 ///   RValue Result = EmitSomething(..., getReturnValueSlot());
277 ///   EmitMoveFromReturnSlot(E, Result);
278 ///
279 /// If nothing interferes, this will cause the result to be emitted
280 /// directly into the return value slot.  Otherwise, a final move
281 /// will be performed.
282 void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue src) {
283   if (shouldUseDestForReturnSlot()) {
    // Logically, Dest.getAddr() should equal src.getAggregateAddr().
285     // The possibility of undef rvalues complicates that a lot,
286     // though, so we can't really assert.
287     return;
288   }
289 
290   // Otherwise, copy from there to the destination.
291   assert(Dest.getAddr() != src.getAggregateAddr());
292   std::pair<CharUnits, CharUnits> typeInfo =
293     CGF.getContext().getTypeInfoInChars(E->getType());
294   EmitFinalDestCopy(E->getType(), src, typeInfo.second);
295 }
296 
297 /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
298 void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src,
299                                        CharUnits srcAlign) {
300   assert(src.isAggregate() && "value must be aggregate value!");
301   LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddr(), type, srcAlign);
302   EmitFinalDestCopy(type, srcLV);
303 }
304 
305 /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
306 void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src) {
307   // If Dest is ignored, then we're evaluating an aggregate expression
308   // in a context that doesn't care about the result.  Note that loads
309   // from volatile l-values force the existence of a non-ignored
310   // destination.
311   if (Dest.isIgnored())
312     return;
313 
314   AggValueSlot srcAgg =
315     AggValueSlot::forLValue(src, AggValueSlot::IsDestructed,
316                             needsGC(type), AggValueSlot::IsAliased);
317   EmitCopy(type, Dest, srcAgg);
318 }
319 
320 /// Perform a copy from the source into the destination.
321 ///
322 /// \param type - the type of the aggregate being copied; qualifiers are
323 ///   ignored
324 void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
325                               const AggValueSlot &src) {
326   if (dest.requiresGCollection()) {
327     CharUnits sz = CGF.getContext().getTypeSizeInChars(type);
328     llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
329     CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
330                                                       dest.getAddr(),
331                                                       src.getAddr(),
332                                                       size);
333     return;
334   }
335 
  // Otherwise, do a plain aggregate copy.  The copy is volatile if either
  // side is, and it uses the minimum alignment of the two sides.
339   CGF.EmitAggregateCopy(dest.getAddr(), src.getAddr(), type,
340                         dest.isVolatile() || src.isVolatile(),
341                         std::min(dest.getAlignment(), src.getAlignment()));
342 }
343 
344 static QualType GetStdInitializerListElementType(QualType T) {
345   // Just assume that this is really std::initializer_list.
346   ClassTemplateSpecializationDecl *specialization =
347       cast<ClassTemplateSpecializationDecl>(T->castAs<RecordType>()->getDecl());
348   return specialization->getTemplateArgs()[0].getAsType();
349 }
350 
351 /// \brief Prepare cleanup for the temporary array.
352 static void EmitStdInitializerListCleanup(CodeGenFunction &CGF,
353                                           QualType arrayType,
354                                           llvm::Value *addr,
355                                           const InitListExpr *initList) {
356   QualType::DestructionKind dtorKind = arrayType.isDestructedType();
357   if (!dtorKind)
358     return; // Type doesn't need destroying.
359   if (dtorKind != QualType::DK_cxx_destructor) {
360     CGF.ErrorUnsupported(initList, "ObjC ARC type in initializer_list");
361     return;
362   }
363 
364   CodeGenFunction::Destroyer *destroyer = CGF.getDestroyer(dtorKind);
365   CGF.pushDestroy(NormalAndEHCleanup, addr, arrayType, destroyer,
366                   /*EHCleanup=*/true);
367 }
368 
369 /// \brief Emit the initializer for a std::initializer_list initialized with a
370 /// real initializer list.
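///
/// For example (a sketch of the intended lowering):
///   std::initializer_list<int> x = { 1, 2, 3 };
/// emits a temporary array of three ints holding the elements and then
/// fills in the list's two fields: the array's start pointer plus either
/// its end pointer or its length, whichever representation the library's
/// initializer_list uses.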
371 void AggExprEmitter::EmitStdInitializerList(llvm::Value *destPtr,
372                                             InitListExpr *initList) {
373   // We emit an array containing the elements, then have the init list point
374   // at the array.
375   ASTContext &ctx = CGF.getContext();
376   unsigned numInits = initList->getNumInits();
377   QualType element = GetStdInitializerListElementType(initList->getType());
378   llvm::APInt size(ctx.getTypeSize(ctx.getSizeType()), numInits);
  QualType array =
      ctx.getConstantArrayType(element, size, ArrayType::Normal, 0);
380   llvm::Type *LTy = CGF.ConvertTypeForMem(array);
381   llvm::AllocaInst *alloc = CGF.CreateTempAlloca(LTy);
382   alloc->setAlignment(ctx.getTypeAlignInChars(array).getQuantity());
383   alloc->setName(".initlist.");
384 
385   EmitArrayInit(alloc, cast<llvm::ArrayType>(LTy), element, initList);
386 
387   // FIXME: The diagnostics are somewhat out of place here.
388   RecordDecl *record = initList->getType()->castAs<RecordType>()->getDecl();
389   RecordDecl::field_iterator field = record->field_begin();
390   if (field == record->field_end()) {
391     CGF.ErrorUnsupported(initList, "weird std::initializer_list");
392     return;
393   }
394 
395   QualType elementPtr = ctx.getPointerType(element.withConst());
396 
397   // Start pointer.
398   if (!ctx.hasSameType(field->getType(), elementPtr)) {
399     CGF.ErrorUnsupported(initList, "weird std::initializer_list");
400     return;
401   }
402   LValue DestLV = CGF.MakeNaturalAlignAddrLValue(destPtr, initList->getType());
403   LValue start = CGF.EmitLValueForFieldInitialization(DestLV, *field);
404   llvm::Value *arrayStart = Builder.CreateStructGEP(alloc, 0, "arraystart");
405   CGF.EmitStoreThroughLValue(RValue::get(arrayStart), start);
406   ++field;
407 
408   if (field == record->field_end()) {
409     CGF.ErrorUnsupported(initList, "weird std::initializer_list");
410     return;
411   }
412   LValue endOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *field);
413   if (ctx.hasSameType(field->getType(), elementPtr)) {
414     // End pointer.
    llvm::Value *arrayEnd =
        Builder.CreateStructGEP(alloc, numInits, "arrayend");
416     CGF.EmitStoreThroughLValue(RValue::get(arrayEnd), endOrLength);
  } else if (ctx.hasSameType(field->getType(), ctx.getSizeType())) {
418     // Length.
419     CGF.EmitStoreThroughLValue(RValue::get(Builder.getInt(size)), endOrLength);
420   } else {
421     CGF.ErrorUnsupported(initList, "weird std::initializer_list");
422     return;
423   }
424 
425   if (!Dest.isExternallyDestructed())
426     EmitStdInitializerListCleanup(CGF, array, alloc, initList);
427 }
428 
429 /// \brief Emit initialization of an array from an initializer list.
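///
/// For example:
///   int a[8] = { 1, 2, 3 };
/// emits stores for the three explicit elements and then initializes the
/// remaining five from the array filler (here, zero), either by relying
/// on already-zeroed memory or by emitting an explicit fill loop.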
430 void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
431                                    QualType elementType, InitListExpr *E) {
432   uint64_t NumInitElements = E->getNumInits();
433 
434   uint64_t NumArrayElements = AType->getNumElements();
435   assert(NumInitElements <= NumArrayElements);
436 
437   // DestPtr is an array*.  Construct an elementType* by drilling
438   // down a level.
439   llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
440   llvm::Value *indices[] = { zero, zero };
441   llvm::Value *begin =
442     Builder.CreateInBoundsGEP(DestPtr, indices, "arrayinit.begin");
443 
444   // Exception safety requires us to destroy all the
445   // already-constructed members if an initializer throws.
446   // For that, we'll need an EH cleanup.
447   QualType::DestructionKind dtorKind = elementType.isDestructedType();
448   llvm::AllocaInst *endOfInit = 0;
449   EHScopeStack::stable_iterator cleanup;
450   llvm::Instruction *cleanupDominator = 0;
451   if (CGF.needsEHCleanup(dtorKind)) {
452     // In principle we could tell the cleanup where we are more
453     // directly, but the control flow can get so varied here that it
454     // would actually be quite complex.  Therefore we go through an
455     // alloca.
456     endOfInit = CGF.CreateTempAlloca(begin->getType(),
457                                      "arrayinit.endOfInit");
458     cleanupDominator = Builder.CreateStore(begin, endOfInit);
459     CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
460                                          CGF.getDestroyer(dtorKind));
461     cleanup = CGF.EHStack.stable_begin();
462 
463   // Otherwise, remember that we didn't need a cleanup.
464   } else {
465     dtorKind = QualType::DK_none;
466   }
467 
468   llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);
469 
470   // The 'current element to initialize'.  The invariants on this
471   // variable are complicated.  Essentially, after each iteration of
472   // the loop, it points to the last initialized element, except
473   // that it points to the beginning of the array before any
474   // elements have been initialized.
475   llvm::Value *element = begin;
476 
477   // Emit the explicit initializers.
478   for (uint64_t i = 0; i != NumInitElements; ++i) {
479     // Advance to the next element.
480     if (i > 0) {
481       element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");
482 
483       // Tell the cleanup that it needs to destroy up to this
484       // element.  TODO: some of these stores can be trivially
485       // observed to be unnecessary.
486       if (endOfInit) Builder.CreateStore(element, endOfInit);
487     }
488 
489     // If these are nested std::initializer_list inits, do them directly,
490     // because they are conceptually the same "location".
491     InitListExpr *initList = dyn_cast<InitListExpr>(E->getInit(i));
492     if (initList && initList->initializesStdInitializerList()) {
493       EmitStdInitializerList(element, initList);
494     } else {
495       LValue elementLV = CGF.MakeAddrLValue(element, elementType);
496       EmitInitializationToLValue(E->getInit(i), elementLV);
497     }
498   }
499 
500   // Check whether there's a non-trivial array-fill expression.
501   // Note that this will be a CXXConstructExpr even if the element
502   // type is an array (or array of array, etc.) of class type.
503   Expr *filler = E->getArrayFiller();
504   bool hasTrivialFiller = true;
505   if (CXXConstructExpr *cons = dyn_cast_or_null<CXXConstructExpr>(filler)) {
506     assert(cons->getConstructor()->isDefaultConstructor());
507     hasTrivialFiller = cons->getConstructor()->isTrivial();
508   }
509 
510   // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression.  We can skip this if we're
512   // emitting to zeroed memory.
513   if (NumInitElements != NumArrayElements &&
514       !(Dest.isZeroed() && hasTrivialFiller &&
515         CGF.getTypes().isZeroInitializable(elementType))) {
516 
517     // Use an actual loop.  This is basically
518     //   do { *array++ = filler; } while (array != end);
519 
520     // Advance to the start of the rest of the array.
521     if (NumInitElements) {
522       element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
523       if (endOfInit) Builder.CreateStore(element, endOfInit);
524     }
525 
526     // Compute the end of the array.
    llvm::Value *end = Builder.CreateInBoundsGEP(
        begin, llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
        "arrayinit.end");
530 
531     llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
532     llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
533 
534     // Jump into the body.
535     CGF.EmitBlock(bodyBB);
536     llvm::PHINode *currentElement =
537       Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
538     currentElement->addIncoming(element, entryBB);
539 
540     // Emit the actual filler expression.
541     LValue elementLV = CGF.MakeAddrLValue(currentElement, elementType);
542     if (filler)
543       EmitInitializationToLValue(filler, elementLV);
544     else
545       EmitNullInitializationToLValue(elementLV);
546 
547     // Move on to the next element.
548     llvm::Value *nextElement =
549       Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");
550 
551     // Tell the EH cleanup that we finished with the last element.
552     if (endOfInit) Builder.CreateStore(nextElement, endOfInit);
553 
554     // Leave the loop if we're done.
555     llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
556                                              "arrayinit.done");
557     llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
558     Builder.CreateCondBr(done, endBB, bodyBB);
559     currentElement->addIncoming(nextElement, Builder.GetInsertBlock());
560 
561     CGF.EmitBlock(endBB);
562   }
563 
564   // Leave the partial-array cleanup if we entered one.
565   if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
566 }
567 
568 //===----------------------------------------------------------------------===//
569 //                            Visitor Methods
570 //===----------------------------------------------------------------------===//
571 
572 void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
573   Visit(E->GetTemporaryExpr());
574 }
575 
576 void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
577   EmitFinalDestCopy(e->getType(), CGF.getOpaqueLValueMapping(e));
578 }
579 
580 void
581 AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
582   if (Dest.isPotentiallyAliased() &&
583       E->getType().isPODType(CGF.getContext())) {
584     // For a POD type, just emit a load of the lvalue + a copy, because our
585     // compound literal might alias the destination.
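    // For example, in something like:
    //   s = (struct S){ s.b, s.a };
    // emitting the literal directly into 's' would clobber fields that
    // the initializer still needs to read.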
586     EmitAggLoadOfLValue(E);
587     return;
588   }
589 
590   AggValueSlot Slot = EnsureSlot(E->getType());
591   CGF.EmitAggExpr(E->getInitializer(), Slot);
592 }
593 
594 /// Attempt to look through various unimportant expressions to find a
595 /// cast of the given kind.
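///
/// For example, when emitting a CK_NonAtomicToAtomic cast whose operand is
/// (behind parens and no-op casts) a CK_AtomicToNonAtomic cast, the two
/// casts cancel and we can emit the inner operand directly.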
596 static Expr *findPeephole(Expr *op, CastKind kind) {
597   while (true) {
598     op = op->IgnoreParens();
599     if (CastExpr *castE = dyn_cast<CastExpr>(op)) {
600       if (castE->getCastKind() == kind)
601         return castE->getSubExpr();
602       if (castE->getCastKind() == CK_NoOp)
603         continue;
604     }
605     return 0;
606   }
607 }
608 
609 void AggExprEmitter::VisitCastExpr(CastExpr *E) {
610   switch (E->getCastKind()) {
611   case CK_Dynamic: {
612     // FIXME: Can this actually happen? We have no test coverage for it.
613     assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
614     LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
615                                       CodeGenFunction::TCK_Load);
616     // FIXME: Do we also need to handle property references here?
617     if (LV.isSimple())
618       CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
619     else
620       CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");
621 
622     if (!Dest.isIgnored())
623       CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
624     break;
625   }
626 
627   case CK_ToUnion: {
628     if (Dest.isIgnored()) break;
629 
630     // GCC union extension
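    // For example (cast-to-union):
    //   union U { int i; double d; };
    //   union U u = (union U)42;   // initializes the 'int' member
    // We address the destination as a pointer to the operand's type and
    // initialize that sub-object.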
631     QualType Ty = E->getSubExpr()->getType();
632     QualType PtrTy = CGF.getContext().getPointerType(Ty);
633     llvm::Value *CastPtr = Builder.CreateBitCast(Dest.getAddr(),
634                                                  CGF.ConvertType(PtrTy));
635     EmitInitializationToLValue(E->getSubExpr(),
636                                CGF.MakeAddrLValue(CastPtr, Ty));
637     break;
638   }
639 
640   case CK_DerivedToBase:
641   case CK_BaseToDerived:
642   case CK_UncheckedDerivedToBase: {
    llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
                     "should have been unpacked before we got here");
645   }
646 
647   case CK_NonAtomicToAtomic:
648   case CK_AtomicToNonAtomic: {
649     bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);
650 
651     // Determine the atomic and value types.
652     QualType atomicType = E->getSubExpr()->getType();
653     QualType valueType = E->getType();
654     if (isToAtomic) std::swap(atomicType, valueType);
655 
656     assert(atomicType->isAtomicType());
657     assert(CGF.getContext().hasSameUnqualifiedType(valueType,
658                           atomicType->castAs<AtomicType>()->getValueType()));
659 
660     // Just recurse normally if we're ignoring the result or the
661     // atomic type doesn't change representation.
662     if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
663       return Visit(E->getSubExpr());
664     }
665 
666     CastKind peepholeTarget =
667       (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);
668 
669     // These two cases are reverses of each other; try to peephole them.
670     if (Expr *op = findPeephole(E->getSubExpr(), peepholeTarget)) {
671       assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
672                                                      E->getType()) &&
673            "peephole significantly changed types?");
674       return Visit(op);
675     }
676 
677     // If we're converting an r-value of non-atomic type to an r-value
678     // of atomic type, just make an atomic temporary, emit into that,
679     // and then copy the value out.  (FIXME: do we need to
680     // zero-initialize it first?)
681     if (isToAtomic) {
682       ValueDestForAtomic valueDest(CGF, Dest, atomicType);
683       CGF.EmitAggExpr(E->getSubExpr(), valueDest.getDest());
684       return;
685     }
686 
687     // Otherwise, we're converting an atomic type to a non-atomic type.
688 
689     // If the dest is a value-of-atomic subobject, drill back out.
690     if (Dest.isValueOfAtomic()) {
691       AggValueSlot atomicSlot =
692         AggValueSlot::forAddr(Dest.getPaddedAtomicAddr(),
693                               Dest.getAlignment(),
694                               Dest.getQualifiers(),
695                               Dest.isExternallyDestructed(),
696                               Dest.requiresGCollection(),
697                               Dest.isPotentiallyAliased(),
698                               Dest.isZeroed(),
699                               AggValueSlot::IsNotValueOfAtomic);
700       CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);
701       return;
702     }
703 
704     // Otherwise, make an atomic temporary, emit into that, and then
705     // copy the value out.
706     AggValueSlot atomicSlot =
707       CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
708     CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);
709 
710     llvm::Value *valueAddr =
711       Builder.CreateStructGEP(atomicSlot.getAddr(), 0);
712     RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
713     return EmitFinalDestCopy(valueType, rvalue);
714   }
715 
716   case CK_LValueToRValue:
717     // If we're loading from a volatile type, force the destination
718     // into existence.
719     if (E->getSubExpr()->getType().isVolatileQualified()) {
720       EnsureDest(E->getType());
721       return Visit(E->getSubExpr());
722     }
723 
724     // fallthrough
725 
726   case CK_NoOp:
727   case CK_UserDefinedConversion:
728   case CK_ConstructorConversion:
729     assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
730                                                    E->getType()) &&
731            "Implicit cast types must be compatible");
732     Visit(E->getSubExpr());
733     break;
734 
735   case CK_LValueBitCast:
736     llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
737 
738   case CK_Dependent:
739   case CK_BitCast:
740   case CK_ArrayToPointerDecay:
741   case CK_FunctionToPointerDecay:
742   case CK_NullToPointer:
743   case CK_NullToMemberPointer:
744   case CK_BaseToDerivedMemberPointer:
745   case CK_DerivedToBaseMemberPointer:
746   case CK_MemberPointerToBoolean:
747   case CK_ReinterpretMemberPointer:
748   case CK_IntegralToPointer:
749   case CK_PointerToIntegral:
750   case CK_PointerToBoolean:
751   case CK_ToVoid:
752   case CK_VectorSplat:
753   case CK_IntegralCast:
754   case CK_IntegralToBoolean:
755   case CK_IntegralToFloating:
756   case CK_FloatingToIntegral:
757   case CK_FloatingToBoolean:
758   case CK_FloatingCast:
759   case CK_CPointerToObjCPointerCast:
760   case CK_BlockPointerToObjCPointerCast:
761   case CK_AnyPointerToBlockPointerCast:
762   case CK_ObjCObjectLValueCast:
763   case CK_FloatingRealToComplex:
764   case CK_FloatingComplexToReal:
765   case CK_FloatingComplexToBoolean:
766   case CK_FloatingComplexCast:
767   case CK_FloatingComplexToIntegralComplex:
768   case CK_IntegralRealToComplex:
769   case CK_IntegralComplexToReal:
770   case CK_IntegralComplexToBoolean:
771   case CK_IntegralComplexCast:
772   case CK_IntegralComplexToFloatingComplex:
773   case CK_ARCProduceObject:
774   case CK_ARCConsumeObject:
775   case CK_ARCReclaimReturnedObject:
776   case CK_ARCExtendBlockObject:
777   case CK_CopyAndAutoreleaseBlockObject:
778   case CK_BuiltinFnToFnPtr:
779   case CK_ZeroToOCLEvent:
780     llvm_unreachable("cast kind invalid for aggregate types");
781   }
782 }
783 
784 void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
785   if (E->getCallReturnType()->isReferenceType()) {
786     EmitAggLoadOfLValue(E);
787     return;
788   }
789 
790   RValue RV = CGF.EmitCallExpr(E, getReturnValueSlot());
791   EmitMoveFromReturnSlot(E, RV);
792 }
793 
794 void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
795   RValue RV = CGF.EmitObjCMessageExpr(E, getReturnValueSlot());
796   EmitMoveFromReturnSlot(E, RV);
797 }
798 
799 void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
800   CGF.EmitIgnoredExpr(E->getLHS());
801   Visit(E->getRHS());
802 }
803 
804 void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
805   CodeGenFunction::StmtExprEvaluation eval(CGF);
806   CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
807 }
808 
809 void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
810   if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
811     VisitPointerToDataMemberBinaryOperator(E);
812   else
813     CGF.ErrorUnsupported(E, "aggregate binary expression");
814 }
815 
816 void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
817                                                     const BinaryOperator *E) {
818   LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
819   EmitFinalDestCopy(E->getType(), LV);
820 }
821 
822 /// Is the value of the given expression possibly a reference to or
823 /// into a __block variable?
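///
/// For example, given:
///   __block Agg x;
///   x = makeAgg();
/// the LHS refers to a __block variable, so if evaluating the RHS might
/// trigger a block copy, the RHS has to be evaluated before the address
/// of 'x' is computed (the copy can move 'x' to the heap).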
824 static bool isBlockVarRef(const Expr *E) {
825   // Make sure we look through parens.
826   E = E->IgnoreParens();
827 
828   // Check for a direct reference to a __block variable.
829   if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
830     const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
831     return (var && var->hasAttr<BlocksAttr>());
832   }
833 
834   // More complicated stuff.
835 
836   // Binary operators.
837   if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
838     // For an assignment or pointer-to-member operation, just care
839     // about the LHS.
840     if (op->isAssignmentOp() || op->isPtrMemOp())
841       return isBlockVarRef(op->getLHS());
842 
843     // For a comma, just care about the RHS.
844     if (op->getOpcode() == BO_Comma)
845       return isBlockVarRef(op->getRHS());
846 
847     // FIXME: pointer arithmetic?
848     return false;
849 
850   // Check both sides of a conditional operator.
851   } else if (const AbstractConditionalOperator *op
852                = dyn_cast<AbstractConditionalOperator>(E)) {
853     return isBlockVarRef(op->getTrueExpr())
854         || isBlockVarRef(op->getFalseExpr());
855 
856   // OVEs are required to support BinaryConditionalOperators.
857   } else if (const OpaqueValueExpr *op
858                = dyn_cast<OpaqueValueExpr>(E)) {
859     if (const Expr *src = op->getSourceExpr())
860       return isBlockVarRef(src);
861 
862   // Casts are necessary to get things like (*(int*)&var) = foo().
863   // We don't really care about the kind of cast here, except
864   // we don't want to look through l2r casts, because it's okay
865   // to get the *value* in a __block variable.
866   } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
867     if (cast->getCastKind() == CK_LValueToRValue)
868       return false;
869     return isBlockVarRef(cast->getSubExpr());
870 
871   // Handle unary operators.  Again, just aggressively look through
872   // it, ignoring the operation.
873   } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
874     return isBlockVarRef(uop->getSubExpr());
875 
876   // Look into the base of a field access.
877   } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
878     return isBlockVarRef(mem->getBase());
879 
880   // Look into the base of a subscript.
881   } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
882     return isBlockVarRef(sub->getBase());
883   }
884 
885   return false;
886 }
887 
888 void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
889   // For an assignment to work, the value on the right has
890   // to be compatible with the value on the left.
891   assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
892                                                  E->getRHS()->getType())
893          && "Invalid assignment");
894 
895   // If the LHS might be a __block variable, and the RHS can
896   // potentially cause a block copy, we need to evaluate the RHS first
897   // so that the assignment goes the right place.
898   // This is pretty semantically fragile.
899   if (isBlockVarRef(E->getLHS()) &&
900       E->getRHS()->HasSideEffects(CGF.getContext())) {
901     // Ensure that we have a destination, and evaluate the RHS into that.
902     EnsureDest(E->getRHS()->getType());
903     Visit(E->getRHS());
904 
905     // Now emit the LHS and copy into it.
906     LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
907 
908     // That copy is an atomic copy if the LHS is atomic.
909     if (LHS.getType()->isAtomicType()) {
910       CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
911       return;
912     }
913 
914     EmitCopy(E->getLHS()->getType(),
915              AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
916                                      needsGC(E->getLHS()->getType()),
917                                      AggValueSlot::IsAliased),
918              Dest);
919     return;
920   }
921 
922   LValue LHS = CGF.EmitLValue(E->getLHS());
923 
924   // If we have an atomic type, evaluate into the destination and then
925   // do an atomic copy.
926   if (LHS.getType()->isAtomicType()) {
927     EnsureDest(E->getRHS()->getType());
928     Visit(E->getRHS());
929     CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
930     return;
931   }
932 
933   // Codegen the RHS so that it stores directly into the LHS.
934   AggValueSlot LHSSlot =
935     AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
936                             needsGC(E->getLHS()->getType()),
937                             AggValueSlot::IsAliased);
  // A non-volatile aggregate destination might have a volatile member.
939   if (!LHSSlot.isVolatile() &&
940       CGF.hasVolatileMember(E->getLHS()->getType()))
941     LHSSlot.setVolatile(true);
942 
943   CGF.EmitAggExpr(E->getRHS(), LHSSlot);
944 
945   // Copy into the destination if the assignment isn't ignored.
946   EmitFinalDestCopy(E->getType(), LHS);
947 }
948 
949 void AggExprEmitter::
950 VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
951   llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
952   llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
953   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
954 
955   // Bind the common expression if necessary.
956   CodeGenFunction::OpaqueValueMapping binding(CGF, E);
957 
958   CodeGenFunction::ConditionalEvaluation eval(CGF);
959   CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
960 
961   // Save whether the destination's lifetime is externally managed.
962   bool isExternallyDestructed = Dest.isExternallyDestructed();
963 
964   eval.begin(CGF);
965   CGF.EmitBlock(LHSBlock);
966   Visit(E->getTrueExpr());
967   eval.end(CGF);
968 
969   assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
970   CGF.Builder.CreateBr(ContBlock);
971 
972   // If the result of an agg expression is unused, then the emission
973   // of the LHS might need to create a destination slot.  That's fine
974   // with us, and we can safely emit the RHS into the same slot, but
975   // we shouldn't claim that it's already being destructed.
976   Dest.setExternallyDestructed(isExternallyDestructed);
977 
978   eval.begin(CGF);
979   CGF.EmitBlock(RHSBlock);
980   Visit(E->getFalseExpr());
981   eval.end(CGF);
982 
983   CGF.EmitBlock(ContBlock);
984 }
985 
986 void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
987   Visit(CE->getChosenSubExpr(CGF.getContext()));
988 }
989 
990 void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
991   llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
992   llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());
993 
994   if (!ArgPtr) {
995     CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
996     return;
997   }
998 
999   EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType()));
1000 }
1001 
1002 void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
1003   // Ensure that we have a slot, but if we already do, remember
1004   // whether it was externally destructed.
1005   bool wasExternallyDestructed = Dest.isExternallyDestructed();
1006   EnsureDest(E->getType());
1007 
1008   // We're going to push a destructor if there isn't already one.
1009   Dest.setExternallyDestructed();
1010 
1011   Visit(E->getSubExpr());
1012 
1013   // Push that destructor we promised.
1014   if (!wasExternallyDestructed)
1015     CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddr());
1016 }
1017 
1018 void
1019 AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
1020   AggValueSlot Slot = EnsureSlot(E->getType());
1021   CGF.EmitCXXConstructExpr(E, Slot);
1022 }
1023 
1024 void
1025 AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
1026   AggValueSlot Slot = EnsureSlot(E->getType());
1027   CGF.EmitLambdaExpr(E, Slot);
1028 }
1029 
1030 void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
1031   CGF.enterFullExpression(E);
1032   CodeGenFunction::RunCleanupsScope cleanups(CGF);
1033   Visit(E->getSubExpr());
1034 }
1035 
1036 void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
1037   QualType T = E->getType();
1038   AggValueSlot Slot = EnsureSlot(T);
1039   EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
1040 }
1041 
1042 void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
1043   QualType T = E->getType();
1044   AggValueSlot Slot = EnsureSlot(T);
1045   EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
1046 }
1047 
1048 /// isSimpleZero - If emitting this value will obviously just cause a store of
1049 /// zero to memory, return true.  This can return false if uncertain, so it just
1050 /// handles simple cases.
1051 static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
1052   E = E->IgnoreParens();
1053 
1054   // 0
1055   if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
1056     return IL->getValue() == 0;
1057   // +0.0
1058   if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
1059     return FL->getValue().isPosZero();
1060   // int()
1061   if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
1062       CGF.getTypes().isZeroInitializable(E->getType()))
1063     return true;
1064   // (int*)0 - Null pointer expressions.
1065   if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
1066     return ICE->getCastKind() == CK_NullToPointer;
1067   // '\0'
1068   if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
1069     return CL->getValue() == 0;
1070 
1071   // Otherwise, hard case: conservatively return false.
1072   return false;
1073 }
1074 
1076 void
1077 AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV) {
1078   QualType type = LV.getType();
1079   // FIXME: Ignore result?
1080   // FIXME: Are initializers affected by volatile?
1081   if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
1082     // Storing "i32 0" to a zero'd memory location is a noop.
1083     return;
1084   } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
1085     return EmitNullInitializationToLValue(LV);
1086   } else if (type->isReferenceType()) {
1087     RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
1088     return CGF.EmitStoreThroughLValue(RV, LV);
1089   }
1090 
1091   switch (CGF.getEvaluationKind(type)) {
1092   case TEK_Complex:
1093     CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
1094     return;
1095   case TEK_Aggregate:
1096     CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
1097                                                AggValueSlot::IsDestructed,
1098                                       AggValueSlot::DoesNotNeedGCBarriers,
1099                                                AggValueSlot::IsNotAliased,
1100                                                Dest.isZeroed()));
1101     return;
1102   case TEK_Scalar:
1103     if (LV.isSimple()) {
1104       CGF.EmitScalarInit(E, /*D=*/0, LV, /*Captured=*/false);
1105     } else {
1106       CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
1107     }
1108     return;
1109   }
1110   llvm_unreachable("bad evaluation kind");
1111 }
1112 
1113 void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
1114   QualType type = lv.getType();
1115 
1116   // If the destination slot is already zeroed out before the aggregate is
1117   // copied into it, we don't have to emit any zeros here.
1118   if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
1119     return;
1120 
1121   if (CGF.hasScalarEvaluationKind(type)) {
1122     // For non-aggregates, we can store the appropriate null constant.
1123     llvm::Value *null = CGF.CGM.EmitNullConstant(type);
1124     // Note that the following is not equivalent to
1125     // EmitStoreThroughBitfieldLValue for ARC types.
1126     if (lv.isBitField()) {
1127       CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
1128     } else {
1129       assert(lv.isSimple());
1130       CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
1131     }
1132   } else {
1133     // There's a potential optimization opportunity in combining
1134     // memsets; that would be easy for arrays, but relatively
1135     // difficult for structures with the current code.
1136     CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
1137   }
1138 }
1139 
1140 void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
1141 #if 0
1142   // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
1143   // (Length of globals? Chunks of zeroed-out space?).
1144   //
1145   // If we can, prefer a copy from a global; this is a lot less code for long
1146   // globals, and it's easier for the current optimizers to analyze.
1147   if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
1148     llvm::GlobalVariable* GV =
1149     new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
1150                              llvm::GlobalValue::InternalLinkage, C, "");
1151     EmitFinalDestCopy(E->getType(), CGF.MakeAddrLValue(GV, E->getType()));
1152     return;
1153   }
1154 #endif
1155   if (E->hadArrayRangeDesignator())
1156     CGF.ErrorUnsupported(E, "GNU array range designator extension");
1157 
1158   if (E->initializesStdInitializerList()) {
1159     EmitStdInitializerList(Dest.getAddr(), E);
1160     return;
1161   }
1162 
1163   AggValueSlot Dest = EnsureSlot(E->getType());
1164   LValue DestLV = CGF.MakeAddrLValue(Dest.getAddr(), E->getType(),
1165                                      Dest.getAlignment());
1166 
1167   // Handle initialization of an array.
1168   if (E->getType()->isArrayType()) {
1169     if (E->isStringLiteralInit())
1170       return Visit(E->getInit(0));
1171 
1172     QualType elementType =
1173         CGF.getContext().getAsArrayType(E->getType())->getElementType();
1174 
1175     llvm::PointerType *APType =
1176       cast<llvm::PointerType>(Dest.getAddr()->getType());
1177     llvm::ArrayType *AType =
1178       cast<llvm::ArrayType>(APType->getElementType());
1179 
1180     EmitArrayInit(Dest.getAddr(), AType, elementType, E);
1181     return;
1182   }
1183 
1184   assert(E->getType()->isRecordType() && "Only support structs/unions here!");
1185 
1186   // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
1188   // the disadvantage is that the generated code is more difficult for
1189   // the optimizer, especially with bitfields.
1190   unsigned NumInitElements = E->getNumInits();
1191   RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();
1192 
1193   if (record->isUnion()) {
1194     // Only initialize one field of a union. The field itself is
1195     // specified by the initializer list.
1196     if (!E->getInitializedFieldInUnion()) {
1197       // Empty union; we have nothing to do.
1198 
1199 #ifndef NDEBUG
1200       // Make sure that it's really an empty and not a failure of
1201       // semantic analysis.
1202       for (RecordDecl::field_iterator Field = record->field_begin(),
1203                                    FieldEnd = record->field_end();
1204            Field != FieldEnd; ++Field)
1205         assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
1206 #endif
1207       return;
1208     }
1209 
1210     // FIXME: volatility
1211     FieldDecl *Field = E->getInitializedFieldInUnion();
1212 
1213     LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
1214     if (NumInitElements) {
1215       // Store the initializer into the field
1216       EmitInitializationToLValue(E->getInit(0), FieldLoc);
1217     } else {
1218       // Default-initialize to null.
1219       EmitNullInitializationToLValue(FieldLoc);
1220     }
1221 
1222     return;
1223   }
1224 
1225   // We'll need to enter cleanup scopes in case any of the member
1226   // initializers throw an exception.
1227   SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
1228   llvm::Instruction *cleanupDominator = 0;
1229 
1230   // Here we iterate over the fields; this makes it simpler to both
1231   // default-initialize fields and skip over unnamed fields.
1232   unsigned curInitIndex = 0;
1233   for (RecordDecl::field_iterator field = record->field_begin(),
1234                                fieldEnd = record->field_end();
1235        field != fieldEnd; ++field) {
1236     // We're done once we hit the flexible array member.
1237     if (field->getType()->isIncompleteArrayType())
1238       break;
1239 
1240     // Always skip anonymous bitfields.
1241     if (field->isUnnamedBitfield())
1242       continue;
1243 
1244     // We're done if we reach the end of the explicit initializers, we
1245     // have a zeroed object, and the rest of the fields are
1246     // zero-initializable.
1247     if (curInitIndex == NumInitElements && Dest.isZeroed() &&
1248         CGF.getTypes().isZeroInitializable(E->getType()))
1249       break;
1250 
1252     LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, *field);
    // We never generate write-barriers for initialized fields.
1254     LV.setNonGC(true);
1255 
1256     if (curInitIndex < NumInitElements) {
1257       // Store the initializer into the field.
1258       EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
1259     } else {
      // We're out of initializers; default-initialize to null.
1261       EmitNullInitializationToLValue(LV);
1262     }
1263 
1264     // Push a destructor if necessary.
1265     // FIXME: if we have an array of structures, all explicitly
1266     // initialized, we can end up pushing a linear number of cleanups.
1267     bool pushedCleanup = false;
1268     if (QualType::DestructionKind dtorKind
1269           = field->getType().isDestructedType()) {
1270       assert(LV.isSimple());
1271       if (CGF.needsEHCleanup(dtorKind)) {
1272         if (!cleanupDominator)
1273           cleanupDominator = CGF.Builder.CreateUnreachable(); // placeholder
1274 
1275         CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
1276                         CGF.getDestroyer(dtorKind), false);
1277         cleanups.push_back(CGF.EHStack.stable_begin());
1278         pushedCleanup = true;
1279       }
1280     }
1281 
1282     // If the GEP didn't get used because of a dead zero init or something
1283     // else, clean it up for -O0 builds and general tidiness.
1284     if (!pushedCleanup && LV.isSimple())
1285       if (llvm::GetElementPtrInst *GEP =
1286             dyn_cast<llvm::GetElementPtrInst>(LV.getAddress()))
1287         if (GEP->use_empty())
1288           GEP->eraseFromParent();
1289   }
1290 
1291   // Deactivate all the partial cleanups in reverse order, which
1292   // generally means popping them.
1293   for (unsigned i = cleanups.size(); i != 0; --i)
1294     CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);
1295 
1296   // Destroy the placeholder if we made one.
1297   if (cleanupDominator)
1298     cleanupDominator->eraseFromParent();
1299 }
1300 
1301 //===----------------------------------------------------------------------===//
1302 //                        Entry Points into this File
1303 //===----------------------------------------------------------------------===//
1304 
1305 /// GetNumNonZeroBytesInInit - Get an approximate count of the number of
1306 /// non-zero bytes that will be stored when outputting the initializer for the
1307 /// specified initializer expression.
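///
/// For example (assuming 4-byte ints):
///   struct S { int a, b, c, d; } s = { 1 };
/// stores only 4 non-zero bytes; the remaining 12 can come from a memset.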
1308 static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
1309   E = E->IgnoreParens();
1310 
1311   // 0 and 0.0 won't require any non-zero stores!
1312   if (isSimpleZero(E, CGF)) return CharUnits::Zero();
1313 
  // If this is an initlist expr, sum up the sizes of the (present)
  // elements.  If this is something weird, assume the whole thing is non-zero.
1316   const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
1317   if (ILE == 0 || !CGF.getTypes().isZeroInitializable(ILE->getType()))
1318     return CGF.getContext().getTypeSizeInChars(E->getType());
1319 
1320   // InitListExprs for structs have to be handled carefully.  If there are
1321   // reference members, we need to consider the size of the reference, not the
1322   // referencee.  InitListExprs for unions and arrays can't have references.
1323   if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
1324     if (!RT->isUnionType()) {
1325       RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
1326       CharUnits NumNonZeroBytes = CharUnits::Zero();
1327 
1328       unsigned ILEElement = 0;
1329       for (RecordDecl::field_iterator Field = SD->field_begin(),
1330            FieldEnd = SD->field_end(); Field != FieldEnd; ++Field) {
1331         // We're done once we hit the flexible array member or run out of
1332         // InitListExpr elements.
1333         if (Field->getType()->isIncompleteArrayType() ||
1334             ILEElement == ILE->getNumInits())
1335           break;
1336         if (Field->isUnnamedBitfield())
1337           continue;
1338 
1339         const Expr *E = ILE->getInit(ILEElement++);
1340 
1341         // Reference values are always non-null and have the width of a pointer.
1342         if (Field->getType()->isReferenceType())
1343           NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
1344               CGF.getContext().getTargetInfo().getPointerWidth(0));
1345         else
1346           NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
1347       }
1348 
1349       return NumNonZeroBytes;
1350     }
1351   }
1352 
1354   CharUnits NumNonZeroBytes = CharUnits::Zero();
1355   for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
1356     NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
1357   return NumNonZeroBytes;
1358 }
1359 
1360 /// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
1361 /// zeros in it, emit a memset and avoid storing the individual zeros.
1362 ///
1363 static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
1364                                      CodeGenFunction &CGF) {
1365   // If the slot is already known to be zeroed, nothing to do.  Don't mess with
1366   // volatile stores.
1367   if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == 0) return;
1368 
  // C++ objects with a user-declared constructor don't need zeroing.
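  // (E.g. for 'struct S { S(); int buf[64]; };' the constructor performs the
  //  initialization, so a preparatory memset would be wasted work.)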
1370   if (CGF.getLangOpts().CPlusPlus)
1371     if (const RecordType *RT = CGF.getContext()
1372                        .getBaseElementType(E->getType())->getAs<RecordType>()) {
1373       const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1374       if (RD->hasUserDeclaredConstructor())
1375         return;
1376     }
1377 
  // If the type is 16 bytes or smaller, prefer individual stores over memset.
1379   std::pair<CharUnits, CharUnits> TypeInfo =
1380     CGF.getContext().getTypeInfoInChars(E->getType());
1381   if (TypeInfo.first <= CharUnits::fromQuantity(16))
1382     return;
1383 
  // Check to see if over 3/4 of the initializer's bytes are known to be
  // zero.  If so, we prefer to emit a memset followed by individual stores
  // for the rest.
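  // (E.g. for a 64-byte aggregate, the memset path is taken only when at
  //  most 16 bytes are known to be non-zero.)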
1386   CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
1387   if (NumNonZeroBytes*4 > TypeInfo.first)
1388     return;
1389 
  // Okay, it seems like a good idea to use an initial memset; emit the call.
1391   llvm::Constant *SizeVal = CGF.Builder.getInt64(TypeInfo.first.getQuantity());
1392   CharUnits Align = TypeInfo.second;
1393 
1394   llvm::Value *Loc = Slot.getAddr();
1395 
1396   Loc = CGF.Builder.CreateBitCast(Loc, CGF.Int8PtrTy);
1397   CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal,
1398                            Align.getQuantity(), false);
1399 
1400   // Tell the AggExprEmitter that the slot is known zero.
1401   Slot.setZeroed();
1402 }
1403 
1404 
1405 
1406 
/// EmitAggExpr - Emit the computation of the specified expression of aggregate
/// type.  The result is computed into the address of the given slot.  Note
/// that if the slot is ignored, the value of the aggregate expression is not
/// needed; otherwise the slot must provide a valid address.
1411 void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
1412   assert(E && hasAggregateEvaluationKind(E->getType()) &&
1413          "Invalid aggregate expression to emit");
1414   assert((Slot.getAddr() != 0 || Slot.isIgnored()) &&
1415          "slot has bits but no address");
1416 
1417   // Optimize the slot if possible.
1418   CheckAggExprForMemSetUse(Slot, E, *this);
1419 
1420   AggExprEmitter(*this, Slot).Visit(const_cast<Expr*>(E));
1421 }
1422 
1423 LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
1424   assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
1425   llvm::Value *Temp = CreateMemTemp(E->getType());
1426   LValue LV = MakeAddrLValue(Temp, E->getType());
1427   EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
1428                                          AggValueSlot::DoesNotNeedGCBarriers,
1429                                          AggValueSlot::IsNotAliased));
1430   return LV;
1431 }
1432 
1433 void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
1434                                         llvm::Value *SrcPtr, QualType Ty,
1435                                         bool isVolatile,
1436                                         CharUnits alignment,
1437                                         bool isAssignment) {
1438   assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
1439 
1440   if (getLangOpts().CPlusPlus) {
1441     if (const RecordType *RT = Ty->getAs<RecordType>()) {
1442       CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
1443       assert((Record->hasTrivialCopyConstructor() ||
1444               Record->hasTrivialCopyAssignment() ||
1445               Record->hasTrivialMoveConstructor() ||
1446               Record->hasTrivialMoveAssignment()) &&
1447              "Trying to aggregate-copy a type without a trivial copy/move "
1448              "constructor or assignment operator");
1449       // Ignore empty classes in C++.
1450       if (Record->isEmpty())
1451         return;
1452     }
1453   }
1454 
1455   // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
1456   // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in any way the storage of the first
1458   // object, then the overlap shall be exact and the two objects shall have
1459   // qualified or unqualified versions of a compatible type."
1460   //
1461   // memcpy is not defined if the source and destination pointers are exactly
1462   // equal, but other compilers do this optimization, and almost every memcpy
1463   // implementation handles this case safely.  If there is a libc that does not
1464   // safely handle this, we can add a target hook.
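  // (E.g. a self-assignment 's1 = s1;' of a struct lowers to a memcpy whose
  //  source and destination pointers are equal.)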
1465 
  // Get data size and alignment info for this aggregate. If this is an
  // assignment, don't copy the tail padding. Otherwise copying it is fine.
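  // E.g. (a sketch, assuming 4-byte int and alignment) for
  //   struct S { int i; char c; };
  // sizeof(S) is 8 but the data size is 5; an assignment copies only 5 bytes
  // because, when S is a base subobject, a derived class may place its own
  // fields in the tail padding.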
1468   std::pair<CharUnits, CharUnits> TypeInfo;
1469   if (isAssignment)
1470     TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
1471   else
1472     TypeInfo = getContext().getTypeInfoInChars(Ty);
1473 
1474   if (alignment.isZero())
1475     alignment = TypeInfo.second;
1476 
1477   // FIXME: Handle variable sized types.
1478 
1479   // FIXME: If we have a volatile struct, the optimizer can remove what might
1480   // appear to be `extra' memory ops:
1481   //
1482   // volatile struct { int i; } a, b;
1483   //
1484   // int main() {
1485   //   a = b;
1486   //   a = b;
1487   // }
1488   //
1489   // we need to use a different call here.  We use isVolatile to indicate when
1490   // either the source or the destination is volatile.
1491 
1492   llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
1493   llvm::Type *DBP =
1494     llvm::Type::getInt8PtrTy(getLLVMContext(), DPT->getAddressSpace());
1495   DestPtr = Builder.CreateBitCast(DestPtr, DBP);
1496 
1497   llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
1498   llvm::Type *SBP =
1499     llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
1500   SrcPtr = Builder.CreateBitCast(SrcPtr, SBP);
1501 
1502   // Don't do any of the memmove_collectable tests if GC isn't set.
1503   if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
1504     // fall through
1505   } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
1506     RecordDecl *Record = RecordTy->getDecl();
1507     if (Record->hasObjectMember()) {
1508       CharUnits size = TypeInfo.first;
1509       llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
1510       llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
1511       CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
1512                                                     SizeVal);
1513       return;
1514     }
1515   } else if (Ty->isArrayType()) {
1516     QualType BaseType = getContext().getBaseElementType(Ty);
1517     if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
1518       if (RecordTy->getDecl()->hasObjectMember()) {
1519         CharUnits size = TypeInfo.first;
1520         llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
1521         llvm::Value *SizeVal =
1522           llvm::ConstantInt::get(SizeTy, size.getQuantity());
1523         CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
1524                                                       SizeVal);
1525         return;
1526       }
1527     }
1528   }
1529 
1530   // Determine the metadata to describe the position of any padding in this
1531   // memcpy, as well as the TBAA tags for the members of the struct, in case
  // the optimizer wishes to expand it into scalar memory operations.
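  // (As a sketch, a !tbaa.struct node is a list of (offset, size, TBAA tag)
  //  triples, one per field; e.g. for 'struct { int i; float f; }' it would
  //  describe a 4-byte int at offset 0 and a 4-byte float at offset 4,
  //  assuming 4-byte ints and floats.)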
1533   llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty);
1534 
1535   Builder.CreateMemCpy(DestPtr, SrcPtr,
1536                        llvm::ConstantInt::get(IntPtrTy,
1537                                               TypeInfo.first.getQuantity()),
1538                        alignment.getQuantity(), isVolatile,
1539                        /*TBAATag=*/0, TBAAStructTag);
1540 }
1541 
1542 void CodeGenFunction::MaybeEmitStdInitializerListCleanup(llvm::Value *loc,
1543                                                          const Expr *init) {
1544   const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(init);
1545   if (cleanups)
1546     init = cleanups->getSubExpr();
1547 
1548   if (isa<InitListExpr>(init) &&
1549       cast<InitListExpr>(init)->initializesStdInitializerList()) {
1550     // We initialized this std::initializer_list with an initializer list.
1551     // A backing array was created. Push a cleanup for it.
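    // E.g. (a sketch): for 'std::initializer_list<Obj> il = { Obj() };' the
    // backing array holds one Obj that must be destroyed when 'il' goes out
    // of scope.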
1552     EmitStdInitializerListCleanup(loc, cast<InitListExpr>(init));
1553   }
1554 }
1555 
1556 static void EmitRecursiveStdInitializerListCleanup(CodeGenFunction &CGF,
1557                                                    llvm::Value *arrayStart,
1558                                                    const InitListExpr *init) {
1559   // Check if there are any recursive cleanups to do, i.e. if we have
1560   //   std::initializer_list<std::initializer_list<obj>> list = {{obj()}};
1561   // then we need to destroy the inner array as well.
1562   for (unsigned i = 0, e = init->getNumInits(); i != e; ++i) {
1563     const InitListExpr *subInit = dyn_cast<InitListExpr>(init->getInit(i));
1564     if (!subInit || !subInit->initializesStdInitializerList())
1565       continue;
1566 
1567     // This one needs to be destroyed. Get the address of the std::init_list.
1568     llvm::Value *offset = llvm::ConstantInt::get(CGF.SizeTy, i);
    llvm::Value *loc = CGF.Builder.CreateInBoundsGEP(arrayStart, offset,
                                                     "std.initlist");
1571     CGF.EmitStdInitializerListCleanup(loc, subInit);
1572   }
1573 }
1574 
1575 void CodeGenFunction::EmitStdInitializerListCleanup(llvm::Value *loc,
1576                                                     const InitListExpr *init) {
1577   ASTContext &ctx = getContext();
1578   QualType element = GetStdInitializerListElementType(init->getType());
1579   unsigned numInits = init->getNumInits();
1580   llvm::APInt size(ctx.getTypeSize(ctx.getSizeType()), numInits);
  QualType array =
      ctx.getConstantArrayType(element, size, ArrayType::Normal, 0);
1582   QualType arrayPtr = ctx.getPointerType(array);
1583   llvm::Type *arrayPtrType = ConvertType(arrayPtr);
1584 
  // 'loc' is the location of a std::initializer_list, whose first element
  // is a pointer to the array we want to destroy.
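  // (As a sketch, since the exact layout is library-dependent, something
  //  like:
  //    template <class E>
  //    class initializer_list { const E *begin; size_t size; };
  //  so field 0 holds the pointer to the backing array.)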
1587   llvm::Value *startPointer = Builder.CreateStructGEP(loc, 0, "startPointer");
1588   llvm::Value *startAddress = Builder.CreateLoad(startPointer, "startAddress");
1589 
1590   ::EmitRecursiveStdInitializerListCleanup(*this, startAddress, init);
1591 
1592   llvm::Value *arrayAddress =
1593       Builder.CreateBitCast(startAddress, arrayPtrType, "arrayAddress");
1594   ::EmitStdInitializerListCleanup(*this, array, arrayAddress, init);
1595 }
1596