//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Aggregate Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGObjCRuntime.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                        Aggregate Expression Emitter
//===----------------------------------------------------------------------===//

namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  AggValueSlot Dest;
  bool IgnoreResult;

  /// We want to use 'dest' as the return slot except under two
  /// conditions:
  ///   - The destination slot requires garbage collection, so we
  ///     need to use the GC API.
  ///   - The destination slot is potentially aliased.
  bool shouldUseDestForReturnSlot() const {
    return !(Dest.requiresGCollection() || Dest.isPotentiallyAliased());
  }

  ReturnValueSlot getReturnValueSlot() const {
    if (!shouldUseDestForReturnSlot())
      return ReturnValueSlot();

    return ReturnValueSlot(Dest.getAddr(), Dest.isVolatile());
  }

  AggValueSlot EnsureSlot(QualType T) {
    if (!Dest.isIgnored()) return Dest;
    return CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }

public:
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest,
                 bool ignore)
    : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
      IgnoreResult(ignore) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents an lvalue, this method emits the address of the lvalue,
  /// then loads the result into DestPtr.
  void EmitAggLoadOfLValue(const Expr *E);

  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
  void EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore = false);
  void EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore = false);

  void EmitMoveFromReturnSlot(const Expr *E, RValue Src);

  AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
    if (CGF.getLangOptions().getGCMode() && TypeRequiresGCollection(T))
      return AggValueSlot::NeedsGCBarriers;
    return AggValueSlot::DoesNotNeedGCBarriers;
  }

  bool TypeRequiresGCollection(QualType T);

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  void VisitStmt(Stmt *S) {
    CGF.ErrorUnsupported(S, "aggregate expression");
  }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    Visit(GE->getResultExpr());
  }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }

  // l-values.
  void VisitDeclRefExpr(DeclRefExpr *DRE) { EmitAggLoadOfLValue(DRE); }
  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  // Operators.
  void VisitCastExpr(CastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E);

  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
  void VisitChooseExpr(const ChooseExpr *CE);
  void VisitInitListExpr(InitListExpr *E);
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    Visit(DAE->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitExprWithCleanups(ExprWithCleanups *E);
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
  void VisitOpaqueValueExpr(OpaqueValueExpr *E);

  void VisitVAArgExpr(VAArgExpr *E);

  void EmitInitializationToLValue(Expr *E, LValue Address);
  void EmitNullInitializationToLValue(LValue Address);
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
};
}  // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// represents an lvalue, this method emits the address of the lvalue,
/// then loads the result into DestPtr.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
  LValue LV = CGF.EmitLValue(E);
  EmitFinalDestCopy(E, LV);
}

/// \brief True if the given aggregate type requires special GC API calls.
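/// For example (illustrative only): under -fobjc-gc, a trivially copyable
/// struct such as
///   struct Pair { id first; id second; };
/// has object members, so copying it must go through the collector-aware
/// memmove entry point rather than a plain memcpy.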
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
  // Only record types have members that might require garbage collection.
  const RecordType *RecordTy = T->getAs<RecordType>();
  if (!RecordTy) return false;

  // Don't mess with non-trivial C++ types.
  RecordDecl *Record = RecordTy->getDecl();
  if (isa<CXXRecordDecl>(Record) &&
      (!cast<CXXRecordDecl>(Record)->hasTrivialCopyConstructor() ||
       !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    return false;

  // Check whether the type has an object member.
  return Record->hasObjectMember();
}

/// \brief Perform the final move to DestPtr if for some reason
/// getReturnValueSlot() didn't use it directly.
///
/// The idea is that you do something like this:
///   RValue Result = EmitSomething(..., getReturnValueSlot());
///   EmitMoveFromReturnSlot(E, Result);
///
/// If nothing interferes, this will cause the result to be emitted
/// directly into the return value slot.  Otherwise, a final move
/// will be performed.
void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue Src) {
  if (shouldUseDestForReturnSlot()) {
    // Logically, Dest.getAddr() should equal Src.getAggregateAddr().
    // The possibility of undef rvalues complicates that a lot,
    // though, so we can't really assert.
    return;
  }

  // Otherwise, do a final copy.
  assert(Dest.getAddr() != Src.getAggregateAddr());
  EmitFinalDestCopy(E, Src, /*Ignore*/ true);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) {
  assert(Src.isAggregate() && "value must be aggregate value!");

  // If Dest is ignored, then we're evaluating an aggregate expression
  // in a context (like an expression statement) that doesn't care
  // about the result.  C says that an lvalue-to-rvalue conversion is
  // performed in these cases; C++ says that it is not.  In either
  // case, we don't actually need to do anything unless the value is
  // volatile.
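  //
  // Illustrative case: given 'volatile struct S vs;', the C statement
  //   vs;
  // must still perform a volatile read of 'vs', while in C++ it need not.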
  if (Dest.isIgnored()) {
    if (!Src.isVolatileQualified() ||
        CGF.CGM.getLangOptions().CPlusPlus ||
        (IgnoreResult && Ignore))
      return;

    // If the source is volatile, we must read from it; to do that, we need
    // some place to put it.
    Dest = CGF.CreateAggTemp(E->getType(), "agg.tmp");
  }

  if (Dest.requiresGCollection()) {
    CharUnits size = CGF.getContext().getTypeSizeInChars(E->getType());
    llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
    llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
                                                      Dest.getAddr(),
                                                      Src.getAggregateAddr(),
                                                      SizeVal);
    return;
  }
  // If the result of the assignment is used, copy the LHS there also.
  // FIXME: Pass VolatileDest as well.  I think we also need to merge volatile
  // from the source as well, as we can't eliminate it if either operand
  // is volatile, unless the copy has volatile for both source and destination.
  CGF.EmitAggregateCopy(Dest.getAddr(), Src.getAggregateAddr(), E->getType(),
                        Dest.isVolatile()|Src.isVolatileQualified());
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) {
  assert(Src.isSimple() && "Can't have aggregate bitfield, vector, etc");

  EmitFinalDestCopy(E, RValue::getAggregate(Src.getAddress(),
                                            Src.isVolatileQualified()),
                    Ignore);
}

//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
  Visit(E->GetTemporaryExpr());
}

void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
  EmitFinalDestCopy(e, CGF.getOpaqueLValueMapping(e));
}

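// An illustrative example of the aliasing problem handled below: in
//   s = (struct S){ s.b, s.a };
// the compound literal reads 's' while the assignment writes it, so
// emitting the literal directly into 's' would produce the wrong result.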
void
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
  if (E->getType().isPODType(CGF.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    // FIXME: This is a band-aid; the real problem appears to be in our handling
    // of assignments, where we store directly into the LHS without checking
    // whether anything in the RHS aliases.
    EmitAggLoadOfLValue(E);
    return;
  }

  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitAggExpr(E->getInitializer(), Slot);
}

void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  switch (E->getCastKind()) {
  case CK_Dynamic: {
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr());
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (!Dest.isIgnored())
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    if (Dest.isIgnored()) break;

    // GCC union extension
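    // (Illustrative: given 'union U { int i; float f; };', the GCC
    // extension '(union U)42' initializes the 'i' member in place.)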
    QualType Ty = E->getSubExpr()->getType();
    QualType PtrTy = CGF.getContext().getPointerType(Ty);
    llvm::Value *CastPtr = Builder.CreateBitCast(Dest.getAddr(),
                                                 CGF.ConvertType(PtrTy));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty));
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    assert(0 && "cannot perform hierarchy conversion in EmitAggExpr: "
                "should have been unpacked before we got here");
    break;
  }

  case CK_GetObjCProperty: {
    LValue LV = CGF.EmitLValue(E->getSubExpr());
    assert(LV.isPropertyRef());
    RValue RV = CGF.EmitLoadOfPropertyRefLValue(LV, getReturnValueSlot());
    EmitMoveFromReturnSlot(E, RV);
    break;
  }

  case CK_LValueToRValue: // hope for downstream optimization
  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
    break;

  case CK_Dependent:
  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_AnyPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_ObjCProduceObject:
  case CK_ObjCConsumeObject:
  case CK_ObjCReclaimReturnedObject:
    llvm_unreachable("cast kind invalid for aggregate types");
  }
}

void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
  if (E->getCallReturnType()->isReferenceType()) {
    EmitAggLoadOfLValue(E);
    return;
  }

  RValue RV = CGF.EmitCallExpr(E, getReturnValueSlot());
  EmitMoveFromReturnSlot(E, RV);
}

void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  RValue RV = CGF.EmitObjCMessageExpr(E, getReturnValueSlot());
  EmitMoveFromReturnSlot(E, RV);
}

void AggExprEmitter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
  llvm_unreachable("direct property access not surrounded by "
                   "lvalue-to-rvalue cast");
}

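// For a comma expression of aggregate type, e.g. (illustrative)
//   S s = (f(), g());
// the LHS is emitted only for its side effects and the RHS is emitted
// into the destination slot.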
void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  Visit(E->getRHS());
}

void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}

void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
  if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
    VisitPointerToDataMemberBinaryOperator(E);
  else
    CGF.ErrorUnsupported(E, "aggregate binary expression");
}

void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
                                                    const BinaryOperator *E) {
  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
  EmitFinalDestCopy(E, LV);
}

void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");

  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->getLHS()))
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
      if (VD->hasAttr<BlocksAttr>() &&
          E->getRHS()->HasSideEffects(CGF.getContext())) {
        // When a __block variable is on the LHS, the RHS must be evaluated
        // first, as it may change the 'forwarding' field via a call to
        // Block_copy.
        LValue RHS = CGF.EmitLValue(E->getRHS());
        LValue LHS = CGF.EmitLValue(E->getLHS());
        Dest = AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                                       needsGC(E->getLHS()->getType()));
        EmitFinalDestCopy(E, RHS, true);
        return;
      }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // We have to special case property setters, otherwise we must have
  // a simple lvalue (no aggregates inside vectors, bitfields).
  if (LHS.isPropertyRef()) {
    const ObjCPropertyRefExpr *RE = LHS.getPropertyRefExpr();
    QualType ArgType = RE->getSetterArgType();
    RValue Src;
    if (ArgType->isReferenceType())
      Src = CGF.EmitReferenceBindingToExpr(E->getRHS(), 0);
    else {
      AggValueSlot Slot = EnsureSlot(E->getRHS()->getType());
      CGF.EmitAggExpr(E->getRHS(), Slot);
      Src = Slot.asRValue();
    }
    CGF.EmitStoreThroughPropertyRefLValue(Src, LHS);
  } else {
    // Codegen the RHS so that it stores directly into the LHS.
    AggValueSlot LHSSlot =
      AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                              needsGC(E->getLHS()->getType()));
    CGF.EmitAggExpr(E->getRHS(), LHSSlot, false);
    EmitFinalDestCopy(E, LHS, true);
  }
}

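// Both arms of a conditional of aggregate type are emitted into the same
// destination slot; e.g. (illustrative)
//   S s = cond ? a : b;
// branches on 'cond' and initializes 's' in whichever block is taken.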
void AggExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);

  // Save whether the destination's lifetime is externally managed.
  bool DestLifetimeManaged = Dest.isLifetimeExternallyManaged();

  eval.begin(CGF);
  CGF.EmitBlock(LHSBlock);
  Visit(E->getTrueExpr());
  eval.end(CGF);

  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
  CGF.Builder.CreateBr(ContBlock);

  // If the result of an agg expression is unused, then the emission
  // of the LHS might need to create a destination slot.  That's fine
  // with us, and we can safely emit the RHS into the same slot, but
  // we shouldn't claim that its lifetime is externally managed.
  Dest.setLifetimeExternallyManaged(DestLifetimeManaged);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Visit(E->getFalseExpr());
  eval.end(CGF);

  CGF.EmitBlock(ContBlock);
}

void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
  Visit(CE->getChosenSubExpr(CGF.getContext()));
}

void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
  llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());

  if (!ArgPtr) {
    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
    return;
  }

  EmitFinalDestCopy(VE, CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}

void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
  // Ensure that we have a slot, but if we already do, remember
  // whether its lifetime was externally managed.
  bool WasManaged = Dest.isLifetimeExternallyManaged();
  Dest = EnsureSlot(E->getType());
  Dest.setLifetimeExternallyManaged();

  Visit(E->getSubExpr());

  // Set up the temporary's destructor if its lifetime wasn't already
  // being managed.
  if (!WasManaged)
    CGF.EmitCXXTemporary(E->getTemporary(), Dest.getAddr());
}

void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitCXXConstructExpr(E, Slot);
}

void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
  CGF.EmitExprWithCleanups(E, Dest);
}

void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
}

void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
}

/// isSimpleZero - If emitting this value will obviously just cause a store of
/// zero to memory, return true.  This can return false if uncertain, so it just
/// handles simple cases.
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0
  if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
    return IL->getValue() == 0;
  // +0.0
  if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
    return FL->getValue().isPosZero();
  // int()
  if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
      CGF.getTypes().isZeroInitializable(E->getType()))
    return true;
  // (int*)0 - Null pointer expressions.
  if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
    return ICE->getCastKind() == CK_NullToPointer;
  // '\0'
  if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
    return CL->getValue() == 0;

  // Otherwise, hard case: conservatively return false.
  return false;
}

void
AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV) {
  QualType type = LV.getType();
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zeroed memory location is a no-op.
  } else if (isa<ImplicitValueInitExpr>(E)) {
    EmitNullInitializationToLValue(LV);
  } else if (type->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
    CGF.EmitStoreThroughLValue(RV, LV);
  } else if (type->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
  } else if (CGF.hasAggregateLLVMType(type)) {
    CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
                                               AggValueSlot::IsDestructed,
                                      AggValueSlot::DoesNotNeedGCBarriers,
                                               AggValueSlot::IsNotAliased,
                                               Dest.isZeroed()));
  } else if (LV.isSimple()) {
    CGF.EmitScalarInit(E, /*D=*/0, LV, /*Captured=*/false);
  } else {
    CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
  }
}

void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
  QualType type = lv.getType();

  // If the destination slot is already zeroed out before the aggregate is
  // copied into it, we don't have to emit any zeros here.
  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
    return;

  if (!CGF.hasAggregateLLVMType(type)) {
    // For non-aggregates, we can store zero.
    llvm::Value *null = llvm::Constant::getNullValue(CGF.ConvertType(type));
    CGF.EmitStoreThroughLValue(RValue::get(null), lv);
  } else {
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
  }
}

void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#if 0
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
  // (Length of globals? Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
    llvm::GlobalVariable* GV =
    new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                             llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(E, CGF.MakeAddrLValue(GV, E->getType()));
    return;
  }
#endif
  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  llvm::Value *DestPtr = Dest.getAddr();

  // Handle initialization of an array.
  if (E->getType()->isArrayType()) {
    llvm::PointerType *APType =
      cast<llvm::PointerType>(DestPtr->getType());
    llvm::ArrayType *AType =
      cast<llvm::ArrayType>(APType->getElementType());

    uint64_t NumInitElements = E->getNumInits();

    if (E->getNumInits() > 0) {
      QualType T1 = E->getType();
      QualType T2 = E->getInit(0)->getType();
      if (CGF.getContext().hasSameUnqualifiedType(T1, T2)) {
        EmitAggLoadOfLValue(E->getInit(0));
        return;
      }
    }

    uint64_t NumArrayElements = AType->getNumElements();
    assert(NumInitElements <= NumArrayElements);

    QualType elementType = E->getType().getCanonicalType();
    elementType = CGF.getContext().getQualifiedType(
                    cast<ArrayType>(elementType)->getElementType(),
                    elementType.getQualifiers() + Dest.getQualifiers());

    // DestPtr is an array*.  Construct an elementType* by drilling
    // down a level.
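    // (Illustrative IR: for 'int a[10]', this GEP turns the [10 x i32]*
    // destination into an i32* pointing at element 0.)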
    llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
    llvm::Value *indices[] = { zero, zero };
    llvm::Value *begin =
      Builder.CreateInBoundsGEP(DestPtr, indices, "arrayinit.begin");

    // Exception safety requires us to destroy all the
    // already-constructed members if an initializer throws.
    // For that, we'll need an EH cleanup.
    QualType::DestructionKind dtorKind = elementType.isDestructedType();
    llvm::AllocaInst *endOfInit = 0;
    EHScopeStack::stable_iterator cleanup;
    if (CGF.needsEHCleanup(dtorKind)) {
      // In principle we could tell the cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex.  Therefore we go through an
      // alloca.
      endOfInit = CGF.CreateTempAlloca(begin->getType(),
                                       "arrayinit.endOfInit");
      Builder.CreateStore(begin, endOfInit);
      CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                           CGF.getDestroyer(dtorKind));
      cleanup = CGF.EHStack.stable_begin();

    // Otherwise, remember that we didn't need a cleanup.
    } else {
      dtorKind = QualType::DK_none;
    }

    llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

    // The 'current element to initialize'.  The invariants on this
    // variable are complicated.  Essentially, after each iteration of
    // the loop, it points to the last initialized element, except
    // that it points to the beginning of the array before any
    // elements have been initialized.
    llvm::Value *element = begin;

    // Emit the explicit initializers.
    for (uint64_t i = 0; i != NumInitElements; ++i) {
      // Advance to the next element.
      if (i > 0) {
        element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");

        // Tell the cleanup that it needs to destroy up to this
        // element.  TODO: some of these stores can be trivially
        // observed to be unnecessary.
        if (endOfInit) Builder.CreateStore(element, endOfInit);
      }

      LValue elementLV = CGF.MakeAddrLValue(element, elementType);
      EmitInitializationToLValue(E->getInit(i), elementLV);
    }

    // Check whether there's a non-trivial array-fill expression.
    // Note that this will be a CXXConstructExpr even if the element
    // type is an array (or array of array, etc.) of class type.
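    // (Illustrative: for 'S arr[4] = { S(1) };', elements 1..3 are
    // filled by the default-constructor CXXConstructExpr filler below.)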
    Expr *filler = E->getArrayFiller();
    bool hasTrivialFiller = true;
    if (CXXConstructExpr *cons = dyn_cast_or_null<CXXConstructExpr>(filler)) {
      assert(cons->getConstructor()->isDefaultConstructor());
      hasTrivialFiller = cons->getConstructor()->isTrivial();
    }

    // Any remaining elements need to be zero-initialized, possibly
    // using the filler expression.  We can skip this if we're
    // emitting to zeroed memory.
    if (NumInitElements != NumArrayElements &&
        !(Dest.isZeroed() && hasTrivialFiller &&
          CGF.getTypes().isZeroInitializable(elementType))) {

      // Use an actual loop.  This is basically
      //   do { *array++ = filler; } while (array != end);

      // Advance to the start of the rest of the array.
      if (NumInitElements) {
        element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
        if (endOfInit) Builder.CreateStore(element, endOfInit);
      }

      // Compute the end of the array.
      llvm::Value *end = Builder.CreateInBoundsGEP(begin,
                        llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
                                                   "arrayinit.end");

      llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
      llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

      // Jump into the body.
      CGF.EmitBlock(bodyBB);
      llvm::PHINode *currentElement =
        Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
      currentElement->addIncoming(element, entryBB);

      // Emit the actual filler expression.
      LValue elementLV = CGF.MakeAddrLValue(currentElement, elementType);
      if (filler)
        EmitInitializationToLValue(filler, elementLV);
      else
        EmitNullInitializationToLValue(elementLV);

      // Move on to the next element.
      llvm::Value *nextElement =
        Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");

      // Tell the EH cleanup that we finished with the last element.
      if (endOfInit) Builder.CreateStore(nextElement, endOfInit);

      // Leave the loop if we're done.
      llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
                                               "arrayinit.done");
      llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
      Builder.CreateCondBr(done, endBB, bodyBB);
      currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

      CGF.EmitBlock(endBB);
    }

    // Leave the partial-array cleanup if we entered one.
    if (dtorKind) CGF.DeactivateCleanupBlock(cleanup);

    return;
  }

  assert(E->getType()->isRecordType() && "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = E->getNumInits();
  RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();

  if (record->isUnion()) {
    // Only initialize one field of a union. The field itself is
    // specified by the initializer list.
    if (!E->getInitializedFieldInUnion()) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
      // Make sure that it's really an empty union and not a failure of
      // semantic analysis.
      for (RecordDecl::field_iterator Field = record->field_begin(),
                                   FieldEnd = record->field_end();
           Field != FieldEnd; ++Field)
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = E->getInitializedFieldInUnion();

    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, Field, 0);
    if (NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(0), FieldLoc);
    } else {
      // Default-initialize to null.
      EmitNullInitializationToLValue(FieldLoc);
    }

    return;
  }

  // We'll need to enter cleanup scopes in case any of the member
  // initializers throw an exception.
  SmallVector<EHScopeStack::stable_iterator, 16> cleanups;

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  unsigned curInitIndex = 0;
  for (RecordDecl::field_iterator field = record->field_begin(),
                               fieldEnd = record->field_end();
       field != fieldEnd; ++field) {
    // We're done once we hit the flexible array member.
    if (field->getType()->isIncompleteArrayType())
      break;

    // Always skip anonymous bitfields.
    if (field->isUnnamedBitfield())
      continue;

    // We're done if we reach the end of the explicit initializers, we
    // have a zeroed object, and the rest of the fields are
    // zero-initializable.
    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
        CGF.getTypes().isZeroInitializable(E->getType()))
      break;

    // FIXME: volatility
    LValue LV = CGF.EmitLValueForFieldInitialization(DestPtr, *field, 0);
    // We never generate write-barriers for initialized fields.
    LV.setNonGC(true);

    if (curInitIndex < NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
    } else {
      // We're out of initializers; default-initialize to null.
      EmitNullInitializationToLValue(LV);
    }

    // Push a destructor if necessary.
    // FIXME: if we have an array of structures, all explicitly
    // initialized, we can end up pushing a linear number of cleanups.
    bool pushedCleanup = false;
    if (QualType::DestructionKind dtorKind
          = field->getType().isDestructedType()) {
      assert(LV.isSimple());
      if (CGF.needsEHCleanup(dtorKind)) {
        CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
                        CGF.getDestroyer(dtorKind), false);
        cleanups.push_back(CGF.EHStack.stable_begin());
        pushedCleanup = true;
      }
    }

    // If the GEP didn't get used because of a dead zero init or something
    // else, clean it up for -O0 builds and general tidiness.
    if (!pushedCleanup && LV.isSimple())
      if (llvm::GetElementPtrInst *GEP =
            dyn_cast<llvm::GetElementPtrInst>(LV.getAddress()))
        if (GEP->use_empty())
          GEP->eraseFromParent();
  }

  // Deactivate all the partial cleanups in reverse order, which
  // generally means popping them.
  for (unsigned i = cleanups.size(); i != 0; --i)
    CGF.DeactivateCleanupBlock(cleanups[i-1]);
}

//===----------------------------------------------------------------------===//
//                        Entry Points into this File
//===----------------------------------------------------------------------===//

/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
/// non-zero bytes that will be stored when outputting the initializer for the
/// specified initializer expression.
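/// For example (illustrative): for 'struct { int a, b, c, d; } x = { 1 };'
/// the estimate is 4 bytes, since only 'a' is stored with a non-zero value.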
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0 and 0.0 won't require any non-zero stores!
  if (isSimpleZero(E, CGF)) return CharUnits::Zero();

  // If this is an initlist expr, sum up the sizes of the (present)
  // elements.  If this is something weird, assume the whole thing is non-zero.
  const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
  if (ILE == 0 || !CGF.getTypes().isZeroInitializable(ILE->getType()))
    return CGF.getContext().getTypeSizeInChars(E->getType());

  // InitListExprs for structs have to be handled carefully.  If there are
  // reference members, we need to consider the size of the reference, not the
  // referencee.  InitListExprs for unions and arrays can't have references.
  if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
    if (!RT->isUnionType()) {
      RecordDecl *SD = RT->getDecl();
      CharUnits NumNonZeroBytes = CharUnits::Zero();

      unsigned ILEElement = 0;
      for (RecordDecl::field_iterator Field = SD->field_begin(),
           FieldEnd = SD->field_end(); Field != FieldEnd; ++Field) {
        // We're done once we hit the flexible array member or run out of
        // InitListExpr elements.
        if (Field->getType()->isIncompleteArrayType() ||
            ILEElement == ILE->getNumInits())
          break;
        if (Field->isUnnamedBitfield())
          continue;

        const Expr *E = ILE->getInit(ILEElement++);

        // Reference values are always non-null and have the width of a pointer.
        if (Field->getType()->isReferenceType())
          NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
              CGF.getContext().Target.getPointerWidth(0));
        else
          NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
      }

      return NumNonZeroBytes;
    }
  }

  CharUnits NumNonZeroBytes = CharUnits::Zero();
  for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
    NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
  return NumNonZeroBytes;
}

/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
/// zeros in it, emit a memset and avoid storing the individual zeros.
///
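/// For example (illustrative): 'int big[1024] = { 1 };' can become a single
/// memset of 4096 zero bytes followed by one store of 1, rather than 1024
/// individual element stores.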
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
                                     CodeGenFunction &CGF) {
  // If the slot is already known to be zeroed, nothing to do.  Don't mess with
  // volatile stores.
  if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == 0) return;

  // C++ objects with a user-declared constructor don't need zeroing.
  if (CGF.getContext().getLangOptions().CPlusPlus)
    if (const RecordType *RT = CGF.getContext()
                       .getBaseElementType(E->getType())->getAs<RecordType>()) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
      if (RD->hasUserDeclaredConstructor())
        return;
    }

  // If the type is 16 bytes or smaller, prefer individual stores over memset.
  std::pair<CharUnits, CharUnits> TypeInfo =
    CGF.getContext().getTypeInfoInChars(E->getType());
  if (TypeInfo.first <= CharUnits::fromQuantity(16))
    return;

  // Check to see if over 3/4 of the initializer is known to be zero.  If so,
  // we prefer to emit memset + individual stores for the rest.
  CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
  if (NumNonZeroBytes*4 > TypeInfo.first)
    return;

  // Okay, it seems like a good idea to use an initial memset; emit the call.
  llvm::Constant *SizeVal = CGF.Builder.getInt64(TypeInfo.first.getQuantity());
  CharUnits Align = TypeInfo.second;

  llvm::Value *Loc = Slot.getAddr();
  llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());

  Loc = CGF.Builder.CreateBitCast(Loc, BP);
  CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal,
                           Align.getQuantity(), false);

  // Tell the AggExprEmitter that the slot is known zero.
  Slot.setZeroed();
}

/// EmitAggExpr - Emit the computation of the specified expression of aggregate
/// type.  The result is computed into the address of the given slot.  Note
/// that if the slot is ignored, the value of the aggregate expression is not
/// needed.
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot,
                                  bool IgnoreResult) {
  assert(E && hasAggregateLLVMType(E->getType()) &&
         "Invalid aggregate expression to emit");
  assert((Slot.getAddr() != 0 || Slot.isIgnored()) &&
         "slot has bits but no address");

  // Optimize the slot if possible.
  CheckAggExprForMemSetUse(Slot, E, *this);

  AggExprEmitter(*this, Slot, IgnoreResult).Visit(const_cast<Expr*>(E));
}

LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
  assert(hasAggregateLLVMType(E->getType()) && "Invalid argument!");
  llvm::Value *Temp = CreateMemTemp(E->getType());
  LValue LV = MakeAddrLValue(Temp, E->getType());
  EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
                                         AggValueSlot::DoesNotNeedGCBarriers));
  return LV;
}

void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
                                        llvm::Value *SrcPtr, QualType Ty,
                                        bool isVolatile) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  if (getContext().getLangOptions().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
      assert((Record->hasTrivialCopyConstructor() ||
              Record->hasTrivialCopyAssignment()) &&
             "Trying to aggregate-copy a type without a trivial copy "
             "constructor or assignment operator");
      // Ignore empty classes in C++.
      if (Record->isEmpty())
        return;
    }
  }

  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in any way the storage of the first
  // object, then the overlap shall be exact and the two objects shall have
  // qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely.  If there is a libc that does not
  // safely handle this, we can add a target hook.

  // Get size and alignment info for this aggregate.
  std::pair<CharUnits, CharUnits> TypeInfo =
    getContext().getTypeInfoInChars(Ty);

  // FIXME: Handle variable sized types.

  // FIXME: If we have a volatile struct, the optimizer can remove what might
  // appear to be `extra' memory ops:
  //
  // volatile struct { int i; } a, b;
  //
  // int main() {
  //   a = b;
  //   a = b;
  // }
  //
  // we need to use a different call here.  We use isVolatile to indicate when
  // either the source or the destination is volatile.

  llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
  llvm::Type *DBP =
    llvm::Type::getInt8PtrTy(getLLVMContext(), DPT->getAddressSpace());
  DestPtr = Builder.CreateBitCast(DestPtr, DBP, "tmp");

  llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
  llvm::Type *SBP =
    llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
  SrcPtr = Builder.CreateBitCast(SrcPtr, SBP, "tmp");

  // Don't do any of the memmove_collectable tests if GC isn't set.
  if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC) {
    // fall through
  } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
    RecordDecl *Record = RecordTy->getDecl();
    if (Record->hasObjectMember()) {
      CharUnits size = TypeInfo.first;
      llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
      llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                    SizeVal);
      return;
    }
  } else if (Ty->isArrayType()) {
    QualType BaseType = getContext().getBaseElementType(Ty);
    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
      if (RecordTy->getDecl()->hasObjectMember()) {
        CharUnits size = TypeInfo.first;
        llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
        llvm::Value *SizeVal =
          llvm::ConstantInt::get(SizeTy, size.getQuantity());
        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                      SizeVal);
        return;
      }
    }
  }

  Builder.CreateMemCpy(DestPtr, SrcPtr,
                       llvm::ConstantInt::get(IntPtrTy,
                                              TypeInfo.first.getQuantity()),
                       TypeInfo.second.getQuantity(), isVolatile);
}