1 //===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code dealing with code generation of C++ expressions
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "clang/Frontend/CodeGenOptions.h"
15 #include "CodeGenFunction.h"
16 #include "CGCUDARuntime.h"
17 #include "CGCXXABI.h"
18 #include "CGObjCRuntime.h"
19 #include "CGDebugInfo.h"
20 #include "llvm/Intrinsics.h"
21 #include "llvm/Support/CallSite.h"
22 
23 using namespace clang;
24 using namespace CodeGen;
25 
26 RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
27                                           llvm::Value *Callee,
28                                           ReturnValueSlot ReturnValue,
29                                           llvm::Value *This,
30                                           llvm::Value *VTT,
31                                           CallExpr::const_arg_iterator ArgBeg,
32                                           CallExpr::const_arg_iterator ArgEnd) {
33   assert(MD->isInstance() &&
34          "Trying to emit a member call expr on a static method!");
35 
36   // C++11 [class.mfct.non-static]p2:
37   //   If a non-static member function of a class X is called for an object that
38   //   is not of type X, or of a type derived from X, the behavior is undefined.
39   EmitCheck(CT_MemberCall, This, getContext().getRecordType(MD->getParent()));
40 
41   CallArgList Args;
42 
43   // Push the this ptr.
44   Args.add(RValue::get(This), MD->getThisType(getContext()));
45 
46   // If there is a VTT parameter, emit it.
47   if (VTT) {
48     QualType T = getContext().getPointerType(getContext().VoidPtrTy);
49     Args.add(RValue::get(VTT), T);
50   }
51 
52   const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
53   RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());
54 
55   // And the rest of the call args.
56   EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
57 
58   return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
59                   Callee, ReturnValue, Args, MD);
60 }
61 
62 // FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
63 // quite what we want.
64 static const Expr *skipNoOpCastsAndParens(const Expr *E) {
65   while (true) {
66     if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
67       E = PE->getSubExpr();
68       continue;
69     }
70 
71     if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
72       if (CE->getCastKind() == CK_NoOp) {
73         E = CE->getSubExpr();
74         continue;
75       }
76     }
77     if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
78       if (UO->getOpcode() == UO_Extension) {
79         E = UO->getSubExpr();
80         continue;
81       }
82     }
83     return E;
84   }
85 }
86 
87 /// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on given
88 /// expr can be devirtualized.
89 static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
90                                                const Expr *Base,
91                                                const CXXMethodDecl *MD) {
92 
93   // When building with -fapple-kext, all calls must go through the vtable since
94   // the kernel linker can do runtime patching of vtables.
95   if (Context.getLangOpts().AppleKext)
96     return false;
97 
98   // If the most derived class is marked final, we know that no subclass can
99   // override this member function and so we can devirtualize it. For example:
100   //
101   // struct A { virtual void f(); }
102   // struct B final : A { };
103   //
104   // void f(B *b) {
105   //   b->f();
106   // }
107   //
108   const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType();
109   if (MostDerivedClassDecl->hasAttr<FinalAttr>())
110     return true;
111 
112   // If the member function is marked 'final', we know that it can't be
113   // overridden and can therefore devirtualize it.
114   if (MD->hasAttr<FinalAttr>())
115     return true;
116 
117   // Similarly, if the class itself is marked 'final' it can't be overridden
118   // and we can therefore devirtualize the member function call.
119   if (MD->getParent()->hasAttr<FinalAttr>())
120     return true;
121 
122   Base = skipNoOpCastsAndParens(Base);
123   if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
124     if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
125       // This is a record decl. We know the type and can devirtualize it.
126       return VD->getType()->isRecordType();
127     }
128 
129     return false;
130   }
131 
132   // We can devirtualize calls on an object accessed by a class member access
133   // expression, since by C++11 [basic.life]p6 we know that it can't refer to
134   // a derived class object constructed in the same location.
135   if (const MemberExpr *ME = dyn_cast<MemberExpr>(Base))
136     if (const ValueDecl *VD = dyn_cast<ValueDecl>(ME->getMemberDecl()))
137       return VD->getType()->isRecordType();
138 
139   // We can always devirtualize calls on temporary object expressions.
140   if (isa<CXXConstructExpr>(Base))
141     return true;
142 
143   // And calls on bound temporaries.
144   if (isa<CXXBindTemporaryExpr>(Base))
145     return true;
146 
147   // Check if this is a call expr that returns a record type.
148   if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
149     return CE->getCallReturnType()->isRecordType();
150 
151   // We can't devirtualize the call.
152   return false;
153 }
154 
155 static CXXRecordDecl *getCXXRecord(const Expr *E) {
156   QualType T = E->getType();
157   if (const PointerType *PTy = T->getAs<PointerType>())
158     T = PTy->getPointeeType();
159   const RecordType *Ty = T->castAs<RecordType>();
160   return cast<CXXRecordDecl>(Ty->getDecl());
161 }
162 
163 // Note: This function also emit constructor calls to support a MSVC
164 // extensions allowing explicit constructor function call.
165 RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
166                                               ReturnValueSlot ReturnValue) {
167   const Expr *callee = CE->getCallee()->IgnoreParens();
168 
169   if (isa<BinaryOperator>(callee))
170     return EmitCXXMemberPointerCallExpr(CE, ReturnValue);
171 
172   const MemberExpr *ME = cast<MemberExpr>(callee);
173   const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
174 
175   CGDebugInfo *DI = getDebugInfo();
176   if (DI && CGM.getCodeGenOpts().DebugInfo == CodeGenOptions::LimitedDebugInfo
177       && !isa<CallExpr>(ME->getBase())) {
178     QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
179     if (const PointerType * PTy = dyn_cast<PointerType>(PQTy)) {
180       DI->getOrCreateRecordType(PTy->getPointeeType(),
181                                 MD->getParent()->getLocation());
182     }
183   }
184 
185   if (MD->isStatic()) {
186     // The method is static, emit it as we would a regular call.
187     llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
188     return EmitCall(getContext().getPointerType(MD->getType()), Callee,
189                     ReturnValue, CE->arg_begin(), CE->arg_end());
190   }
191 
192   // Compute the object pointer.
193   const Expr *Base = ME->getBase();
194   bool CanUseVirtualCall = MD->isVirtual() && !ME->hasQualifier();
195 
196   const CXXMethodDecl *DevirtualizedMethod = NULL;
197   if (CanUseVirtualCall &&
198       canDevirtualizeMemberFunctionCalls(getContext(), Base, MD)) {
199     const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
200     DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
201     assert(DevirtualizedMethod);
202     const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
203     const Expr *Inner = Base->ignoreParenBaseCasts();
204     if (getCXXRecord(Inner) == DevirtualizedClass)
205       // If the class of the Inner expression is where the dynamic method
206       // is defined, build the this pointer from it.
207       Base = Inner;
208     else if (getCXXRecord(Base) != DevirtualizedClass) {
209       // If the method is defined in a class that is not the best dynamic
210       // one or the one of the full expression, we would have to build
211       // a derived-to-base cast to compute the correct this pointer, but
212       // we don't have support for that yet, so do a virtual call.
213       DevirtualizedMethod = NULL;
214     }
215     // If the return types are not the same, this might be a case where more
216     // code needs to run to compensate for it. For example, the derived
217     // method might return a type that inherits form from the return
218     // type of MD and has a prefix.
219     // For now we just avoid devirtualizing these covariant cases.
220     if (DevirtualizedMethod &&
221         DevirtualizedMethod->getResultType().getCanonicalType() !=
222         MD->getResultType().getCanonicalType())
223       DevirtualizedMethod = NULL;
224   }
225 
226   llvm::Value *This;
227   if (ME->isArrow())
228     This = EmitScalarExpr(Base);
229   else
230     This = EmitLValue(Base).getAddress();
231 
232 
233   if (MD->isTrivial()) {
234     if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
235     if (isa<CXXConstructorDecl>(MD) &&
236         cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
237       return RValue::get(0);
238 
239     if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
240       // We don't like to generate the trivial copy/move assignment operator
241       // when it isn't necessary; just produce the proper effect here.
242       llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
243       EmitAggregateCopy(This, RHS, CE->getType());
244       return RValue::get(This);
245     }
246 
247     if (isa<CXXConstructorDecl>(MD) &&
248         cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
249       // Trivial move and copy ctor are the same.
250       llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
251       EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
252                                      CE->arg_begin(), CE->arg_end());
253       return RValue::get(This);
254     }
255     llvm_unreachable("unknown trivial member function");
256   }
257 
258   // Compute the function type we're calling.
259   const CGFunctionInfo *FInfo = 0;
260   if (isa<CXXDestructorDecl>(MD))
261     FInfo = &CGM.getTypes().arrangeCXXDestructor(cast<CXXDestructorDecl>(MD),
262                                                  Dtor_Complete);
263   else if (isa<CXXConstructorDecl>(MD))
264     FInfo = &CGM.getTypes().arrangeCXXConstructorDeclaration(
265                                                  cast<CXXConstructorDecl>(MD),
266                                                  Ctor_Complete);
267   else
268     FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(MD);
269 
270   llvm::Type *Ty = CGM.getTypes().GetFunctionType(*FInfo);
271 
272   // C++ [class.virtual]p12:
273   //   Explicit qualification with the scope operator (5.1) suppresses the
274   //   virtual call mechanism.
275   //
276   // We also don't emit a virtual call if the base expression has a record type
277   // because then we know what the type is.
278   bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;
279 
280   llvm::Value *Callee;
281   if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
282     if (UseVirtualCall) {
283       Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
284     } else {
285       if (getContext().getLangOpts().AppleKext &&
286           MD->isVirtual() &&
287           ME->hasQualifier())
288         Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
289       else if (!DevirtualizedMethod)
290         Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
291       else {
292         const CXXDestructorDecl *DDtor =
293           cast<CXXDestructorDecl>(DevirtualizedMethod);
294         Callee = CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty);
295       }
296     }
297   } else if (const CXXConstructorDecl *Ctor =
298                dyn_cast<CXXConstructorDecl>(MD)) {
299     Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
300   } else if (UseVirtualCall) {
301       Callee = BuildVirtualCall(MD, This, Ty);
302   } else {
303     if (getContext().getLangOpts().AppleKext &&
304         MD->isVirtual() &&
305         ME->hasQualifier())
306       Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
307     else if (!DevirtualizedMethod)
308       Callee = CGM.GetAddrOfFunction(MD, Ty);
309     else {
310       Callee = CGM.GetAddrOfFunction(DevirtualizedMethod, Ty);
311     }
312   }
313 
314   return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
315                            CE->arg_begin(), CE->arg_end());
316 }
317 
318 RValue
319 CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
320                                               ReturnValueSlot ReturnValue) {
321   const BinaryOperator *BO =
322       cast<BinaryOperator>(E->getCallee()->IgnoreParens());
323   const Expr *BaseExpr = BO->getLHS();
324   const Expr *MemFnExpr = BO->getRHS();
325 
326   const MemberPointerType *MPT =
327     MemFnExpr->getType()->castAs<MemberPointerType>();
328 
329   const FunctionProtoType *FPT =
330     MPT->getPointeeType()->castAs<FunctionProtoType>();
331   const CXXRecordDecl *RD =
332     cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
333 
334   // Get the member function pointer.
335   llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
336 
337   // Emit the 'this' pointer.
338   llvm::Value *This;
339 
340   if (BO->getOpcode() == BO_PtrMemI)
341     This = EmitScalarExpr(BaseExpr);
342   else
343     This = EmitLValue(BaseExpr).getAddress();
344 
345   EmitCheck(CT_MemberCall, This, QualType(MPT->getClass(), 0));
346 
347   // Ask the ABI to load the callee.  Note that This is modified.
348   llvm::Value *Callee =
349     CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);
350 
351   CallArgList Args;
352 
353   QualType ThisType =
354     getContext().getPointerType(getContext().getTagDeclType(RD));
355 
356   // Push the this ptr.
357   Args.add(RValue::get(This), ThisType);
358 
359   RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);
360 
361   // And the rest of the call args
362   EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
363   return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required), Callee,
364                   ReturnValue, Args);
365 }
366 
367 RValue
368 CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
369                                                const CXXMethodDecl *MD,
370                                                ReturnValueSlot ReturnValue) {
371   assert(MD->isInstance() &&
372          "Trying to emit a member call expr on a static method!");
373   LValue LV = EmitLValue(E->getArg(0));
374   llvm::Value *This = LV.getAddress();
375 
376   if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
377       MD->isTrivial()) {
378     llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
379     QualType Ty = E->getType();
380     EmitAggregateCopy(This, Src, Ty);
381     return RValue::get(This);
382   }
383 
384   llvm::Value *Callee = EmitCXXOperatorMemberCallee(E, MD, This);
385   return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
386                            E->arg_begin() + 1, E->arg_end());
387 }
388 
389 RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
390                                                ReturnValueSlot ReturnValue) {
391   return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
392 }
393 
394 static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
395                                             llvm::Value *DestPtr,
396                                             const CXXRecordDecl *Base) {
397   if (Base->isEmpty())
398     return;
399 
400   DestPtr = CGF.EmitCastToVoidPtr(DestPtr);
401 
402   const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
403   CharUnits Size = Layout.getNonVirtualSize();
404   CharUnits Align = Layout.getNonVirtualAlign();
405 
406   llvm::Value *SizeVal = CGF.CGM.getSize(Size);
407 
408   // If the type contains a pointer to data member we can't memset it to zero.
409   // Instead, create a null constant and copy it to the destination.
410   // TODO: there are other patterns besides zero that we can usefully memset,
411   // like -1, which happens to be the pattern used by member-pointers.
412   // TODO: isZeroInitializable can be over-conservative in the case where a
413   // virtual base contains a member pointer.
414   if (!CGF.CGM.getTypes().isZeroInitializable(Base)) {
415     llvm::Constant *NullConstant = CGF.CGM.EmitNullConstantForBase(Base);
416 
417     llvm::GlobalVariable *NullVariable =
418       new llvm::GlobalVariable(CGF.CGM.getModule(), NullConstant->getType(),
419                                /*isConstant=*/true,
420                                llvm::GlobalVariable::PrivateLinkage,
421                                NullConstant, Twine());
422     NullVariable->setAlignment(Align.getQuantity());
423     llvm::Value *SrcPtr = CGF.EmitCastToVoidPtr(NullVariable);
424 
425     // Get and call the appropriate llvm.memcpy overload.
426     CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity());
427     return;
428   }
429 
430   // Otherwise, just memset the whole thing to zero.  This is legal
431   // because in LLVM, all default initializers (other than the ones we just
432   // handled above) are guaranteed to have a bit pattern of all zeros.
433   CGF.Builder.CreateMemSet(DestPtr, CGF.Builder.getInt8(0), SizeVal,
434                            Align.getQuantity());
435 }
436 
437 void
438 CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
439                                       AggValueSlot Dest) {
440   assert(!Dest.isIgnored() && "Must have a destination!");
441   const CXXConstructorDecl *CD = E->getConstructor();
442 
443   // If we require zero initialization before (or instead of) calling the
444   // constructor, as can be the case with a non-user-provided default
445   // constructor, emit the zero initialization now, unless destination is
446   // already zeroed.
447   if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
448     switch (E->getConstructionKind()) {
449     case CXXConstructExpr::CK_Delegating:
450     case CXXConstructExpr::CK_Complete:
451       EmitNullInitialization(Dest.getAddr(), E->getType());
452       break;
453     case CXXConstructExpr::CK_VirtualBase:
454     case CXXConstructExpr::CK_NonVirtualBase:
455       EmitNullBaseClassInitialization(*this, Dest.getAddr(), CD->getParent());
456       break;
457     }
458   }
459 
460   // If this is a call to a trivial default constructor, do nothing.
461   if (CD->isTrivial() && CD->isDefaultConstructor())
462     return;
463 
464   // Elide the constructor if we're constructing from a temporary.
465   // The temporary check is required because Sema sets this on NRVO
466   // returns.
467   if (getContext().getLangOpts().ElideConstructors && E->isElidable()) {
468     assert(getContext().hasSameUnqualifiedType(E->getType(),
469                                                E->getArg(0)->getType()));
470     if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
471       EmitAggExpr(E->getArg(0), Dest);
472       return;
473     }
474   }
475 
476   if (const ConstantArrayType *arrayType
477         = getContext().getAsConstantArrayType(E->getType())) {
478     EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddr(),
479                                E->arg_begin(), E->arg_end());
480   } else {
481     CXXCtorType Type = Ctor_Complete;
482     bool ForVirtualBase = false;
483 
484     switch (E->getConstructionKind()) {
485      case CXXConstructExpr::CK_Delegating:
486       // We should be emitting a constructor; GlobalDecl will assert this
487       Type = CurGD.getCtorType();
488       break;
489 
490      case CXXConstructExpr::CK_Complete:
491       Type = Ctor_Complete;
492       break;
493 
494      case CXXConstructExpr::CK_VirtualBase:
495       ForVirtualBase = true;
496       // fall-through
497 
498      case CXXConstructExpr::CK_NonVirtualBase:
499       Type = Ctor_Base;
500     }
501 
502     // Call the constructor.
503     EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest.getAddr(),
504                            E->arg_begin(), E->arg_end());
505   }
506 }
507 
508 void
509 CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
510                                             llvm::Value *Src,
511                                             const Expr *Exp) {
512   if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
513     Exp = E->getSubExpr();
514   assert(isa<CXXConstructExpr>(Exp) &&
515          "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
516   const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
517   const CXXConstructorDecl *CD = E->getConstructor();
518   RunCleanupsScope Scope(*this);
519 
520   // If we require zero initialization before (or instead of) calling the
521   // constructor, as can be the case with a non-user-provided default
522   // constructor, emit the zero initialization now.
523   // FIXME. Do I still need this for a copy ctor synthesis?
524   if (E->requiresZeroInitialization())
525     EmitNullInitialization(Dest, E->getType());
526 
527   assert(!getContext().getAsConstantArrayType(E->getType())
528          && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
529   EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src,
530                                  E->arg_begin(), E->arg_end());
531 }
532 
533 static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
534                                         const CXXNewExpr *E) {
535   if (!E->isArray())
536     return CharUnits::Zero();
537 
538   // No cookie is required if the operator new[] being used is the
539   // reserved placement operator new[].
540   if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
541     return CharUnits::Zero();
542 
543   return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
544 }
545 
546 static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
547                                         const CXXNewExpr *e,
548                                         unsigned minElements,
549                                         llvm::Value *&numElements,
550                                         llvm::Value *&sizeWithoutCookie) {
551   QualType type = e->getAllocatedType();
552 
553   if (!e->isArray()) {
554     CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
555     sizeWithoutCookie
556       = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
557     return sizeWithoutCookie;
558   }
559 
560   // The width of size_t.
561   unsigned sizeWidth = CGF.SizeTy->getBitWidth();
562 
563   // Figure out the cookie size.
564   llvm::APInt cookieSize(sizeWidth,
565                          CalculateCookiePadding(CGF, e).getQuantity());
566 
567   // Emit the array size expression.
568   // We multiply the size of all dimensions for NumElements.
569   // e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
570   numElements = CGF.EmitScalarExpr(e->getArraySize());
571   assert(isa<llvm::IntegerType>(numElements->getType()));
572 
573   // The number of elements can be have an arbitrary integer type;
574   // essentially, we need to multiply it by a constant factor, add a
575   // cookie size, and verify that the result is representable as a
576   // size_t.  That's just a gloss, though, and it's wrong in one
577   // important way: if the count is negative, it's an error even if
578   // the cookie size would bring the total size >= 0.
579   bool isSigned
580     = e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
581   llvm::IntegerType *numElementsType
582     = cast<llvm::IntegerType>(numElements->getType());
583   unsigned numElementsWidth = numElementsType->getBitWidth();
584 
585   // Compute the constant factor.
586   llvm::APInt arraySizeMultiplier(sizeWidth, 1);
587   while (const ConstantArrayType *CAT
588              = CGF.getContext().getAsConstantArrayType(type)) {
589     type = CAT->getElementType();
590     arraySizeMultiplier *= CAT->getSize();
591   }
592 
593   CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
594   llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
595   typeSizeMultiplier *= arraySizeMultiplier;
596 
597   // This will be a size_t.
598   llvm::Value *size;
599 
600   // If someone is doing 'new int[42]' there is no need to do a dynamic check.
601   // Don't bloat the -O0 code.
602   if (llvm::ConstantInt *numElementsC =
603         dyn_cast<llvm::ConstantInt>(numElements)) {
604     const llvm::APInt &count = numElementsC->getValue();
605 
606     bool hasAnyOverflow = false;
607 
608     // If 'count' was a negative number, it's an overflow.
609     if (isSigned && count.isNegative())
610       hasAnyOverflow = true;
611 
612     // We want to do all this arithmetic in size_t.  If numElements is
613     // wider than that, check whether it's already too big, and if so,
614     // overflow.
615     else if (numElementsWidth > sizeWidth &&
616              numElementsWidth - sizeWidth > count.countLeadingZeros())
617       hasAnyOverflow = true;
618 
619     // Okay, compute a count at the right width.
620     llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);
621 
622     // If there is a brace-initializer, we cannot allocate fewer elements than
623     // there are initializers. If we do, that's treated like an overflow.
624     if (adjustedCount.ult(minElements))
625       hasAnyOverflow = true;
626 
627     // Scale numElements by that.  This might overflow, but we don't
628     // care because it only overflows if allocationSize does, too, and
629     // if that overflows then we shouldn't use this.
630     numElements = llvm::ConstantInt::get(CGF.SizeTy,
631                                          adjustedCount * arraySizeMultiplier);
632 
633     // Compute the size before cookie, and track whether it overflowed.
634     bool overflow;
635     llvm::APInt allocationSize
636       = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
637     hasAnyOverflow |= overflow;
638 
639     // Add in the cookie, and check whether it's overflowed.
640     if (cookieSize != 0) {
641       // Save the current size without a cookie.  This shouldn't be
642       // used if there was overflow.
643       sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
644 
645       allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
646       hasAnyOverflow |= overflow;
647     }
648 
649     // On overflow, produce a -1 so operator new will fail.
650     if (hasAnyOverflow) {
651       size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
652     } else {
653       size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
654     }
655 
656   // Otherwise, we might need to use the overflow intrinsics.
657   } else {
658     // There are up to five conditions we need to test for:
659     // 1) if isSigned, we need to check whether numElements is negative;
660     // 2) if numElementsWidth > sizeWidth, we need to check whether
661     //   numElements is larger than something representable in size_t;
662     // 3) if minElements > 0, we need to check whether numElements is smaller
663     //    than that.
664     // 4) we need to compute
665     //      sizeWithoutCookie := numElements * typeSizeMultiplier
666     //    and check whether it overflows; and
667     // 5) if we need a cookie, we need to compute
668     //      size := sizeWithoutCookie + cookieSize
669     //    and check whether it overflows.
670 
671     llvm::Value *hasOverflow = 0;
672 
673     // If numElementsWidth > sizeWidth, then one way or another, we're
674     // going to have to do a comparison for (2), and this happens to
675     // take care of (1), too.
676     if (numElementsWidth > sizeWidth) {
677       llvm::APInt threshold(numElementsWidth, 1);
678       threshold <<= sizeWidth;
679 
680       llvm::Value *thresholdV
681         = llvm::ConstantInt::get(numElementsType, threshold);
682 
683       hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
684       numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);
685 
686     // Otherwise, if we're signed, we want to sext up to size_t.
687     } else if (isSigned) {
688       if (numElementsWidth < sizeWidth)
689         numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);
690 
691       // If there's a non-1 type size multiplier, then we can do the
692       // signedness check at the same time as we do the multiply
693       // because a negative number times anything will cause an
694       // unsigned overflow.  Otherwise, we have to do it here. But at least
695       // in this case, we can subsume the >= minElements check.
696       if (typeSizeMultiplier == 1)
697         hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
698                               llvm::ConstantInt::get(CGF.SizeTy, minElements));
699 
700     // Otherwise, zext up to size_t if necessary.
701     } else if (numElementsWidth < sizeWidth) {
702       numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
703     }
704 
705     assert(numElements->getType() == CGF.SizeTy);
706 
707     if (minElements) {
708       // Don't allow allocation of fewer elements than we have initializers.
709       if (!hasOverflow) {
710         hasOverflow = CGF.Builder.CreateICmpULT(numElements,
711                               llvm::ConstantInt::get(CGF.SizeTy, minElements));
712       } else if (numElementsWidth > sizeWidth) {
713         // The other existing overflow subsumes this check.
714         // We do an unsigned comparison, since any signed value < -1 is
715         // taken care of either above or below.
716         hasOverflow = CGF.Builder.CreateOr(hasOverflow,
717                           CGF.Builder.CreateICmpULT(numElements,
718                               llvm::ConstantInt::get(CGF.SizeTy, minElements)));
719       }
720     }
721 
722     size = numElements;
723 
724     // Multiply by the type size if necessary.  This multiplier
725     // includes all the factors for nested arrays.
726     //
727     // This step also causes numElements to be scaled up by the
728     // nested-array factor if necessary.  Overflow on this computation
729     // can be ignored because the result shouldn't be used if
730     // allocation fails.
731     if (typeSizeMultiplier != 1) {
732       llvm::Value *umul_with_overflow
733         = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);
734 
735       llvm::Value *tsmV =
736         llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
737       llvm::Value *result =
738         CGF.Builder.CreateCall2(umul_with_overflow, size, tsmV);
739 
740       llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
741       if (hasOverflow)
742         hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
743       else
744         hasOverflow = overflowed;
745 
746       size = CGF.Builder.CreateExtractValue(result, 0);
747 
748       // Also scale up numElements by the array size multiplier.
749       if (arraySizeMultiplier != 1) {
750         // If the base element type size is 1, then we can re-use the
751         // multiply we just did.
752         if (typeSize.isOne()) {
753           assert(arraySizeMultiplier == typeSizeMultiplier);
754           numElements = size;
755 
756         // Otherwise we need a separate multiply.
757         } else {
758           llvm::Value *asmV =
759             llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
760           numElements = CGF.Builder.CreateMul(numElements, asmV);
761         }
762       }
763     } else {
764       // numElements doesn't need to be scaled.
765       assert(arraySizeMultiplier == 1);
766     }
767 
768     // Add in the cookie size if necessary.
769     if (cookieSize != 0) {
770       sizeWithoutCookie = size;
771 
772       llvm::Value *uadd_with_overflow
773         = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);
774 
775       llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
776       llvm::Value *result =
777         CGF.Builder.CreateCall2(uadd_with_overflow, size, cookieSizeV);
778 
779       llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
780       if (hasOverflow)
781         hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
782       else
783         hasOverflow = overflowed;
784 
785       size = CGF.Builder.CreateExtractValue(result, 0);
786     }
787 
788     // If we had any possibility of dynamic overflow, make a select to
789     // overwrite 'size' with an all-ones value, which should cause
790     // operator new to throw.
791     if (hasOverflow)
792       size = CGF.Builder.CreateSelect(hasOverflow,
793                                  llvm::Constant::getAllOnesValue(CGF.SizeTy),
794                                       size);
795   }
796 
797   if (cookieSize == 0)
798     sizeWithoutCookie = size;
799   else
800     assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");
801 
802   return size;
803 }
804 
805 static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
806                                     QualType AllocType, llvm::Value *NewPtr) {
807 
808   CharUnits Alignment = CGF.getContext().getTypeAlignInChars(AllocType);
809   if (!CGF.hasAggregateLLVMType(AllocType))
810     CGF.EmitScalarInit(Init, 0, CGF.MakeAddrLValue(NewPtr, AllocType,
811                                                    Alignment),
812                        false);
813   else if (AllocType->isAnyComplexType())
814     CGF.EmitComplexExprIntoAddr(Init, NewPtr,
815                                 AllocType.isVolatileQualified());
816   else {
817     AggValueSlot Slot
818       = AggValueSlot::forAddr(NewPtr, Alignment, AllocType.getQualifiers(),
819                               AggValueSlot::IsDestructed,
820                               AggValueSlot::DoesNotNeedGCBarriers,
821                               AggValueSlot::IsNotAliased);
822     CGF.EmitAggExpr(Init, Slot);
823 
824     CGF.MaybeEmitStdInitializerListCleanup(NewPtr, Init);
825   }
826 }
827 
828 void
829 CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
830                                          QualType elementType,
831                                          llvm::Value *beginPtr,
832                                          llvm::Value *numElements) {
833   if (!E->hasInitializer())
834     return; // We have a POD type.
835 
836   llvm::Value *explicitPtr = beginPtr;
837   // Find the end of the array, hoisted out of the loop.
838   llvm::Value *endPtr =
839     Builder.CreateInBoundsGEP(beginPtr, numElements, "array.end");
840 
841   unsigned initializerElements = 0;
842 
843   const Expr *Init = E->getInitializer();
844   llvm::AllocaInst *endOfInit = 0;
845   QualType::DestructionKind dtorKind = elementType.isDestructedType();
846   EHScopeStack::stable_iterator cleanup;
847   llvm::Instruction *cleanupDominator = 0;
848   // If the initializer is an initializer list, first do the explicit elements.
849   if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
850     initializerElements = ILE->getNumInits();
851 
852     // Enter a partial-destruction cleanup if necessary.
853     if (needsEHCleanup(dtorKind)) {
854       // In principle we could tell the cleanup where we are more
855       // directly, but the control flow can get so varied here that it
856       // would actually be quite complex.  Therefore we go through an
857       // alloca.
858       endOfInit = CreateTempAlloca(beginPtr->getType(), "array.endOfInit");
859       cleanupDominator = Builder.CreateStore(beginPtr, endOfInit);
860       pushIrregularPartialArrayCleanup(beginPtr, endOfInit, elementType,
861                                        getDestroyer(dtorKind));
862       cleanup = EHStack.stable_begin();
863     }
864 
865     for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
866       // Tell the cleanup that it needs to destroy up to this
867       // element.  TODO: some of these stores can be trivially
868       // observed to be unnecessary.
869       if (endOfInit) Builder.CreateStore(explicitPtr, endOfInit);
870       StoreAnyExprIntoOneUnit(*this, ILE->getInit(i), elementType, explicitPtr);
871       explicitPtr =Builder.CreateConstGEP1_32(explicitPtr, 1, "array.exp.next");
872     }
873 
874     // The remaining elements are filled with the array filler expression.
875     Init = ILE->getArrayFiller();
876   }
877 
878   // Create the continuation block.
879   llvm::BasicBlock *contBB = createBasicBlock("new.loop.end");
880 
881   // If the number of elements isn't constant, we have to now check if there is
882   // anything left to initialize.
883   if (llvm::ConstantInt *constNum = dyn_cast<llvm::ConstantInt>(numElements)) {
884     // If all elements have already been initialized, skip the whole loop.
885     if (constNum->getZExtValue() <= initializerElements) {
886       // If there was a cleanup, deactivate it.
887       if (cleanupDominator)
888         DeactivateCleanupBlock(cleanup, cleanupDominator);;
889       return;
890     }
891   } else {
892     llvm::BasicBlock *nonEmptyBB = createBasicBlock("new.loop.nonempty");
893     llvm::Value *isEmpty = Builder.CreateICmpEQ(explicitPtr, endPtr,
894                                                 "array.isempty");
895     Builder.CreateCondBr(isEmpty, contBB, nonEmptyBB);
896     EmitBlock(nonEmptyBB);
897   }
898 
899   // Enter the loop.
900   llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
901   llvm::BasicBlock *loopBB = createBasicBlock("new.loop");
902 
903   EmitBlock(loopBB);
904 
905   // Set up the current-element phi.
906   llvm::PHINode *curPtr =
907     Builder.CreatePHI(explicitPtr->getType(), 2, "array.cur");
908   curPtr->addIncoming(explicitPtr, entryBB);
909 
910   // Store the new cleanup position for irregular cleanups.
911   if (endOfInit) Builder.CreateStore(curPtr, endOfInit);
912 
913   // Enter a partial-destruction cleanup if necessary.
914   if (!cleanupDominator && needsEHCleanup(dtorKind)) {
915     pushRegularPartialArrayCleanup(beginPtr, curPtr, elementType,
916                                    getDestroyer(dtorKind));
917     cleanup = EHStack.stable_begin();
918     cleanupDominator = Builder.CreateUnreachable();
919   }
920 
921   // Emit the initializer into this element.
922   StoreAnyExprIntoOneUnit(*this, Init, E->getAllocatedType(), curPtr);
923 
924   // Leave the cleanup if we entered one.
925   if (cleanupDominator) {
926     DeactivateCleanupBlock(cleanup, cleanupDominator);
927     cleanupDominator->eraseFromParent();
928   }
929 
930   // Advance to the next element.
931   llvm::Value *nextPtr = Builder.CreateConstGEP1_32(curPtr, 1, "array.next");
932 
933   // Check whether we've gotten to the end of the array and, if so,
934   // exit the loop.
935   llvm::Value *isEnd = Builder.CreateICmpEQ(nextPtr, endPtr, "array.atend");
936   Builder.CreateCondBr(isEnd, contBB, loopBB);
937   curPtr->addIncoming(nextPtr, Builder.GetInsertBlock());
938 
939   EmitBlock(contBB);
940 }
941 
942 static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
943                            llvm::Value *NewPtr, llvm::Value *Size) {
944   CGF.EmitCastToVoidPtr(NewPtr);
945   CharUnits Alignment = CGF.getContext().getTypeAlignInChars(T);
946   CGF.Builder.CreateMemSet(NewPtr, CGF.Builder.getInt8(0), Size,
947                            Alignment.getQuantity(), false);
948 }
949 
950 static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
951                                QualType ElementType,
952                                llvm::Value *NewPtr,
953                                llvm::Value *NumElements,
954                                llvm::Value *AllocSizeWithoutCookie) {
955   const Expr *Init = E->getInitializer();
956   if (E->isArray()) {
957     if (const CXXConstructExpr *CCE = dyn_cast_or_null<CXXConstructExpr>(Init)){
958       CXXConstructorDecl *Ctor = CCE->getConstructor();
959       if (Ctor->isTrivial()) {
960         // If new expression did not specify value-initialization, then there
961         // is no initialization.
962         if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
963           return;
964 
965         if (CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
966           // Optimization: since zero initialization will just set the memory
967           // to all zeroes, generate a single memset to do it in one shot.
968           EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
969           return;
970         }
971       }
972 
973       CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
974                                      CCE->arg_begin(),  CCE->arg_end(),
975                                      CCE->requiresZeroInitialization());
976       return;
977     } else if (Init && isa<ImplicitValueInitExpr>(Init) &&
978                CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
979       // Optimization: since zero initialization will just set the memory
980       // to all zeroes, generate a single memset to do it in one shot.
981       EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
982       return;
983     }
984     CGF.EmitNewArrayInitializer(E, ElementType, NewPtr, NumElements);
985     return;
986   }
987 
988   if (!Init)
989     return;
990 
991   StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr);
992 }
993 
994 namespace {
995   /// A cleanup to call the given 'operator delete' function upon
996   /// abnormal exit from a new expression.
997   class CallDeleteDuringNew : public EHScopeStack::Cleanup {
998     size_t NumPlacementArgs;
999     const FunctionDecl *OperatorDelete;
1000     llvm::Value *Ptr;
1001     llvm::Value *AllocSize;
1002 
1003     RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }
1004 
1005   public:
1006     static size_t getExtraSize(size_t NumPlacementArgs) {
1007       return NumPlacementArgs * sizeof(RValue);
1008     }
1009 
1010     CallDeleteDuringNew(size_t NumPlacementArgs,
1011                         const FunctionDecl *OperatorDelete,
1012                         llvm::Value *Ptr,
1013                         llvm::Value *AllocSize)
1014       : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
1015         Ptr(Ptr), AllocSize(AllocSize) {}
1016 
1017     void setPlacementArg(unsigned I, RValue Arg) {
1018       assert(I < NumPlacementArgs && "index out of range");
1019       getPlacementArgs()[I] = Arg;
1020     }
1021 
1022     void Emit(CodeGenFunction &CGF, Flags flags) {
1023       const FunctionProtoType *FPT
1024         = OperatorDelete->getType()->getAs<FunctionProtoType>();
1025       assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
1026              (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));
1027 
1028       CallArgList DeleteArgs;
1029 
1030       // The first argument is always a void*.
1031       FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
1032       DeleteArgs.add(RValue::get(Ptr), *AI++);
1033 
1034       // A member 'operator delete' can take an extra 'size_t' argument.
1035       if (FPT->getNumArgs() == NumPlacementArgs + 2)
1036         DeleteArgs.add(RValue::get(AllocSize), *AI++);
1037 
1038       // Pass the rest of the arguments, which must match exactly.
1039       for (unsigned I = 0; I != NumPlacementArgs; ++I)
1040         DeleteArgs.add(getPlacementArgs()[I], *AI++);
1041 
1042       // Call 'operator delete'.
1043       CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(DeleteArgs, FPT),
1044                    CGF.CGM.GetAddrOfFunction(OperatorDelete),
1045                    ReturnValueSlot(), DeleteArgs, OperatorDelete);
1046     }
1047   };
1048 
1049   /// A cleanup to call the given 'operator delete' function upon
1050   /// abnormal exit from a new expression when the new expression is
1051   /// conditional.
1052   class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
1053     size_t NumPlacementArgs;
1054     const FunctionDecl *OperatorDelete;
1055     DominatingValue<RValue>::saved_type Ptr;
1056     DominatingValue<RValue>::saved_type AllocSize;
1057 
1058     DominatingValue<RValue>::saved_type *getPlacementArgs() {
1059       return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
1060     }
1061 
1062   public:
1063     static size_t getExtraSize(size_t NumPlacementArgs) {
1064       return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
1065     }
1066 
1067     CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
1068                                    const FunctionDecl *OperatorDelete,
1069                                    DominatingValue<RValue>::saved_type Ptr,
1070                               DominatingValue<RValue>::saved_type AllocSize)
1071       : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
1072         Ptr(Ptr), AllocSize(AllocSize) {}
1073 
1074     void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
1075       assert(I < NumPlacementArgs && "index out of range");
1076       getPlacementArgs()[I] = Arg;
1077     }
1078 
1079     void Emit(CodeGenFunction &CGF, Flags flags) {
1080       const FunctionProtoType *FPT
1081         = OperatorDelete->getType()->getAs<FunctionProtoType>();
1082       assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
1083              (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));
1084 
1085       CallArgList DeleteArgs;
1086 
1087       // The first argument is always a void*.
1088       FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
1089       DeleteArgs.add(Ptr.restore(CGF), *AI++);
1090 
1091       // A member 'operator delete' can take an extra 'size_t' argument.
1092       if (FPT->getNumArgs() == NumPlacementArgs + 2) {
1093         RValue RV = AllocSize.restore(CGF);
1094         DeleteArgs.add(RV, *AI++);
1095       }
1096 
1097       // Pass the rest of the arguments, which must match exactly.
1098       for (unsigned I = 0; I != NumPlacementArgs; ++I) {
1099         RValue RV = getPlacementArgs()[I].restore(CGF);
1100         DeleteArgs.add(RV, *AI++);
1101       }
1102 
1103       // Call 'operator delete'.
1104       CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(DeleteArgs, FPT),
1105                    CGF.CGM.GetAddrOfFunction(OperatorDelete),
1106                    ReturnValueSlot(), DeleteArgs, OperatorDelete);
1107     }
1108   };
1109 }
1110 
1111 /// Enter a cleanup to call 'operator delete' if the initializer in a
1112 /// new-expression throws.
1113 static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
1114                                   const CXXNewExpr *E,
1115                                   llvm::Value *NewPtr,
1116                                   llvm::Value *AllocSize,
1117                                   const CallArgList &NewArgs) {
1118   // If we're not inside a conditional branch, then the cleanup will
1119   // dominate and we can do the easier (and more efficient) thing.
1120   if (!CGF.isInConditionalBranch()) {
1121     CallDeleteDuringNew *Cleanup = CGF.EHStack
1122       .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
1123                                                  E->getNumPlacementArgs(),
1124                                                  E->getOperatorDelete(),
1125                                                  NewPtr, AllocSize);
1126     for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
1127       Cleanup->setPlacementArg(I, NewArgs[I+1].RV);
1128 
1129     return;
1130   }
1131 
1132   // Otherwise, we need to save all this stuff.
1133   DominatingValue<RValue>::saved_type SavedNewPtr =
1134     DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
1135   DominatingValue<RValue>::saved_type SavedAllocSize =
1136     DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
1137 
1138   CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
1139     .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(EHCleanup,
1140                                                  E->getNumPlacementArgs(),
1141                                                  E->getOperatorDelete(),
1142                                                  SavedNewPtr,
1143                                                  SavedAllocSize);
1144   for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
1145     Cleanup->setPlacementArg(I,
1146                      DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV));
1147 
1148   CGF.initFullExprCleanup();
1149 }
1150 
1151 llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
1152   // The element type being allocated.
1153   QualType allocType = getContext().getBaseElementType(E->getAllocatedType());
1154 
1155   // 1. Build a call to the allocation function.
1156   FunctionDecl *allocator = E->getOperatorNew();
1157   const FunctionProtoType *allocatorType =
1158     allocator->getType()->castAs<FunctionProtoType>();
1159 
1160   CallArgList allocatorArgs;
1161 
1162   // The allocation size is the first argument.
1163   QualType sizeType = getContext().getSizeType();
1164 
1165   // If there is a brace-initializer, cannot allocate fewer elements than inits.
1166   unsigned minElements = 0;
1167   if (E->isArray() && E->hasInitializer()) {
1168     if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer()))
1169       minElements = ILE->getNumInits();
1170   }
1171 
1172   llvm::Value *numElements = 0;
1173   llvm::Value *allocSizeWithoutCookie = 0;
1174   llvm::Value *allocSize =
1175     EmitCXXNewAllocSize(*this, E, minElements, numElements,
1176                         allocSizeWithoutCookie);
1177 
1178   allocatorArgs.add(RValue::get(allocSize), sizeType);
1179 
1180   // Emit the rest of the arguments.
1181   // FIXME: Ideally, this should just use EmitCallArgs.
1182   CXXNewExpr::const_arg_iterator placementArg = E->placement_arg_begin();
1183 
1184   // First, use the types from the function type.
1185   // We start at 1 here because the first argument (the allocation size)
1186   // has already been emitted.
1187   for (unsigned i = 1, e = allocatorType->getNumArgs(); i != e;
1188        ++i, ++placementArg) {
1189     QualType argType = allocatorType->getArgType(i);
1190 
1191     assert(getContext().hasSameUnqualifiedType(argType.getNonReferenceType(),
1192                                                placementArg->getType()) &&
1193            "type mismatch in call argument!");
1194 
1195     EmitCallArg(allocatorArgs, *placementArg, argType);
1196   }
1197 
1198   // Either we've emitted all the call args, or we have a call to a
1199   // variadic function.
1200   assert((placementArg == E->placement_arg_end() ||
1201           allocatorType->isVariadic()) &&
1202          "Extra arguments to non-variadic function!");
1203 
1204   // If we still have any arguments, emit them using the type of the argument.
1205   for (CXXNewExpr::const_arg_iterator placementArgsEnd = E->placement_arg_end();
1206        placementArg != placementArgsEnd; ++placementArg) {
1207     EmitCallArg(allocatorArgs, *placementArg, placementArg->getType());
1208   }
1209 
1210   // Emit the allocation call.  If the allocator is a global placement
1211   // operator, just "inline" it directly.
1212   RValue RV;
1213   if (allocator->isReservedGlobalPlacementOperator()) {
1214     assert(allocatorArgs.size() == 2);
1215     RV = allocatorArgs[1].RV;
1216     // TODO: kill any unnecessary computations done for the size
1217     // argument.
1218   } else {
1219     RV = EmitCall(CGM.getTypes().arrangeFreeFunctionCall(allocatorArgs,
1220                                                          allocatorType),
1221                   CGM.GetAddrOfFunction(allocator), ReturnValueSlot(),
1222                   allocatorArgs, allocator);
1223   }
1224 
1225   // Emit a null check on the allocation result if the allocation
1226   // function is allowed to return null (because it has a non-throwing
1227   // exception spec; for this part, we inline
1228   // CXXNewExpr::shouldNullCheckAllocation()) and we have an
1229   // interesting initializer.
1230   bool nullCheck = allocatorType->isNothrow(getContext()) &&
1231     (!allocType.isPODType(getContext()) || E->hasInitializer());
1232 
1233   llvm::BasicBlock *nullCheckBB = 0;
1234   llvm::BasicBlock *contBB = 0;
1235 
1236   llvm::Value *allocation = RV.getScalarVal();
1237   unsigned AS =
1238     cast<llvm::PointerType>(allocation->getType())->getAddressSpace();
1239 
1240   // The null-check means that the initializer is conditionally
1241   // evaluated.
1242   ConditionalEvaluation conditional(*this);
1243 
1244   if (nullCheck) {
1245     conditional.begin(*this);
1246 
1247     nullCheckBB = Builder.GetInsertBlock();
1248     llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
1249     contBB = createBasicBlock("new.cont");
1250 
1251     llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
1252     Builder.CreateCondBr(isNull, contBB, notNullBB);
1253     EmitBlock(notNullBB);
1254   }
1255 
1256   // If there's an operator delete, enter a cleanup to call it if an
1257   // exception is thrown.
1258   EHScopeStack::stable_iterator operatorDeleteCleanup;
1259   llvm::Instruction *cleanupDominator = 0;
1260   if (E->getOperatorDelete() &&
1261       !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
1262     EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
1263     operatorDeleteCleanup = EHStack.stable_begin();
1264     cleanupDominator = Builder.CreateUnreachable();
1265   }
1266 
1267   assert((allocSize == allocSizeWithoutCookie) ==
1268          CalculateCookiePadding(*this, E).isZero());
1269   if (allocSize != allocSizeWithoutCookie) {
1270     assert(E->isArray());
1271     allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
1272                                                        numElements,
1273                                                        E, allocType);
1274   }
1275 
1276   llvm::Type *elementPtrTy
1277     = ConvertTypeForMem(allocType)->getPointerTo(AS);
1278   llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy);
1279 
1280   EmitNewInitializer(*this, E, allocType, result, numElements,
1281                      allocSizeWithoutCookie);
1282   if (E->isArray()) {
1283     // NewPtr is a pointer to the base element type.  If we're
1284     // allocating an array of arrays, we'll need to cast back to the
1285     // array pointer type.
1286     llvm::Type *resultType = ConvertTypeForMem(E->getType());
1287     if (result->getType() != resultType)
1288       result = Builder.CreateBitCast(result, resultType);
1289   }
1290 
1291   // Deactivate the 'operator delete' cleanup if we finished
1292   // initialization.
1293   if (operatorDeleteCleanup.isValid()) {
1294     DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
1295     cleanupDominator->eraseFromParent();
1296   }
1297 
1298   if (nullCheck) {
1299     conditional.end(*this);
1300 
1301     llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
1302     EmitBlock(contBB);
1303 
1304     llvm::PHINode *PHI = Builder.CreatePHI(result->getType(), 2);
1305     PHI->addIncoming(result, notNullBB);
1306     PHI->addIncoming(llvm::Constant::getNullValue(result->getType()),
1307                      nullCheckBB);
1308 
1309     result = PHI;
1310   }
1311 
1312   return result;
1313 }
1314 
1315 void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
1316                                      llvm::Value *Ptr,
1317                                      QualType DeleteTy) {
1318   assert(DeleteFD->getOverloadedOperator() == OO_Delete);
1319 
1320   const FunctionProtoType *DeleteFTy =
1321     DeleteFD->getType()->getAs<FunctionProtoType>();
1322 
1323   CallArgList DeleteArgs;
1324 
1325   // Check if we need to pass the size to the delete operator.
1326   llvm::Value *Size = 0;
1327   QualType SizeTy;
1328   if (DeleteFTy->getNumArgs() == 2) {
1329     SizeTy = DeleteFTy->getArgType(1);
1330     CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
1331     Size = llvm::ConstantInt::get(ConvertType(SizeTy),
1332                                   DeleteTypeSize.getQuantity());
1333   }
1334 
1335   QualType ArgTy = DeleteFTy->getArgType(0);
1336   llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
1337   DeleteArgs.add(RValue::get(DeletePtr), ArgTy);
1338 
1339   if (Size)
1340     DeleteArgs.add(RValue::get(Size), SizeTy);
1341 
1342   // Emit the call to delete.
1343   EmitCall(CGM.getTypes().arrangeFreeFunctionCall(DeleteArgs, DeleteFTy),
1344            CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
1345            DeleteArgs, DeleteFD);
1346 }
1347 
1348 namespace {
1349   /// Calls the given 'operator delete' on a single object.
1350   struct CallObjectDelete : EHScopeStack::Cleanup {
1351     llvm::Value *Ptr;
1352     const FunctionDecl *OperatorDelete;
1353     QualType ElementType;
1354 
1355     CallObjectDelete(llvm::Value *Ptr,
1356                      const FunctionDecl *OperatorDelete,
1357                      QualType ElementType)
1358       : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
1359 
1360     void Emit(CodeGenFunction &CGF, Flags flags) {
1361       CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
1362     }
1363   };
1364 }
1365 
1366 /// Emit the code for deleting a single object.
1367 static void EmitObjectDelete(CodeGenFunction &CGF,
1368                              const FunctionDecl *OperatorDelete,
1369                              llvm::Value *Ptr,
1370                              QualType ElementType,
1371                              bool UseGlobalDelete) {
1372   // Find the destructor for the type, if applicable.  If the
1373   // destructor is virtual, we'll just emit the vcall and return.
1374   const CXXDestructorDecl *Dtor = 0;
1375   if (const RecordType *RT = ElementType->getAs<RecordType>()) {
1376     CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1377     if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
1378       Dtor = RD->getDestructor();
1379 
1380       if (Dtor->isVirtual()) {
1381         if (UseGlobalDelete) {
1382           // If we're supposed to call the global delete, make sure we do so
1383           // even if the destructor throws.
1384           CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
1385                                                     Ptr, OperatorDelete,
1386                                                     ElementType);
1387         }
1388 
1389         llvm::Type *Ty =
1390           CGF.getTypes().GetFunctionType(
1391                          CGF.getTypes().arrangeCXXDestructor(Dtor, Dtor_Complete));
1392 
1393         llvm::Value *Callee
1394           = CGF.BuildVirtualCall(Dtor,
1395                                  UseGlobalDelete? Dtor_Complete : Dtor_Deleting,
1396                                  Ptr, Ty);
1397         CGF.EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
1398                               0, 0);
1399 
1400         if (UseGlobalDelete) {
1401           CGF.PopCleanupBlock();
1402         }
1403 
1404         return;
1405       }
1406     }
1407   }
1408 
1409   // Make sure that we call delete even if the dtor throws.
1410   // This doesn't have to a conditional cleanup because we're going
1411   // to pop it off in a second.
1412   CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
1413                                             Ptr, OperatorDelete, ElementType);
1414 
1415   if (Dtor)
1416     CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
1417                               /*ForVirtualBase=*/false, Ptr);
1418   else if (CGF.getLangOpts().ObjCAutoRefCount &&
1419            ElementType->isObjCLifetimeType()) {
1420     switch (ElementType.getObjCLifetime()) {
1421     case Qualifiers::OCL_None:
1422     case Qualifiers::OCL_ExplicitNone:
1423     case Qualifiers::OCL_Autoreleasing:
1424       break;
1425 
1426     case Qualifiers::OCL_Strong: {
1427       // Load the pointer value.
1428       llvm::Value *PtrValue = CGF.Builder.CreateLoad(Ptr,
1429                                              ElementType.isVolatileQualified());
1430 
1431       CGF.EmitARCRelease(PtrValue, /*precise*/ true);
1432       break;
1433     }
1434 
1435     case Qualifiers::OCL_Weak:
1436       CGF.EmitARCDestroyWeak(Ptr);
1437       break;
1438     }
1439   }
1440 
1441   CGF.PopCleanupBlock();
1442 }
1443 
1444 namespace {
1445   /// Calls the given 'operator delete' on an array of objects.
1446   struct CallArrayDelete : EHScopeStack::Cleanup {
1447     llvm::Value *Ptr;
1448     const FunctionDecl *OperatorDelete;
1449     llvm::Value *NumElements;
1450     QualType ElementType;
1451     CharUnits CookieSize;
1452 
1453     CallArrayDelete(llvm::Value *Ptr,
1454                     const FunctionDecl *OperatorDelete,
1455                     llvm::Value *NumElements,
1456                     QualType ElementType,
1457                     CharUnits CookieSize)
1458       : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
1459         ElementType(ElementType), CookieSize(CookieSize) {}
1460 
1461     void Emit(CodeGenFunction &CGF, Flags flags) {
1462       const FunctionProtoType *DeleteFTy =
1463         OperatorDelete->getType()->getAs<FunctionProtoType>();
1464       assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);
1465 
1466       CallArgList Args;
1467 
1468       // Pass the pointer as the first argument.
1469       QualType VoidPtrTy = DeleteFTy->getArgType(0);
1470       llvm::Value *DeletePtr
1471         = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
1472       Args.add(RValue::get(DeletePtr), VoidPtrTy);
1473 
1474       // Pass the original requested size as the second argument.
1475       if (DeleteFTy->getNumArgs() == 2) {
1476         QualType size_t = DeleteFTy->getArgType(1);
1477         llvm::IntegerType *SizeTy
1478           = cast<llvm::IntegerType>(CGF.ConvertType(size_t));
1479 
1480         CharUnits ElementTypeSize =
1481           CGF.CGM.getContext().getTypeSizeInChars(ElementType);
1482 
1483         // The size of an element, multiplied by the number of elements.
1484         llvm::Value *Size
1485           = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
1486         Size = CGF.Builder.CreateMul(Size, NumElements);
1487 
1488         // Plus the size of the cookie if applicable.
1489         if (!CookieSize.isZero()) {
1490           llvm::Value *CookieSizeV
1491             = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
1492           Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
1493         }
1494 
1495         Args.add(RValue::get(Size), size_t);
1496       }
1497 
1498       // Emit the call to delete.
1499       CGF.EmitCall(CGF.getTypes().arrangeFreeFunctionCall(Args, DeleteFTy),
1500                    CGF.CGM.GetAddrOfFunction(OperatorDelete),
1501                    ReturnValueSlot(), Args, OperatorDelete);
1502     }
1503   };
1504 }
1505 
1506 /// Emit the code for deleting an array of objects.
1507 static void EmitArrayDelete(CodeGenFunction &CGF,
1508                             const CXXDeleteExpr *E,
1509                             llvm::Value *deletedPtr,
1510                             QualType elementType) {
1511   llvm::Value *numElements = 0;
1512   llvm::Value *allocatedPtr = 0;
1513   CharUnits cookieSize;
1514   CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
1515                                       numElements, allocatedPtr, cookieSize);
1516 
1517   assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");
1518 
1519   // Make sure that we call delete even if one of the dtors throws.
1520   const FunctionDecl *operatorDelete = E->getOperatorDelete();
1521   CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
1522                                            allocatedPtr, operatorDelete,
1523                                            numElements, elementType,
1524                                            cookieSize);
1525 
1526   // Destroy the elements.
1527   if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
1528     assert(numElements && "no element count for a type with a destructor!");
1529 
1530     llvm::Value *arrayEnd =
1531       CGF.Builder.CreateInBoundsGEP(deletedPtr, numElements, "delete.end");
1532 
1533     // Note that it is legal to allocate a zero-length array, and we
1534     // can never fold the check away because the length should always
1535     // come from a cookie.
1536     CGF.emitArrayDestroy(deletedPtr, arrayEnd, elementType,
1537                          CGF.getDestroyer(dtorKind),
1538                          /*checkZeroLength*/ true,
1539                          CGF.needsEHCleanup(dtorKind));
1540   }
1541 
1542   // Pop the cleanup block.
1543   CGF.PopCleanupBlock();
1544 }
1545 
1546 void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
1547   const Expr *Arg = E->getArgument();
1548   llvm::Value *Ptr = EmitScalarExpr(Arg);
1549 
1550   // Null check the pointer.
1551   llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
1552   llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");
1553 
1554   llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");
1555 
1556   Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
1557   EmitBlock(DeleteNotNull);
1558 
1559   // We might be deleting a pointer to array.  If so, GEP down to the
1560   // first non-array element.
1561   // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
1562   QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
1563   if (DeleteTy->isConstantArrayType()) {
1564     llvm::Value *Zero = Builder.getInt32(0);
1565     SmallVector<llvm::Value*,8> GEP;
1566 
1567     GEP.push_back(Zero); // point at the outermost array
1568 
1569     // For each layer of array type we're pointing at:
1570     while (const ConstantArrayType *Arr
1571              = getContext().getAsConstantArrayType(DeleteTy)) {
1572       // 1. Unpeel the array type.
1573       DeleteTy = Arr->getElementType();
1574 
1575       // 2. GEP to the first element of the array.
1576       GEP.push_back(Zero);
1577     }
1578 
1579     Ptr = Builder.CreateInBoundsGEP(Ptr, GEP, "del.first");
1580   }
1581 
1582   assert(ConvertTypeForMem(DeleteTy) ==
1583          cast<llvm::PointerType>(Ptr->getType())->getElementType());
1584 
1585   if (E->isArrayForm()) {
1586     EmitArrayDelete(*this, E, Ptr, DeleteTy);
1587   } else {
1588     EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy,
1589                      E->isGlobalDelete());
1590   }
1591 
1592   EmitBlock(DeleteEnd);
1593 }
1594 
1595 static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
1596   // void __cxa_bad_typeid();
1597   llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1598 
1599   return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1600 }
1601 
1602 static void EmitBadTypeidCall(CodeGenFunction &CGF) {
1603   llvm::Value *Fn = getBadTypeidFn(CGF);
1604   CGF.EmitCallOrInvoke(Fn).setDoesNotReturn();
1605   CGF.Builder.CreateUnreachable();
1606 }
1607 
1608 static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF,
1609                                          const Expr *E,
1610                                          llvm::Type *StdTypeInfoPtrTy) {
1611   // Get the vtable pointer.
1612   llvm::Value *ThisPtr = CGF.EmitLValue(E).getAddress();
1613 
1614   // C++ [expr.typeid]p2:
1615   //   If the glvalue expression is obtained by applying the unary * operator to
1616   //   a pointer and the pointer is a null pointer value, the typeid expression
1617   //   throws the std::bad_typeid exception.
1618   if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E->IgnoreParens())) {
1619     if (UO->getOpcode() == UO_Deref) {
1620       llvm::BasicBlock *BadTypeidBlock =
1621         CGF.createBasicBlock("typeid.bad_typeid");
1622       llvm::BasicBlock *EndBlock =
1623         CGF.createBasicBlock("typeid.end");
1624 
1625       llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr);
1626       CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);
1627 
1628       CGF.EmitBlock(BadTypeidBlock);
1629       EmitBadTypeidCall(CGF);
1630       CGF.EmitBlock(EndBlock);
1631     }
1632   }
1633 
1634   llvm::Value *Value = CGF.GetVTablePtr(ThisPtr,
1635                                         StdTypeInfoPtrTy->getPointerTo());
1636 
1637   // Load the type info.
1638   Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
1639   return CGF.Builder.CreateLoad(Value);
1640 }
1641 
1642 llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
1643   llvm::Type *StdTypeInfoPtrTy =
1644     ConvertType(E->getType())->getPointerTo();
1645 
1646   if (E->isTypeOperand()) {
1647     llvm::Constant *TypeInfo =
1648       CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
1649     return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
1650   }
1651 
1652   // C++ [expr.typeid]p2:
1653   //   When typeid is applied to a glvalue expression whose type is a
1654   //   polymorphic class type, the result refers to a std::type_info object
1655   //   representing the type of the most derived object (that is, the dynamic
1656   //   type) to which the glvalue refers.
1657   if (E->isPotentiallyEvaluated())
1658     return EmitTypeidFromVTable(*this, E->getExprOperand(),
1659                                 StdTypeInfoPtrTy);
1660 
1661   QualType OperandTy = E->getExprOperand()->getType();
1662   return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
1663                                StdTypeInfoPtrTy);
1664 }
1665 
1666 static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
1667   // void *__dynamic_cast(const void *sub,
1668   //                      const abi::__class_type_info *src,
1669   //                      const abi::__class_type_info *dst,
1670   //                      std::ptrdiff_t src2dst_offset);
1671 
1672   llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1673   llvm::Type *PtrDiffTy =
1674     CGF.ConvertType(CGF.getContext().getPointerDiffType());
1675 
1676   llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
1677 
1678   llvm::FunctionType *FTy =
1679     llvm::FunctionType::get(Int8PtrTy, Args, false);
1680 
1681   return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast");
1682 }
1683 
1684 static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
1685   // void __cxa_bad_cast();
1686   llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1687   return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1688 }
1689 
1690 static void EmitBadCastCall(CodeGenFunction &CGF) {
1691   llvm::Value *Fn = getBadCastFn(CGF);
1692   CGF.EmitCallOrInvoke(Fn).setDoesNotReturn();
1693   CGF.Builder.CreateUnreachable();
1694 }
1695 
1696 static llvm::Value *
1697 EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
1698                     QualType SrcTy, QualType DestTy,
1699                     llvm::BasicBlock *CastEnd) {
1700   llvm::Type *PtrDiffLTy =
1701     CGF.ConvertType(CGF.getContext().getPointerDiffType());
1702   llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1703 
1704   if (const PointerType *PTy = DestTy->getAs<PointerType>()) {
1705     if (PTy->getPointeeType()->isVoidType()) {
1706       // C++ [expr.dynamic.cast]p7:
1707       //   If T is "pointer to cv void," then the result is a pointer to the
1708       //   most derived object pointed to by v.
1709 
1710       // Get the vtable pointer.
1711       llvm::Value *VTable = CGF.GetVTablePtr(Value, PtrDiffLTy->getPointerTo());
1712 
1713       // Get the offset-to-top from the vtable.
1714       llvm::Value *OffsetToTop =
1715         CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
1716       OffsetToTop = CGF.Builder.CreateLoad(OffsetToTop, "offset.to.top");
1717 
1718       // Finally, add the offset to the pointer.
1719       Value = CGF.EmitCastToVoidPtr(Value);
1720       Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);
1721 
1722       return CGF.Builder.CreateBitCast(Value, DestLTy);
1723     }
1724   }
1725 
1726   QualType SrcRecordTy;
1727   QualType DestRecordTy;
1728 
1729   if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) {
1730     SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
1731     DestRecordTy = DestPTy->getPointeeType();
1732   } else {
1733     SrcRecordTy = SrcTy;
1734     DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
1735   }
1736 
1737   assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
1738   assert(DestRecordTy->isRecordType() && "dest type must be a record type!");
1739 
1740   llvm::Value *SrcRTTI =
1741     CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
1742   llvm::Value *DestRTTI =
1743     CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());
1744 
1745   // FIXME: Actually compute a hint here.
1746   llvm::Value *OffsetHint = llvm::ConstantInt::get(PtrDiffLTy, -1ULL);
1747 
1748   // Emit the call to __dynamic_cast.
1749   Value = CGF.EmitCastToVoidPtr(Value);
1750   Value = CGF.Builder.CreateCall4(getDynamicCastFn(CGF), Value,
1751                                   SrcRTTI, DestRTTI, OffsetHint);
1752   Value = CGF.Builder.CreateBitCast(Value, DestLTy);
1753 
1754   /// C++ [expr.dynamic.cast]p9:
1755   ///   A failed cast to reference type throws std::bad_cast
1756   if (DestTy->isReferenceType()) {
1757     llvm::BasicBlock *BadCastBlock =
1758       CGF.createBasicBlock("dynamic_cast.bad_cast");
1759 
1760     llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
1761     CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);
1762 
1763     CGF.EmitBlock(BadCastBlock);
1764     EmitBadCastCall(CGF);
1765   }
1766 
1767   return Value;
1768 }
1769 
1770 static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
1771                                           QualType DestTy) {
1772   llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1773   if (DestTy->isPointerType())
1774     return llvm::Constant::getNullValue(DestLTy);
1775 
1776   /// C++ [expr.dynamic.cast]p9:
1777   ///   A failed cast to reference type throws std::bad_cast
1778   EmitBadCastCall(CGF);
1779 
1780   CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
1781   return llvm::UndefValue::get(DestLTy);
1782 }
1783 
1784 llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
1785                                               const CXXDynamicCastExpr *DCE) {
1786   QualType DestTy = DCE->getTypeAsWritten();
1787 
1788   if (DCE->isAlwaysNull())
1789     return EmitDynamicCastToNull(*this, DestTy);
1790 
1791   QualType SrcTy = DCE->getSubExpr()->getType();
1792 
1793   // C++ [expr.dynamic.cast]p4:
1794   //   If the value of v is a null pointer value in the pointer case, the result
1795   //   is the null pointer value of type T.
1796   bool ShouldNullCheckSrcValue = SrcTy->isPointerType();
1797 
1798   llvm::BasicBlock *CastNull = 0;
1799   llvm::BasicBlock *CastNotNull = 0;
1800   llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");
1801 
1802   if (ShouldNullCheckSrcValue) {
1803     CastNull = createBasicBlock("dynamic_cast.null");
1804     CastNotNull = createBasicBlock("dynamic_cast.notnull");
1805 
1806     llvm::Value *IsNull = Builder.CreateIsNull(Value);
1807     Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
1808     EmitBlock(CastNotNull);
1809   }
1810 
1811   Value = EmitDynamicCastCall(*this, Value, SrcTy, DestTy, CastEnd);
1812 
1813   if (ShouldNullCheckSrcValue) {
1814     EmitBranch(CastEnd);
1815 
1816     EmitBlock(CastNull);
1817     EmitBranch(CastEnd);
1818   }
1819 
1820   EmitBlock(CastEnd);
1821 
1822   if (ShouldNullCheckSrcValue) {
1823     llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
1824     PHI->addIncoming(Value, CastNotNull);
1825     PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);
1826 
1827     Value = PHI;
1828   }
1829 
1830   return Value;
1831 }
1832 
1833 void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
1834   RunCleanupsScope Scope(*this);
1835   LValue SlotLV = MakeAddrLValue(Slot.getAddr(), E->getType(),
1836                                  Slot.getAlignment());
1837 
1838   CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
1839   for (LambdaExpr::capture_init_iterator i = E->capture_init_begin(),
1840                                          e = E->capture_init_end();
1841        i != e; ++i, ++CurField) {
1842     // Emit initialization
1843 
1844     LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
1845     ArrayRef<VarDecl *> ArrayIndexes;
1846     if (CurField->getType()->isArrayType())
1847       ArrayIndexes = E->getCaptureInitIndexVars(i);
1848     EmitInitializerForField(*CurField, LV, *i, ArrayIndexes);
1849   }
1850 }
1851