//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CallSite.h"

using namespace clang;
using namespace CodeGen;

RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
                                          SourceLocation CallLoc,
                                          llvm::Value *Callee,
                                          ReturnValueSlot ReturnValue,
                                          llvm::Value *This,
                                          llvm::Value *ImplicitParam,
                                          QualType ImplicitParamTy,
                                          CallExpr::const_arg_iterator ArgBeg,
                                          CallExpr::const_arg_iterator ArgEnd) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  // C++11 [class.mfct.non-static]p2:
  //   If a non-static member function of a class X is called for an object that
  //   is not of type X, or of a type derived from X, the behavior is undefined.
  EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall
                                            : TCK_MemberCall,
                CallLoc, This, getContext().getRecordType(MD->getParent()));

  CallArgList Args;

  // Push the this ptr.
  Args.add(RValue::get(This), MD->getThisType(getContext()));

  // If there is an implicit parameter (e.g. VTT), emit it.
  if (ImplicitParam) {
    Args.add(RValue::get(ImplicitParam), ImplicitParamTy);
  }

  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());

  // And the rest of the call args.
  EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);

  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
                  Callee, ReturnValue, Args, MD);
}

// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
// quite what we want.
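// For example, given '(__extension__ (b))', this returns the underlying 'b'
// expression, stripping the parentheses, the '__extension__' node, and any
// intervening no-op casts.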
static const Expr *skipNoOpCastsAndParens(const Expr *E) {
  while (true) {
    if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
      E = PE->getSubExpr();
      continue;
    }

    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
      if (CE->getCastKind() == CK_NoOp) {
        E = CE->getSubExpr();
        continue;
      }
    }
    if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
      if (UO->getOpcode() == UO_Extension) {
        E = UO->getSubExpr();
        continue;
      }
    }
    return E;
  }
}
/// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on the
/// given expr can be devirtualized.
static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
                                               const Expr *Base,
                                               const CXXMethodDecl *MD) {

  // When building with -fapple-kext, all calls must go through the vtable since
  // the kernel linker can do runtime patching of vtables.
  if (Context.getLangOpts().AppleKext)
    return false;

  // If the most derived class is marked final, we know that no subclass can
  // override this member function and so we can devirtualize it. For example:
  //
  // struct A { virtual void f(); };
  // struct B final : A { };
  //
  // void f(B *b) {
  //   b->f();
  // }
  //
  const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType();
  if (MostDerivedClassDecl->hasAttr<FinalAttr>())
    return true;

  // If the member function is marked 'final', we know that it can't be
  // overridden and we can therefore devirtualize calls to it.
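  // For example:
  //
  // struct A { virtual void f() final; };
  //
  // void g(A *a) {
  //   a->f();
  // }
  //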
  if (MD->hasAttr<FinalAttr>())
    return true;

  // Similarly, if the class itself is marked 'final', it can't be derived
  // from, so no override can exist and we can therefore devirtualize the
  // member function call.
  if (MD->getParent()->hasAttr<FinalAttr>())
    return true;

  Base = skipNoOpCastsAndParens(Base);
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
      // The variable itself has record (not pointer or reference) type, so
      // its dynamic type is its static type and we can devirtualize the call.
      return VD->getType()->isRecordType();
    }

    return false;
  }

  // We can devirtualize calls on an object accessed by a class member access
  // expression, since by C++11 [basic.life]p6 we know that it can't refer to
  // a derived class object constructed in the same location.
  if (const MemberExpr *ME = dyn_cast<MemberExpr>(Base))
    if (const ValueDecl *VD = dyn_cast<ValueDecl>(ME->getMemberDecl()))
      return VD->getType()->isRecordType();
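  // For example, given 'struct S { A a; };', the call 's.a.f()' can be
  // devirtualized: the member 'a' is a complete A object and can never be a
  // subobject of a class derived from A.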

  // We can always devirtualize calls on temporary object expressions.
  if (isa<CXXConstructExpr>(Base))
    return true;

  // And calls on bound temporaries.
  if (isa<CXXBindTemporaryExpr>(Base))
    return true;

  // Check if this is a call expr that returns a record type.
  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
    return CE->getCallReturnType()->isRecordType();

  // We can't devirtualize the call.
  return false;
}

static CXXRecordDecl *getCXXRecord(const Expr *E) {
  QualType T = E->getType();
  if (const PointerType *PTy = T->getAs<PointerType>())
    T = PTy->getPointeeType();
  const RecordType *Ty = T->castAs<RecordType>();
  return cast<CXXRecordDecl>(Ty->getDecl());
}

// Note: This function also emits constructor calls to support the MSVC
// extension that allows explicit constructor function calls.
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  const Expr *callee = CE->getCallee()->IgnoreParens();

  if (isa<BinaryOperator>(callee))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(callee);
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
    return EmitCall(getContext().getPointerType(MD->getType()), Callee,
                    ReturnValue, CE->arg_begin(), CE->arg_end());
  }

  // Compute the object pointer.
  const Expr *Base = ME->getBase();
  bool CanUseVirtualCall = MD->isVirtual() && !ME->hasQualifier();

  const CXXMethodDecl *DevirtualizedMethod = NULL;
  if (CanUseVirtualCall &&
      canDevirtualizeMemberFunctionCalls(getContext(), Base, MD)) {
    const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
    DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
    assert(DevirtualizedMethod);
    const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
    const Expr *Inner = Base->ignoreParenBaseCasts();
    if (getCXXRecord(Inner) == DevirtualizedClass)
      // If the class of the Inner expression is where the dynamic method
      // is defined, build the this pointer from it.
      Base = Inner;
    else if (getCXXRecord(Base) != DevirtualizedClass) {
      // If the method is defined in a class that is not the best dynamic
      // one or the one of the full expression, we would have to build
      // a derived-to-base cast to compute the correct this pointer, but
      // we don't have support for that yet, so do a virtual call.
      DevirtualizedMethod = NULL;
    }
    // If the return types are not the same, this might be a case where more
    // code needs to run to compensate for it. For example, the derived
    // method might return a type that inherits from the return type of MD
    // and has a prefix.
    // For now we just avoid devirtualizing these covariant cases.
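    // For example:
    //
    // struct B { virtual B *clone(); };
    // struct D : B { D *clone() override; };
    //
    // Devirtualizing a call to clone() through a B* to a direct call to
    // D::clone would also require adjusting the returned D* back to a B*.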
    if (DevirtualizedMethod &&
        DevirtualizedMethod->getResultType().getCanonicalType() !=
        MD->getResultType().getCanonicalType())
      DevirtualizedMethod = NULL;
  }

  llvm::Value *This;
  if (ME->isArrow())
    This = EmitScalarExpr(Base);
  else
    This = EmitLValue(Base).getAddress();

  if (MD->isTrivial()) {
    if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
      return RValue::get(0);

    if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
      // We don't like to generate the trivial copy/move assignment operator
      // when it isn't necessary; just produce the proper effect here.
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitAggregateAssign(This, RHS, CE->getType());
      return RValue::get(This);
    }

    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
      // Trivial move and copy ctor are the same.
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
                                     CE->arg_begin(), CE->arg_end());
      return RValue::get(This);
    }
    llvm_unreachable("unknown trivial member function");
  }

  // Compute the function type we're calling.
  const CXXMethodDecl *CalleeDecl = DevirtualizedMethod ? DevirtualizedMethod : MD;
  const CGFunctionInfo *FInfo = 0;
  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
    FInfo = &CGM.getTypes().arrangeCXXDestructor(Dtor,
                                                 Dtor_Complete);
  else if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(CalleeDecl))
    FInfo = &CGM.getTypes().arrangeCXXConstructorDeclaration(Ctor,
                                                             Ctor_Complete);
  else
    FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);

  llvm::Type *Ty = CGM.getTypes().GetFunctionType(*FInfo);

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
  bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;
  llvm::Value *Callee;

  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
    assert(CE->arg_begin() == CE->arg_end() &&
           "Destructor shouldn't have explicit parameters");
    assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
    if (UseVirtualCall) {
      CGM.getCXXABI().EmitVirtualDestructorCall(*this, Dtor, Dtor_Complete,
                                                CE->getExprLoc(), This);
    } else {
      if (getLangOpts().AppleKext &&
          MD->isVirtual() &&
          ME->hasQualifier())
        Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
      else if (!DevirtualizedMethod)
        Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
      else {
        const CXXDestructorDecl *DDtor =
          cast<CXXDestructorDecl>(DevirtualizedMethod);
        Callee = CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty);
      }
      EmitCXXMemberCall(MD, CE->getExprLoc(), Callee, ReturnValue, This,
                        /*ImplicitParam=*/0, QualType(), 0, 0);
    }
    return RValue::get(0);
  }

  if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
    Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
  } else if (UseVirtualCall) {
    Callee = BuildVirtualCall(MD, This, Ty);
  } else {
    if (getLangOpts().AppleKext &&
        MD->isVirtual() &&
        ME->hasQualifier())
      Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
    else if (!DevirtualizedMethod)
      Callee = CGM.GetAddrOfFunction(MD, Ty);
    else {
      Callee = CGM.GetAddrOfFunction(DevirtualizedMethod, Ty);
    }
  }

  return EmitCXXMemberCall(MD, CE->getExprLoc(), Callee, ReturnValue, This,
                           /*ImplicitParam=*/0, QualType(),
                           CE->arg_begin(), CE->arg_end());
}

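// Emits a call through a pointer to member function, e.g. '(p->*pmf)(args)'
// or '(obj.*pmf)(args)'; the callee of such a CXXMemberCallExpr is a
// BinaryOperator with opcode BO_PtrMemI or BO_PtrMemD, respectively.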
RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  const BinaryOperator *BO =
      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();

  const MemberPointerType *MPT =
    MemFnExpr->getType()->castAs<MemberPointerType>();

  const FunctionProtoType *FPT =
    MPT->getPointeeType()->castAs<FunctionProtoType>();
  const CXXRecordDecl *RD =
    cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Emit the 'this' pointer.
  llvm::Value *This;

  if (BO->getOpcode() == BO_PtrMemI)
    This = EmitScalarExpr(BaseExpr);
  else
    This = EmitLValue(BaseExpr).getAddress();

  EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This,
                QualType(MPT->getClass(), 0));

  // Ask the ABI to load the callee.  Note that This is modified.
  llvm::Value *Callee =
    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);

  CallArgList Args;

  QualType ThisType =
    getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.add(RValue::get(This), ThisType);

  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);

  // And the rest of the call args
  EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required), Callee,
                  ReturnValue, Args);
}

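// Emits a call to an overloaded operator declared as a member function,
// e.g. 'a = b' where the class of 'a' declares 'operator='; arg 0 of the
// CXXOperatorCallExpr is the implicit object argument.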
RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                               const CXXMethodDecl *MD,
                                               ReturnValueSlot ReturnValue) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");
  LValue LV = EmitLValue(E->getArg(0));
  llvm::Value *This = LV.getAddress();

  if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
      MD->isTrivial()) {
    llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
    QualType Ty = E->getType();
    EmitAggregateAssign(This, Src, Ty);
    return RValue::get(This);
  }

  llvm::Value *Callee = EmitCXXOperatorMemberCallee(E, MD, This);
  return EmitCXXMemberCall(MD, E->getExprLoc(), Callee, ReturnValue, This,
                           /*ImplicitParam=*/0, QualType(),
                           E->arg_begin() + 1, E->arg_end());
}

RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
                                               ReturnValueSlot ReturnValue) {
  return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
}

static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
                                            llvm::Value *DestPtr,
                                            const CXXRecordDecl *Base) {
  if (Base->isEmpty())
    return;

  DestPtr = CGF.EmitCastToVoidPtr(DestPtr);

  const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
  CharUnits Size = Layout.getNonVirtualSize();
  CharUnits Align = Layout.getNonVirtualAlign();

  llvm::Value *SizeVal = CGF.CGM.getSize(Size);

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  // TODO: isZeroInitializable can be over-conservative in the case where a
  // virtual base contains a member pointer.
  if (!CGF.CGM.getTypes().isZeroInitializable(Base)) {
    llvm::Constant *NullConstant = CGF.CGM.EmitNullConstantForBase(Base);

    llvm::GlobalVariable *NullVariable =
      new llvm::GlobalVariable(CGF.CGM.getModule(), NullConstant->getType(),
                               /*isConstant=*/true,
                               llvm::GlobalVariable::PrivateLinkage,
                               NullConstant, Twine());
    NullVariable->setAlignment(Align.getQuantity());
    llvm::Value *SrcPtr = CGF.EmitCastToVoidPtr(NullVariable);

    // Get and call the appropriate llvm.memcpy overload.
    CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity());
    return;
  }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  CGF.Builder.CreateMemSet(DestPtr, CGF.Builder.getInt8(0), SizeVal,
                           Align.getQuantity());
}

void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
                                      AggValueSlot Dest) {
  assert(!Dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now, unless destination is
  // already zeroed.
  if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
    case CXXConstructExpr::CK_Complete:
      EmitNullInitialization(Dest.getAddr(), E->getType());
      break;
    case CXXConstructExpr::CK_VirtualBase:
    case CXXConstructExpr::CK_NonVirtualBase:
      EmitNullBaseClassInitialization(*this, Dest.getAddr(), CD->getParent());
      break;
    }
  }

  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Elide the constructor if we're constructing from a temporary.
  // The temporary check is required because Sema sets this on NRVO
  // returns.
  if (getLangOpts().ElideConstructors && E->isElidable()) {
    assert(getContext().hasSameUnqualifiedType(E->getType(),
                                               E->getArg(0)->getType()));
    if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
      EmitAggExpr(E->getArg(0), Dest);
      return;
    }
  }

  if (const ConstantArrayType *arrayType
        = getContext().getAsConstantArrayType(E->getType())) {
    EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddr(),
                               E->arg_begin(), E->arg_end());
  } else {
    CXXCtorType Type = Ctor_Complete;
    bool ForVirtualBase = false;
    bool Delegating = false;

    switch (E->getConstructionKind()) {
     case CXXConstructExpr::CK_Delegating:
      // We should be emitting a constructor; GlobalDecl will assert this
      Type = CurGD.getCtorType();
      Delegating = true;
      break;

     case CXXConstructExpr::CK_Complete:
      Type = Ctor_Complete;
      break;

     case CXXConstructExpr::CK_VirtualBase:
      ForVirtualBase = true;
      // fall-through

     case CXXConstructExpr::CK_NonVirtualBase:
      Type = Ctor_Base;
    }

    // Call the constructor.
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest.getAddr(),
                           E->arg_begin(), E->arg_end());
  }
}

void
CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
                                            llvm::Value *Src,
                                            const Expr *Exp) {
  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
    Exp = E->getSubExpr();
  assert(isa<CXXConstructExpr>(Exp) &&
         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
  const CXXConstructorDecl *CD = E->getConstructor();
  RunCleanupsScope Scope(*this);

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  // FIXME. Do I still need this for a copy ctor synthesis?
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest, E->getType());

  assert(!getContext().getAsConstantArrayType(E->getType())
         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src,
                                 E->arg_begin(), E->arg_end());
}

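// An array cookie records the number of elements allocated by an array new
// expression so that the matching 'delete[]' knows how many destructors to
// run. For example, in the Itanium ABI, 'new A[n]' for a class A with a
// non-trivial destructor typically allocates sizeof(size_t) extra bytes in
// front of the array to hold n.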
static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
                                        const CXXNewExpr *E) {
  if (!E->isArray())
    return CharUnits::Zero();

  // No cookie is required if the operator new[] being used is the
  // reserved placement operator new[].
  if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
    return CharUnits::Zero();

  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
}

static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
                                        const CXXNewExpr *e,
                                        unsigned minElements,
                                        llvm::Value *&numElements,
                                        llvm::Value *&sizeWithoutCookie) {
  QualType type = e->getAllocatedType();

  if (!e->isArray()) {
    CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
    sizeWithoutCookie
      = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
    return sizeWithoutCookie;
  }

  // The width of size_t.
  unsigned sizeWidth = CGF.SizeTy->getBitWidth();

  // Figure out the cookie size.
  llvm::APInt cookieSize(sizeWidth,
                         CalculateCookiePadding(CGF, e).getQuantity());

  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  numElements = CGF.EmitScalarExpr(e->getArraySize());
  assert(isa<llvm::IntegerType>(numElements->getType()));

  // The number of elements can have an arbitrary integer type;
  // essentially, we need to multiply it by a constant factor, add a
  // cookie size, and verify that the result is representable as a
  // size_t.  That's just a gloss, though, and it's wrong in one
  // important way: if the count is negative, it's an error even if
  // the cookie size would bring the total size >= 0.
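  // For example, on a 64-bit target where int is 4 bytes, 'new int[n]'
  // must compute size_t(n) * 4 and fail (by requesting a huge allocation)
  // if n is negative or if the product is not representable in a size_t.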
  bool isSigned
    = e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
  llvm::IntegerType *numElementsType
    = cast<llvm::IntegerType>(numElements->getType());
  unsigned numElementsWidth = numElementsType->getBitWidth();

  // Compute the constant factor.
  llvm::APInt arraySizeMultiplier(sizeWidth, 1);
  while (const ConstantArrayType *CAT
             = CGF.getContext().getAsConstantArrayType(type)) {
    type = CAT->getElementType();
    arraySizeMultiplier *= CAT->getSize();
  }

  CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
  llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
  typeSizeMultiplier *= arraySizeMultiplier;

  // This will be a size_t.
  llvm::Value *size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *numElementsC =
        dyn_cast<llvm::ConstantInt>(numElements)) {
    const llvm::APInt &count = numElementsC->getValue();

    bool hasAnyOverflow = false;

    // If 'count' was a negative number, it's an overflow.
    if (isSigned && count.isNegative())
      hasAnyOverflow = true;

    // We want to do all this arithmetic in size_t.  If numElements is
    // wider than that, check whether it's already too big, and if so,
    // overflow.
    else if (numElementsWidth > sizeWidth &&
             numElementsWidth - sizeWidth > count.countLeadingZeros())
      hasAnyOverflow = true;

    // Okay, compute a count at the right width.
    llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);

    // If there is a brace-initializer, we cannot allocate fewer elements than
    // there are initializers. If we do, that's treated like an overflow.
    if (adjustedCount.ult(minElements))
      hasAnyOverflow = true;

    // Scale numElements by that.  This might overflow, but we don't
    // care because it only overflows if allocationSize does, too, and
    // if that overflows then we shouldn't use this.
    numElements = llvm::ConstantInt::get(CGF.SizeTy,
                                         adjustedCount * arraySizeMultiplier);

    // Compute the size before cookie, and track whether it overflowed.
    bool overflow;
    llvm::APInt allocationSize
      = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
    hasAnyOverflow |= overflow;

    // Add in the cookie, and check whether it's overflowed.
    if (cookieSize != 0) {
      // Save the current size without a cookie.  This shouldn't be
      // used if there was overflow.
      sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);

      allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
      hasAnyOverflow |= overflow;
    }

    // On overflow, produce a -1 so operator new will fail.
    if (hasAnyOverflow) {
      size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
    } else {
      size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
    }

  // Otherwise, we might need to use the overflow intrinsics.
  } else {
    // There are up to five conditions we need to test for:
    // 1) if isSigned, we need to check whether numElements is negative;
    // 2) if numElementsWidth > sizeWidth, we need to check whether
    //    numElements is larger than something representable in size_t;
    // 3) if minElements > 0, we need to check whether numElements is smaller
    //    than that;
    // 4) we need to compute
    //      sizeWithoutCookie := numElements * typeSizeMultiplier
    //    and check whether it overflows; and
    // 5) if we need a cookie, we need to compute
    //      size := sizeWithoutCookie + cookieSize
    //    and check whether it overflows.

    llvm::Value *hasOverflow = 0;

    // If numElementsWidth > sizeWidth, then one way or another, we're
    // going to have to do a comparison for (2), and this happens to
    // take care of (1), too.
    if (numElementsWidth > sizeWidth) {
      llvm::APInt threshold(numElementsWidth, 1);
      threshold <<= sizeWidth;

      llvm::Value *thresholdV
        = llvm::ConstantInt::get(numElementsType, threshold);

      hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
      numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);

    // Otherwise, if we're signed, we want to sext up to size_t.
    } else if (isSigned) {
      if (numElementsWidth < sizeWidth)
        numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);

      // If there's a non-1 type size multiplier, then we can do the
      // signedness check at the same time as we do the multiply
      // because a negative number times anything will cause an
      // unsigned overflow.  Otherwise, we have to do it here. But at least
      // in this case, we can subsume the >= minElements check.
      if (typeSizeMultiplier == 1)
        hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));

    // Otherwise, zext up to size_t if necessary.
    } else if (numElementsWidth < sizeWidth) {
      numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
    }

    assert(numElements->getType() == CGF.SizeTy);

    if (minElements) {
      // Don't allow allocation of fewer elements than we have initializers.
      if (!hasOverflow) {
        hasOverflow = CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));
      } else if (numElementsWidth > sizeWidth) {
        // The other existing overflow subsumes this check.
        // We do an unsigned comparison, since any signed value < -1 is
        // taken care of either above or below.
        hasOverflow = CGF.Builder.CreateOr(hasOverflow,
                          CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements)));
      }
    }

    size = numElements;

    // Multiply by the type size if necessary.  This multiplier
    // includes all the factors for nested arrays.
    //
    // This step also causes numElements to be scaled up by the
    // nested-array factor if necessary.  Overflow on this computation
    // can be ignored because the result shouldn't be used if
    // allocation fails.
    if (typeSizeMultiplier != 1) {
      llvm::Value *umul_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);

      llvm::Value *tsmV =
        llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
      llvm::Value *result =
        CGF.Builder.CreateCall2(umul_with_overflow, size, tsmV);

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);

      // Also scale up numElements by the array size multiplier.
      if (arraySizeMultiplier != 1) {
        // If the base element type size is 1, then we can re-use the
        // multiply we just did.
        if (typeSize.isOne()) {
          assert(arraySizeMultiplier == typeSizeMultiplier);
          numElements = size;

        // Otherwise we need a separate multiply.
        } else {
          llvm::Value *asmV =
            llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
          numElements = CGF.Builder.CreateMul(numElements, asmV);
        }
      }
    } else {
      // numElements doesn't need to be scaled.
      assert(arraySizeMultiplier == 1);
    }

    // Add in the cookie size if necessary.
    if (cookieSize != 0) {
      sizeWithoutCookie = size;

      llvm::Value *uadd_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);

      llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
      llvm::Value *result =
        CGF.Builder.CreateCall2(uadd_with_overflow, size, cookieSizeV);

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);
    }

    // If we had any possibility of dynamic overflow, make a select to
    // overwrite 'size' with an all-ones value, which should cause
    // operator new to throw.
    if (hasOverflow)
      size = CGF.Builder.CreateSelect(hasOverflow,
                                 llvm::Constant::getAllOnesValue(CGF.SizeTy),
                                      size);
  }

  if (cookieSize == 0)
    sizeWithoutCookie = size;
  else
    assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");

  return size;
}

static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
                                    QualType AllocType, llvm::Value *NewPtr) {

  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(AllocType);
  switch (CGF.getEvaluationKind(AllocType)) {
  case TEK_Scalar:
    CGF.EmitScalarInit(Init, 0, CGF.MakeAddrLValue(NewPtr, AllocType,
                                                   Alignment),
                       false);
    return;
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType,
                                                           Alignment),
                                  /*isInit*/ true);
    return;
  case TEK_Aggregate: {
    AggValueSlot Slot
      = AggValueSlot::forAddr(NewPtr, Alignment, AllocType.getQualifiers(),
                              AggValueSlot::IsDestructed,
                              AggValueSlot::DoesNotNeedGCBarriers,
                              AggValueSlot::IsNotAliased);
    CGF.EmitAggExpr(Init, Slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}

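// Emits the initializer for an array new expression, e.g. 'new A[n]{x, y}':
// any explicit initializer-list elements are emitted one by one, and the
// remaining elements are then filled in by a loop that applies the array
// filler expression, with a partial-destruction cleanup active in case an
// initializer throws.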
void
CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
                                         QualType elementType,
                                         llvm::Value *beginPtr,
                                         llvm::Value *numElements) {
  if (!E->hasInitializer())
    return; // We have a POD type.

  llvm::Value *explicitPtr = beginPtr;
  // Find the end of the array, hoisted out of the loop.
  llvm::Value *endPtr =
    Builder.CreateInBoundsGEP(beginPtr, numElements, "array.end");

  unsigned initializerElements = 0;

  const Expr *Init = E->getInitializer();
  llvm::AllocaInst *endOfInit = 0;
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  EHScopeStack::stable_iterator cleanup;
  llvm::Instruction *cleanupDominator = 0;
  // If the initializer is an initializer list, first do the explicit elements.
  if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
    initializerElements = ILE->getNumInits();

    // Enter a partial-destruction cleanup if necessary.
    if (needsEHCleanup(dtorKind)) {
      // In principle we could tell the cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex.  Therefore we go through an
      // alloca.
      endOfInit = CreateTempAlloca(beginPtr->getType(), "array.endOfInit");
      cleanupDominator = Builder.CreateStore(beginPtr, endOfInit);
      pushIrregularPartialArrayCleanup(beginPtr, endOfInit, elementType,
                                       getDestroyer(dtorKind));
      cleanup = EHStack.stable_begin();
    }

    for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (endOfInit) Builder.CreateStore(explicitPtr, endOfInit);
      StoreAnyExprIntoOneUnit(*this, ILE->getInit(i), elementType, explicitPtr);
      explicitPtr = Builder.CreateConstGEP1_32(explicitPtr, 1, "array.exp.next");
    }

    // The remaining elements are filled with the array filler expression.
    Init = ILE->getArrayFiller();
  }

  // Create the continuation block.
  llvm::BasicBlock *contBB = createBasicBlock("new.loop.end");

  // If the number of elements isn't constant, we have to now check if there is
  // anything left to initialize.
  if (llvm::ConstantInt *constNum = dyn_cast<llvm::ConstantInt>(numElements)) {
    // If all elements have already been initialized, skip the whole loop.
    if (constNum->getZExtValue() <= initializerElements) {
      // If there was a cleanup, deactivate it.
      if (cleanupDominator)
        DeactivateCleanupBlock(cleanup, cleanupDominator);
      return;
    }
  } else {
    llvm::BasicBlock *nonEmptyBB = createBasicBlock("new.loop.nonempty");
    llvm::Value *isEmpty = Builder.CreateICmpEQ(explicitPtr, endPtr,
                                                "array.isempty");
    Builder.CreateCondBr(isEmpty, contBB, nonEmptyBB);
    EmitBlock(nonEmptyBB);
  }

  // Enter the loop.
  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = createBasicBlock("new.loop");

  EmitBlock(loopBB);

  // Set up the current-element phi.
  llvm::PHINode *curPtr =
    Builder.CreatePHI(explicitPtr->getType(), 2, "array.cur");
  curPtr->addIncoming(explicitPtr, entryBB);

  // Store the new cleanup position for irregular cleanups.
  if (endOfInit) Builder.CreateStore(curPtr, endOfInit);

  // Enter a partial-destruction cleanup if necessary.
  if (!cleanupDominator && needsEHCleanup(dtorKind)) {
    pushRegularPartialArrayCleanup(beginPtr, curPtr, elementType,
                                   getDestroyer(dtorKind));
    cleanup = EHStack.stable_begin();
    cleanupDominator = Builder.CreateUnreachable();
  }

  // Emit the initializer into this element.
  StoreAnyExprIntoOneUnit(*this, Init, E->getAllocatedType(), curPtr);

  // Leave the cleanup if we entered one.
  if (cleanupDominator) {
    DeactivateCleanupBlock(cleanup, cleanupDominator);
    cleanupDominator->eraseFromParent();
  }

  // Advance to the next element.
  llvm::Value *nextPtr = Builder.CreateConstGEP1_32(curPtr, 1, "array.next");

  // Check whether we've gotten to the end of the array and, if so,
  // exit the loop.
  llvm::Value *isEnd = Builder.CreateICmpEQ(nextPtr, endPtr, "array.atend");
  Builder.CreateCondBr(isEnd, contBB, loopBB);
  curPtr->addIncoming(nextPtr, Builder.GetInsertBlock());

  EmitBlock(contBB);
}

952 
953 static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
954                            llvm::Value *NewPtr, llvm::Value *Size) {
955   CGF.EmitCastToVoidPtr(NewPtr);
956   CharUnits Alignment = CGF.getContext().getTypeAlignInChars(T);
957   CGF.Builder.CreateMemSet(NewPtr, CGF.Builder.getInt8(0), Size,
958                            Alignment.getQuantity(), false);
959 }
960 
961 static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
962                                QualType ElementType,
963                                llvm::Value *NewPtr,
964                                llvm::Value *NumElements,
965                                llvm::Value *AllocSizeWithoutCookie) {
966   const Expr *Init = E->getInitializer();
967   if (E->isArray()) {
968     if (const CXXConstructExpr *CCE = dyn_cast_or_null<CXXConstructExpr>(Init)){
969       CXXConstructorDecl *Ctor = CCE->getConstructor();
970       if (Ctor->isTrivial()) {
971         // If new expression did not specify value-initialization, then there
972         // is no initialization.
973         if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
974           return;
975 
976         if (CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
977           // Optimization: since zero initialization will just set the memory
978           // to all zeroes, generate a single memset to do it in one shot.
979           EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
980           return;
981         }
982       }
983 
984       CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
985                                      CCE->arg_begin(),  CCE->arg_end(),
986                                      CCE->requiresZeroInitialization());
987       return;
988     } else if (Init && isa<ImplicitValueInitExpr>(Init) &&
989                CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
990       // Optimization: since zero initialization will just set the memory
991       // to all zeroes, generate a single memset to do it in one shot.
992       EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
993       return;
994     }
995     CGF.EmitNewArrayInitializer(E, ElementType, NewPtr, NumElements);
996     return;
997   }
998 
999   if (!Init)
1000     return;
1001 
1002   StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr);
1003 }
1004 
1005 namespace {
1006   /// A cleanup to call the given 'operator delete' function upon
1007   /// abnormal exit from a new expression.
1008   class CallDeleteDuringNew : public EHScopeStack::Cleanup {
1009     size_t NumPlacementArgs;
1010     const FunctionDecl *OperatorDelete;
1011     llvm::Value *Ptr;
1012     llvm::Value *AllocSize;
1013 
1014     RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }
1015 
1016   public:
1017     static size_t getExtraSize(size_t NumPlacementArgs) {
1018       return NumPlacementArgs * sizeof(RValue);
1019     }
1020 
1021     CallDeleteDuringNew(size_t NumPlacementArgs,
1022                         const FunctionDecl *OperatorDelete,
1023                         llvm::Value *Ptr,
1024                         llvm::Value *AllocSize)
1025       : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
1026         Ptr(Ptr), AllocSize(AllocSize) {}
1027 
1028     void setPlacementArg(unsigned I, RValue Arg) {
1029       assert(I < NumPlacementArgs && "index out of range");
1030       getPlacementArgs()[I] = Arg;
1031     }
1032 
1033     void Emit(CodeGenFunction &CGF, Flags flags) {
1034       const FunctionProtoType *FPT
1035         = OperatorDelete->getType()->getAs<FunctionProtoType>();
1036       assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
1037              (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));
1038 
1039       CallArgList DeleteArgs;
1040 
1041       // The first argument is always a void*.
1042       FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
1043       DeleteArgs.add(RValue::get(Ptr), *AI++);
1044 
1045       // A member 'operator delete' can take an extra 'size_t' argument.
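      // e.g. 'void operator delete(void *p, size_t sz);' declared in the
      // class; the allocated size is passed in 'sz'.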
      if (FPT->getNumArgs() == NumPlacementArgs + 2)
        DeleteArgs.add(RValue::get(AllocSize), *AI++);

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I)
        DeleteArgs.add(getPlacementArgs()[I], *AI++);

      // Call 'operator delete'.
      CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(DeleteArgs, FPT),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
    }
  };

  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression when the new expression is
  /// conditional.
  class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    DominatingValue<RValue>::saved_type Ptr;
    DominatingValue<RValue>::saved_type AllocSize;

    DominatingValue<RValue>::saved_type *getPlacementArgs() {
      return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
    }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
    }

    CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
                                   const FunctionDecl *OperatorDelete,
                                   DominatingValue<RValue>::saved_type Ptr,
                              DominatingValue<RValue>::saved_type AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.add(Ptr.restore(CGF), *AI++);

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2) {
        RValue RV = AllocSize.restore(CGF);
        DeleteArgs.add(RV, *AI++);
      }

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
        RValue RV = getPlacementArgs()[I].restore(CGF);
        DeleteArgs.add(RV, *AI++);
      }

      // Call 'operator delete'.
      CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(DeleteArgs, FPT),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
    }
  };
}

/// Enter a cleanup to call 'operator delete' if the initializer in a
/// new-expression throws.
static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
                                  const CXXNewExpr *E,
                                  llvm::Value *NewPtr,
                                  llvm::Value *AllocSize,
                                  const CallArgList &NewArgs) {
  // If we're not inside a conditional branch, then the cleanup will
  // dominate and we can do the easier (and more efficient) thing.
  if (!CGF.isInConditionalBranch()) {
    CallDeleteDuringNew *Cleanup = CGF.EHStack
      .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
                                                 E->getNumPlacementArgs(),
                                                 E->getOperatorDelete(),
                                                 NewPtr, AllocSize);
    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
      Cleanup->setPlacementArg(I, NewArgs[I+1].RV);

    return;
  }

  // Otherwise, we need to save all this stuff.
  DominatingValue<RValue>::saved_type SavedNewPtr =
    DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
  DominatingValue<RValue>::saved_type SavedAllocSize =
    DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));

  CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
    .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(EHCleanup,
                                                 E->getNumPlacementArgs(),
                                                 E->getOperatorDelete(),
                                                 SavedNewPtr,
                                                 SavedAllocSize);
  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
    Cleanup->setPlacementArg(I,
                     DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV));

  CGF.initFullExprCleanup();
}

llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  // The element type being allocated.
  QualType allocType = getContext().getBaseElementType(E->getAllocatedType());

  // 1. Build a call to the allocation function.
  FunctionDecl *allocator = E->getOperatorNew();
  const FunctionProtoType *allocatorType =
    allocator->getType()->castAs<FunctionProtoType>();

  CallArgList allocatorArgs;

  // The allocation size is the first argument.
  QualType sizeType = getContext().getSizeType();

  // If there is a brace-initializer, we cannot allocate fewer elements than
  // there are initializers.
  unsigned minElements = 0;
  if (E->isArray() && E->hasInitializer()) {
    if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer()))
      minElements = ILE->getNumInits();
  }
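  // e.g. 'new int[n]{1, 2, 3}' has three initializers, so n must be at
  // least 3 and minElements is 3.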

  llvm::Value *numElements = 0;
  llvm::Value *allocSizeWithoutCookie = 0;
  llvm::Value *allocSize =
    EmitCXXNewAllocSize(*this, E, minElements, numElements,
                        allocSizeWithoutCookie);

  allocatorArgs.add(RValue::get(allocSize), sizeType);

  // Emit the rest of the arguments.
  // FIXME: Ideally, this should just use EmitCallArgs.
  CXXNewExpr::const_arg_iterator placementArg = E->placement_arg_begin();

  // First, use the types from the function type.
  // We start at 1 here because the first argument (the allocation size)
  // has already been emitted.
  for (unsigned i = 1, e = allocatorType->getNumArgs(); i != e;
       ++i, ++placementArg) {
    QualType argType = allocatorType->getArgType(i);

    assert(getContext().hasSameUnqualifiedType(argType.getNonReferenceType(),
                                               placementArg->getType()) &&
           "type mismatch in call argument!");

    EmitCallArg(allocatorArgs, *placementArg, argType);
  }

  // Either we've emitted all the call args, or we have a call to a
  // variadic function.
  assert((placementArg == E->placement_arg_end() ||
          allocatorType->isVariadic()) &&
         "Extra arguments to non-variadic function!");

  // If we still have any arguments, emit them using the type of the argument.
  for (CXXNewExpr::const_arg_iterator placementArgsEnd = E->placement_arg_end();
       placementArg != placementArgsEnd; ++placementArg) {
    EmitCallArg(allocatorArgs, *placementArg, placementArg->getType());
  }

  // Emit the allocation call.  If the allocator is a global placement
  // operator, just "inline" it directly.
  RValue RV;
  if (allocator->isReservedGlobalPlacementOperator()) {
    assert(allocatorArgs.size() == 2);
    RV = allocatorArgs[1].RV;
    // TODO: kill any unnecessary computations done for the size
    // argument.
  } else {
    RV = EmitCall(CGM.getTypes().arrangeFreeFunctionCall(allocatorArgs,
                                                         allocatorType),
                  CGM.GetAddrOfFunction(allocator), ReturnValueSlot(),
                  allocatorArgs, allocator);
  }

  // Emit a null check on the allocation result if the allocation
  // function is allowed to return null (because it has a non-throwing
  // exception spec; for this part, we inline
  // CXXNewExpr::shouldNullCheckAllocation()) and we have an
  // interesting initializer.
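  // e.g. 'new (std::nothrow) T' calls the non-throwing
  // 'operator new(size_t, const std::nothrow_t&)', which may return null
  // instead of throwing on failure.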
1241   bool nullCheck = allocatorType->isNothrow(getContext()) &&
1242     (!allocType.isPODType(getContext()) || E->hasInitializer());
1243 
1244   llvm::BasicBlock *nullCheckBB = 0;
1245   llvm::BasicBlock *contBB = 0;
1246 
1247   llvm::Value *allocation = RV.getScalarVal();
1248   unsigned AS = allocation->getType()->getPointerAddressSpace();
1249 
1250   // The null-check means that the initializer is conditionally
1251   // evaluated.
1252   ConditionalEvaluation conditional(*this);
1253 
1254   if (nullCheck) {
1255     conditional.begin(*this);
1256 
1257     nullCheckBB = Builder.GetInsertBlock();
1258     llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
1259     contBB = createBasicBlock("new.cont");
1260 
1261     llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
1262     Builder.CreateCondBr(isNull, contBB, notNullBB);
1263     EmitBlock(notNullBB);
1264   }
1265 
1266   // If there's an operator delete, enter a cleanup to call it if an
1267   // exception is thrown.
1268   EHScopeStack::stable_iterator operatorDeleteCleanup;
1269   llvm::Instruction *cleanupDominator = 0;
1270   if (E->getOperatorDelete() &&
1271       !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
1272     EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
1273     operatorDeleteCleanup = EHStack.stable_begin();
1274     cleanupDominator = Builder.CreateUnreachable();
1275   }
1276 
1277   assert((allocSize == allocSizeWithoutCookie) ==
1278          CalculateCookiePadding(*this, E).isZero());
1279   if (allocSize != allocSizeWithoutCookie) {
1280     assert(E->isArray());
1281     allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
1282                                                        numElements,
1283                                                        E, allocType);
1284   }

  llvm::Type *elementPtrTy
    = ConvertTypeForMem(allocType)->getPointerTo(AS);
  llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy);

  EmitNewInitializer(*this, E, allocType, result, numElements,
                     allocSizeWithoutCookie);
  if (E->isArray()) {
    // NewPtr is a pointer to the base element type.  If we're
    // allocating an array of arrays, we'll need to cast back to the
    // array pointer type.
    llvm::Type *resultType = ConvertTypeForMem(E->getType());
    if (result->getType() != resultType)
      result = Builder.CreateBitCast(result, resultType);
  }

  // Deactivate the 'operator delete' cleanup if we finished
  // initialization.
  if (operatorDeleteCleanup.isValid()) {
    DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
    cleanupDominator->eraseFromParent();
  }

  if (nullCheck) {
    conditional.end(*this);

    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    EmitBlock(contBB);

    llvm::PHINode *PHI = Builder.CreatePHI(result->getType(), 2);
    PHI->addIncoming(result, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(result->getType()),
                     nullCheckBB);

    result = PHI;
  }

  return result;
}

void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
                                     llvm::Value *Ptr,
                                     QualType DeleteTy) {
  assert(DeleteFD->getOverloadedOperator() == OO_Delete);

  const FunctionProtoType *DeleteFTy =
    DeleteFD->getType()->getAs<FunctionProtoType>();

  CallArgList DeleteArgs;

  // Check if we need to pass the size to the delete operator.
  llvm::Value *Size = 0;
  QualType SizeTy;
  if (DeleteFTy->getNumArgs() == 2) {
    SizeTy = DeleteFTy->getArgType(1);
    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
    Size = llvm::ConstantInt::get(ConvertType(SizeTy),
                                  DeleteTypeSize.getQuantity());
  }
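  // E.g. a class declaring 'void operator delete(void*, size_t);' has a
  // two-argument prototype here, so the object's size is passed as a
  // constant second argument.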

  QualType ArgTy = DeleteFTy->getArgType(0);
  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
  DeleteArgs.add(RValue::get(DeletePtr), ArgTy);

  if (Size)
    DeleteArgs.add(RValue::get(Size), SizeTy);

  // Emit the call to delete.
  EmitCall(CGM.getTypes().arrangeFreeFunctionCall(DeleteArgs, DeleteFTy),
           CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
           DeleteArgs, DeleteFD);
}

namespace {
  /// Calls the given 'operator delete' on a single object.
  struct CallObjectDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    QualType ElementType;

    CallObjectDelete(llvm::Value *Ptr,
                     const FunctionDecl *OperatorDelete,
                     QualType ElementType)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
    }
  };
}

/// Emit the code for deleting a single object.
static void EmitObjectDelete(CodeGenFunction &CGF,
                             const FunctionDecl *OperatorDelete,
                             llvm::Value *Ptr,
                             QualType ElementType,
                             bool UseGlobalDelete) {
  // Find the destructor for the type, if applicable.  If the
  // destructor is virtual, we'll just emit the vcall and return.
  const CXXDestructorDecl *Dtor = 0;
  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
      Dtor = RD->getDestructor();

      if (Dtor->isVirtual()) {
        if (UseGlobalDelete) {
          // If we're supposed to call the global delete, make sure we do so
          // even if the destructor throws.

          // Derive the complete-object pointer, which is what we need
          // to pass to the deallocation function.
          llvm::Value *completePtr =
            CGF.CGM.getCXXABI().adjustToCompleteObject(CGF, Ptr, ElementType);

          CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                                    completePtr, OperatorDelete,
                                                    ElementType);
        }

        // FIXME: Provide a source location here.
        CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
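        // With '::delete p' (UseGlobalDelete) we run the complete-object
        // destructor and free via the cleanup pushed above; otherwise the
        // deleting destructor both destroys the object and frees its
        // storage (e.g. under the Itanium ABI).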
        CGF.CGM.getCXXABI().EmitVirtualDestructorCall(CGF, Dtor, DtorType,
                                                      SourceLocation(), Ptr);

        if (UseGlobalDelete) {
          CGF.PopCleanupBlock();
        }

        return;
      }
    }
  }

  // Make sure that we call delete even if the dtor throws.
  // This doesn't have to be a conditional cleanup because we're going
  // to pop it off in a second.
  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                            Ptr, OperatorDelete, ElementType);

  if (Dtor)
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false,
                              /*Delegating=*/false,
                              Ptr);
  else if (CGF.getLangOpts().ObjCAutoRefCount &&
           ElementType->isObjCLifetimeType()) {
    switch (ElementType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong: {
      // Load the pointer value.
      llvm::Value *PtrValue = CGF.Builder.CreateLoad(Ptr,
                                             ElementType.isVolatileQualified());

      CGF.EmitARCRelease(PtrValue, ARCPreciseLifetime);
      break;
    }

    case Qualifiers::OCL_Weak:
      CGF.EmitARCDestroyWeak(Ptr);
      break;
    }
  }

  CGF.PopCleanupBlock();
}

namespace {
  /// Calls the given 'operator delete' on an array of objects.
  struct CallArrayDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    llvm::Value *NumElements;
    QualType ElementType;
    CharUnits CookieSize;

    CallArrayDelete(llvm::Value *Ptr,
                    const FunctionDecl *OperatorDelete,
                    llvm::Value *NumElements,
                    QualType ElementType,
                    CharUnits CookieSize)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
        ElementType(ElementType), CookieSize(CookieSize) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const FunctionProtoType *DeleteFTy =
        OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);

      CallArgList Args;

      // Pass the pointer as the first argument.
      QualType VoidPtrTy = DeleteFTy->getArgType(0);
      llvm::Value *DeletePtr
        = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
      Args.add(RValue::get(DeletePtr), VoidPtrTy);

      // Pass the original requested size as the second argument.
      if (DeleteFTy->getNumArgs() == 2) {
        QualType SizeType = DeleteFTy->getArgType(1);
        llvm::IntegerType *SizeTy
          = cast<llvm::IntegerType>(CGF.ConvertType(SizeType));

        CharUnits ElementTypeSize =
          CGF.CGM.getContext().getTypeSizeInChars(ElementType);

        // The size of an element, multiplied by the number of elements.
        llvm::Value *Size
          = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
        Size = CGF.Builder.CreateMul(Size, NumElements);

        // Plus the size of the cookie if applicable.
        if (!CookieSize.isZero()) {
          llvm::Value *CookieSizeV
            = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
          Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
        }

        Args.add(RValue::get(Size), SizeType);
      }
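      // E.g. with 4-byte elements, n elements, and an 8-byte cookie, the
      // size passed to operator delete[] is 4 * n + 8.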

      // Emit the call to delete.
      CGF.EmitCall(CGF.getTypes().arrangeFreeFunctionCall(Args, DeleteFTy),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), Args, OperatorDelete);
    }
  };
}

/// Emit the code for deleting an array of objects.
static void EmitArrayDelete(CodeGenFunction &CGF,
                            const CXXDeleteExpr *E,
                            llvm::Value *deletedPtr,
                            QualType elementType) {
  llvm::Value *numElements = 0;
  llvm::Value *allocatedPtr = 0;
  CharUnits cookieSize;
  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
                                      numElements, allocatedPtr, cookieSize);

  assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");

  // Make sure that we call delete even if one of the dtors throws.
  const FunctionDecl *operatorDelete = E->getOperatorDelete();
  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
                                           allocatedPtr, operatorDelete,
                                           numElements, elementType,
                                           cookieSize);
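  // E.g. in 'delete [] p' where an element destructor can throw, this
  // cleanup still runs on the exception path, so the storage is freed
  // exactly once.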

  // Destroy the elements.
  if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
    assert(numElements && "no element count for a type with a destructor!");

    llvm::Value *arrayEnd =
      CGF.Builder.CreateInBoundsGEP(deletedPtr, numElements, "delete.end");

    // Note that it is legal to allocate a zero-length array, and we
    // can never fold the check away because the length should always
    // come from a cookie.
    CGF.emitArrayDestroy(deletedPtr, arrayEnd, elementType,
                         CGF.getDestroyer(dtorKind),
                         /*checkZeroLength*/ true,
                         CGF.needsEHCleanup(dtorKind));
  }

  // Pop the cleanup block.
  CGF.PopCleanupBlock();
}

void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
  const Expr *Arg = E->getArgument();
  llvm::Value *Ptr = EmitScalarExpr(Arg);

  // Null check the pointer.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);

  // We might be deleting a pointer to an array.  If so, GEP down to the
  // first non-array element.
  // (This assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*.)
  QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    SmallVector<llvm::Value*,8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr
             = getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();

      // 2. GEP to the first element of the array.
      GEP.push_back(Zero);
    }

    Ptr = Builder.CreateInBoundsGEP(Ptr, GEP, "del.first");
  }
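  // E.g. for 'A (*p)[3][7]; delete [] p;' the indices are {0, 0, 0}: the
  // first 0 addresses the pointee and each subsequent 0 steps into one
  // array layer, leaving Ptr typed as 'A*' at the first element.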

  assert(ConvertTypeForMem(DeleteTy) ==
         cast<llvm::PointerType>(Ptr->getType())->getElementType());

  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E, Ptr, DeleteTy);
  } else {
    EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy,
                     E->isGlobalDelete());
  }

  EmitBlock(DeleteEnd);
}

static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
  // void __cxa_bad_typeid();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
}

static void EmitBadTypeidCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadTypeidFn(CGF);
  CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF,
                                         const Expr *E,
                                         llvm::Type *StdTypeInfoPtrTy) {
  // Get the vtable pointer.
  llvm::Value *ThisPtr = CGF.EmitLValue(E).getAddress();

  // C++ [expr.typeid]p2:
  //   If the glvalue expression is obtained by applying the unary * operator to
  //   a pointer and the pointer is a null pointer value, the typeid expression
  //   throws the std::bad_typeid exception.
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E->IgnoreParens())) {
    if (UO->getOpcode() == UO_Deref) {
      llvm::BasicBlock *BadTypeidBlock =
        CGF.createBasicBlock("typeid.bad_typeid");
      llvm::BasicBlock *EndBlock =
        CGF.createBasicBlock("typeid.end");

      llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr);
      CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);

      CGF.EmitBlock(BadTypeidBlock);
      EmitBadTypeidCall(CGF);
      CGF.EmitBlock(EndBlock);
    }
  }
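  // E.g. 'typeid(*p)' for a possibly-null 'P *p' gets the null check
  // above; 'typeid(obj)' on an ordinary lvalue does not.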

  llvm::Value *Value = CGF.GetVTablePtr(ThisPtr,
                                        StdTypeInfoPtrTy->getPointerTo());

  // Load the type info; in the Itanium vtable layout it sits one
  // pointer-sized slot before the vtable's address point, hence the -1.
  Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
  return CGF.Builder.CreateLoad(Value);
}

llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  llvm::Type *StdTypeInfoPtrTy =
    ConvertType(E->getType())->getPointerTo();

  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
      CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
    return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
  }

  // C++ [expr.typeid]p2:
  //   When typeid is applied to a glvalue expression whose type is a
  //   polymorphic class type, the result refers to a std::type_info object
  //   representing the type of the most derived object (that is, the dynamic
  //   type) to which the glvalue refers.
  if (E->isPotentiallyEvaluated())
    return EmitTypeidFromVTable(*this, E->getExprOperand(),
                                StdTypeInfoPtrTy);

  QualType OperandTy = E->getExprOperand()->getType();
  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
                               StdTypeInfoPtrTy);
}

static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
  // void *__dynamic_cast(const void *sub,
  //                      const abi::__class_type_info *src,
  //                      const abi::__class_type_info *dst,
  //                      std::ptrdiff_t src2dst_offset);

  llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
  llvm::Type *PtrDiffTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());

  llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };

  llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);

  // Mark the function as nounwind readonly.
  llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
                                            llvm::Attribute::ReadOnly };
  llvm::AttributeSet Attrs = llvm::AttributeSet::get(
      CGF.getLLVMContext(), llvm::AttributeSet::FunctionIndex, FuncAttrs);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
}

static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
  // void __cxa_bad_cast();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
}

static void EmitBadCastCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadCastFn(CGF);
  CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

/// \brief Compute the src2dst_offset hint as described in the
/// Itanium C++ ABI [2.9.7].
static CharUnits computeOffsetHint(ASTContext &Context,
                                   const CXXRecordDecl *Src,
                                   const CXXRecordDecl *Dst) {
  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                     /*DetectVirtual=*/false);

  // If Dst is not derived from Src we can skip the whole computation
  // below and return that Src is not a public base of Dst.  As a side
  // effect, isDerivedFrom records all inheritance paths in Paths.
  if (!Dst->isDerivedFrom(Src, Paths))
    return CharUnits::fromQuantity(-2ULL);

  unsigned NumPublicPaths = 0;
  CharUnits Offset;

  // Now walk all possible inheritance paths.
  for (CXXBasePaths::paths_iterator I = Paths.begin(), E = Paths.end();
       I != E; ++I) {
    if (I->Access != AS_public) // Ignore non-public inheritance.
      continue;

    ++NumPublicPaths;

    for (CXXBasePath::iterator J = I->begin(), JE = I->end(); J != JE; ++J) {
      // If the path contains a virtual base class we can't give any hint.
      // -1: no hint.
      if (J->Base->isVirtual())
        return CharUnits::fromQuantity(-1ULL);

      if (NumPublicPaths > 1) // Won't use offsets, skip computation.
        continue;

      // Accumulate the base class offsets.
      const ASTRecordLayout &L = Context.getASTRecordLayout(J->Class);
      Offset += L.getBaseClassOffset(J->Base->getType()->getAsCXXRecordDecl());
    }
  }

  // -2: Src is not a public base of Dst.
  if (NumPublicPaths == 0)
    return CharUnits::fromQuantity(-2ULL);

  // -3: Src is a multiple public base type but never a virtual base type.
  if (NumPublicPaths > 1)
    return CharUnits::fromQuantity(-3ULL);

  // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
  // Return the offset of Src from the origin of Dst.
  return Offset;
}
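
// E.g. given 'struct A { virtual ~A(); }; struct B { virtual ~B(); };
// struct C : A, B {};', the hint for a cast from 'B*' to 'C*' is the
// nonzero offset of the B subobject within C, which lets __dynamic_cast
// try the likely subobject before a full hierarchy search.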

static llvm::Value *
EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
                    QualType SrcTy, QualType DestTy,
                    llvm::BasicBlock *CastEnd) {
  llvm::Type *PtrDiffLTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  if (const PointerType *PTy = DestTy->getAs<PointerType>()) {
    if (PTy->getPointeeType()->isVoidType()) {
      // C++ [expr.dynamic.cast]p7:
      //   If T is "pointer to cv void," then the result is a pointer to the
      //   most derived object pointed to by v.

      // Get the vtable pointer.
      llvm::Value *VTable = CGF.GetVTablePtr(Value, PtrDiffLTy->getPointerTo());

      // Get the offset-to-top from the vtable; in the Itanium layout it is
      // stored two pointer-sized slots before the address point, hence -2.
      llvm::Value *OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
      OffsetToTop = CGF.Builder.CreateLoad(OffsetToTop, "offset.to.top");

      // Finally, add the offset to the pointer.
      Value = CGF.EmitCastToVoidPtr(Value);
      Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);

      return CGF.Builder.CreateBitCast(Value, DestLTy);
    }
  }

  QualType SrcRecordTy;
  QualType DestRecordTy;

  if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) {
    SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
    DestRecordTy = DestPTy->getPointeeType();
  } else {
    SrcRecordTy = SrcTy;
    DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
  }

  assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
  assert(DestRecordTy->isRecordType() && "dest type must be a record type!");

  llvm::Value *SrcRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // Compute the offset hint.
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  llvm::Value *OffsetHint =
    llvm::ConstantInt::get(PtrDiffLTy,
                           computeOffsetHint(CGF.getContext(), SrcDecl,
                                             DestDecl).getQuantity());

  // Emit the call to __dynamic_cast.
  Value = CGF.EmitCastToVoidPtr(Value);

  llvm::Value *args[] = { Value, SrcRTTI, DestRTTI, OffsetHint };
  Value = CGF.EmitNounwindRuntimeCall(getDynamicCastFn(CGF), args);
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);

  // C++ [expr.dynamic.cast]p9:
  //   A failed cast to reference type throws std::bad_cast.
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
      CGF.createBasicBlock("dynamic_cast.bad_cast");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}

static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
                                          QualType DestTy) {
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  if (DestTy->isPointerType())
    return llvm::Constant::getNullValue(DestLTy);

  // C++ [expr.dynamic.cast]p9:
  //   A failed cast to reference type throws std::bad_cast.
  EmitBadCastCall(CGF);

  CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
  return llvm::UndefValue::get(DestLTy);
}

llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
                                              const CXXDynamicCastExpr *DCE) {
  QualType DestTy = DCE->getTypeAsWritten();

  if (DCE->isAlwaysNull())
    return EmitDynamicCastToNull(*this, DestTy);

  QualType SrcTy = DCE->getSubExpr()->getType();

  // C++ [expr.dynamic.cast]p4:
  //   If the value of v is a null pointer value in the pointer case, the result
  //   is the null pointer value of type T.
  bool ShouldNullCheckSrcValue = SrcTy->isPointerType();

  llvm::BasicBlock *CastNull = 0;
  llvm::BasicBlock *CastNotNull = 0;
  llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");

  if (ShouldNullCheckSrcValue) {
    CastNull = createBasicBlock("dynamic_cast.null");
    CastNotNull = createBasicBlock("dynamic_cast.notnull");

    llvm::Value *IsNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  Value = EmitDynamicCastCall(*this, Value, SrcTy, DestTy, CastEnd);

  if (ShouldNullCheckSrcValue) {
    EmitBranch(CastEnd);

    EmitBlock(CastNull);
    EmitBranch(CastEnd);
  }

  EmitBlock(CastEnd);

  if (ShouldNullCheckSrcValue) {
    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);

    Value = PHI;
  }

  return Value;
}
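
// For a pointer cast 'dynamic_cast<T*>(p)', the emitted control flow is
// roughly:
//   dynamic_cast.notnull: %r = call @__dynamic_cast(...); br end
//   dynamic_cast.null:    br end
//   dynamic_cast.end:     phi [ %r, notnull ], [ null, null ]
// with the initial null test on 'p' selecting between the first two
// blocks.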

void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
  RunCleanupsScope Scope(*this);
  LValue SlotLV = MakeAddrLValue(Slot.getAddr(), E->getType(),
                                 Slot.getAlignment());

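  // Each capture initializer corresponds, in order, to a field of the
  // closure class; e.g. '[n, &m] { ... }' yields one field copied from
  // 'n' and one reference field bound to 'm'.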
  CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
  for (LambdaExpr::capture_init_iterator i = E->capture_init_begin(),
                                         e = E->capture_init_end();
       i != e; ++i, ++CurField) {
    // Emit the initializer for this capture field.

    LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
    ArrayRef<VarDecl *> ArrayIndexes;
    if (CurField->getType()->isArrayType())
      ArrayIndexes = E->getCaptureInitIndexVars(i);
    EmitInitializerForField(*CurField, LV, *i, ArrayIndexes);
  }
}