//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions
//
//===----------------------------------------------------------------------===//

#include "clang/Frontend/CodeGenOptions.h"
#include "CodeGenFunction.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGDebugInfo.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/CallSite.h"

using namespace clang;
using namespace CodeGen;

RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
                                          SourceLocation CallLoc,
                                          llvm::Value *Callee,
                                          ReturnValueSlot ReturnValue,
                                          llvm::Value *This,
                                          llvm::Value *VTT,
                                          CallExpr::const_arg_iterator ArgBeg,
                                          CallExpr::const_arg_iterator ArgEnd) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  // C++11 [class.mfct.non-static]p2:
  //   If a non-static member function of a class X is called for an object that
  //   is not of type X, or of a type derived from X, the behavior is undefined.
  EmitTypeCheck(TCK_MemberCall, CallLoc, This,
                getContext().getRecordType(MD->getParent()));

  CallArgList Args;

  // Push the this ptr.
  Args.add(RValue::get(This), MD->getThisType(getContext()));

  // If there is a VTT parameter, emit it.
  if (VTT) {
    QualType T = getContext().getPointerType(getContext().VoidPtrTy);
    Args.add(RValue::get(VTT), T);
  }

  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());

  // And the rest of the call args.
  EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);

  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
                  Callee, ReturnValue, Args, MD);
}

// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
// quite what we want.
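//
// For example (illustrative): given '(__extension__ (b))->f()', this
// routine peels off the ParenExprs, the '__extension__' unary operator,
// and any no-op casts, exposing the underlying DeclRefExpr for 'b'.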
static const Expr *skipNoOpCastsAndParens(const Expr *E) {
  while (true) {
    if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
      E = PE->getSubExpr();
      continue;
    }

    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
      if (CE->getCastKind() == CK_NoOp) {
        E = CE->getSubExpr();
        continue;
      }
    }
    if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
      if (UO->getOpcode() == UO_Extension) {
        E = UO->getSubExpr();
        continue;
      }
    }
    return E;
  }
}

/// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on the
/// given expr can be devirtualized.
static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
                                               const Expr *Base,
                                               const CXXMethodDecl *MD) {

  // When building with -fapple-kext, all calls must go through the vtable
  // since the kernel linker can do runtime patching of vtables.
  if (Context.getLangOpts().AppleKext)
    return false;

  // If the most derived class is marked final, we know that no subclass can
  // override this member function and so we can devirtualize it. For example:
  //
  // struct A { virtual void f(); };
  // struct B final : A { };
  //
  // void f(B *b) {
  //   b->f();
  // }
  //
  const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType();
  if (MostDerivedClassDecl->hasAttr<FinalAttr>())
    return true;

  // If the member function is marked 'final', we know that it can't be
  // overridden and we can therefore devirtualize it.
  if (MD->hasAttr<FinalAttr>())
    return true;

  // Similarly, if the class itself is marked 'final', nothing can derive
  // from it, and we can therefore devirtualize the member function call.
  if (MD->getParent()->hasAttr<FinalAttr>())
    return true;

  Base = skipNoOpCastsAndParens(Base);
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
      // If the variable is declared with a record (non-pointer,
      // non-reference) type, we know its dynamic type exactly and can
      // devirtualize.
      return VD->getType()->isRecordType();
    }

    return false;
  }

  // We can devirtualize calls on an object accessed by a class member access
  // expression, since by C++11 [basic.life]p6 we know that it can't refer to
  // a derived class object constructed in the same location.
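  //
  // For example (illustrative):
  //
  //   struct A { virtual void f(); };
  //   struct W { A a; };
  //
  //   void g(W &w) {
  //     w.a.f();  // must be exactly A::f
  //   }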
  if (const MemberExpr *ME = dyn_cast<MemberExpr>(Base))
    if (const ValueDecl *VD = dyn_cast<ValueDecl>(ME->getMemberDecl()))
      return VD->getType()->isRecordType();

  // We can always devirtualize calls on temporary object expressions.
  if (isa<CXXConstructExpr>(Base))
    return true;

  // And calls on bound temporaries.
  if (isa<CXXBindTemporaryExpr>(Base))
    return true;

  // Check if this is a call expr that returns a record type.
  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
    return CE->getCallReturnType()->isRecordType();

  // We can't devirtualize the call.
  return false;
}

static CXXRecordDecl *getCXXRecord(const Expr *E) {
  QualType T = E->getType();
  if (const PointerType *PTy = T->getAs<PointerType>())
    T = PTy->getPointeeType();
  const RecordType *Ty = T->castAs<RecordType>();
  return cast<CXXRecordDecl>(Ty->getDecl());
}

// Note: This function also emits constructor calls to support a MSVC
// extension allowing explicit constructor function calls.
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  const Expr *callee = CE->getCallee()->IgnoreParens();

  if (isa<BinaryOperator>(callee))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(callee);
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  CGDebugInfo *DI = getDebugInfo();
  if (DI && CGM.getCodeGenOpts().DebugInfo == CodeGenOptions::LimitedDebugInfo
      && !isa<CallExpr>(ME->getBase())) {
    QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
    if (const PointerType *PTy = dyn_cast<PointerType>(PQTy)) {
      DI->getOrCreateRecordType(PTy->getPointeeType(),
                                MD->getParent()->getLocation());
    }
  }

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
    return EmitCall(getContext().getPointerType(MD->getType()), Callee,
                    ReturnValue, CE->arg_begin(), CE->arg_end());
  }

  // Compute the object pointer.
  const Expr *Base = ME->getBase();
  bool CanUseVirtualCall = MD->isVirtual() && !ME->hasQualifier();

  const CXXMethodDecl *DevirtualizedMethod = NULL;
  if (CanUseVirtualCall &&
      canDevirtualizeMemberFunctionCalls(getContext(), Base, MD)) {
    const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
    DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
    assert(DevirtualizedMethod);
    const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
    const Expr *Inner = Base->ignoreParenBaseCasts();
    if (getCXXRecord(Inner) == DevirtualizedClass)
      // If the class of the Inner expression is where the devirtualized
      // method is defined, build the this pointer from it.
      Base = Inner;
    else if (getCXXRecord(Base) != DevirtualizedClass) {
      // If the method is defined in a class that is neither the best dynamic
      // one nor the class of the full expression, we would have to build
      // a derived-to-base cast to compute the correct this pointer, but
      // we don't have support for that yet, so do a virtual call.
      DevirtualizedMethod = NULL;
    }
    // If the return types are not the same, this might be a case where more
    // code needs to run to compensate for it. For example, the derived
    // method might return a type that inherits from the return type of MD
    // and has a prefix.
    // For now we just avoid devirtualizing these covariant cases.
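    //
    // For example (illustrative):
    //
    //   struct X { virtual X *clone(); };
    //   struct Y final : X { virtual Y *clone(); };  // covariant return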
    if (DevirtualizedMethod &&
        DevirtualizedMethod->getResultType().getCanonicalType() !=
        MD->getResultType().getCanonicalType())
      DevirtualizedMethod = NULL;
  }

  llvm::Value *This;
  if (ME->isArrow())
    This = EmitScalarExpr(Base);
  else
    This = EmitLValue(Base).getAddress();

  if (MD->isTrivial()) {
    if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
      return RValue::get(0);

    if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
      // We don't like to generate the trivial copy/move assignment operator
      // when it isn't necessary; just produce the proper effect here.
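      // (Illustrative: for a trivial 'S &S::operator=(const S &)', the
      // expression 'a = b' is emitted as a direct aggregate copy of b
      // into a rather than as a call.)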
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitAggregateAssign(This, RHS, CE->getType());
      return RValue::get(This);
    }

    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
      // Trivial move and copy ctor are the same.
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
                                     CE->arg_begin(), CE->arg_end());
      return RValue::get(This);
    }
    llvm_unreachable("unknown trivial member function");
  }

  // Compute the function type we're calling.
  const CGFunctionInfo *FInfo = 0;
  if (isa<CXXDestructorDecl>(MD))
    FInfo = &CGM.getTypes().arrangeCXXDestructor(cast<CXXDestructorDecl>(MD),
                                                 Dtor_Complete);
  else if (isa<CXXConstructorDecl>(MD))
    FInfo = &CGM.getTypes().arrangeCXXConstructorDeclaration(
                                                 cast<CXXConstructorDecl>(MD),
                                                 Ctor_Complete);
  else
    FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(MD);

  llvm::Type *Ty = CGM.getTypes().GetFunctionType(*FInfo);

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
  bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;

  llvm::Value *Callee;
  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
    if (UseVirtualCall) {
      Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
    } else {
      if (getContext().getLangOpts().AppleKext &&
          MD->isVirtual() &&
          ME->hasQualifier())
        Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
      else if (!DevirtualizedMethod)
        Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
      else {
        const CXXDestructorDecl *DDtor =
          cast<CXXDestructorDecl>(DevirtualizedMethod);
        Callee = CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty);
      }
    }
  } else if (const CXXConstructorDecl *Ctor =
               dyn_cast<CXXConstructorDecl>(MD)) {
    Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
  } else if (UseVirtualCall) {
    Callee = BuildVirtualCall(MD, This, Ty);
  } else {
    if (getContext().getLangOpts().AppleKext &&
        MD->isVirtual() &&
        ME->hasQualifier())
      Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
    else if (!DevirtualizedMethod)
      Callee = CGM.GetAddrOfFunction(MD, Ty);
    else {
      Callee = CGM.GetAddrOfFunction(DevirtualizedMethod, Ty);
    }
  }

  return EmitCXXMemberCall(MD, CE->getExprLoc(), Callee, ReturnValue, This,
                           /*VTT=*/0, CE->arg_begin(), CE->arg_end());
}

RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  const BinaryOperator *BO =
      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();

  const MemberPointerType *MPT =
    MemFnExpr->getType()->castAs<MemberPointerType>();

  const FunctionProtoType *FPT =
    MPT->getPointeeType()->castAs<FunctionProtoType>();
  const CXXRecordDecl *RD =
    cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Emit the 'this' pointer.
  llvm::Value *This;

  if (BO->getOpcode() == BO_PtrMemI)
    This = EmitScalarExpr(BaseExpr);
  else
    This = EmitLValue(BaseExpr).getAddress();
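
  // (Illustrative: for 'int (S::*pmf)(int)', a call '(s->*pmf)(1)' takes
  // the BO_PtrMemI path above, while '(obj.*pmf)(1)' takes the lvalue
  // path.)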

  EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This,
                QualType(MPT->getClass(), 0));

  // Ask the ABI to load the callee.  Note that This is modified.
  llvm::Value *Callee =
    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);

  CallArgList Args;

  QualType ThisType =
    getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.add(RValue::get(This), ThisType);

  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);

  // And the rest of the call args.
  EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
                  Callee, ReturnValue, Args);
}

RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                               const CXXMethodDecl *MD,
                                               ReturnValueSlot ReturnValue) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");
  LValue LV = EmitLValue(E->getArg(0));
  llvm::Value *This = LV.getAddress();

  if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
      MD->isTrivial()) {
    llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
    QualType Ty = E->getType();
    EmitAggregateAssign(This, Src, Ty);
    return RValue::get(This);
  }

  llvm::Value *Callee = EmitCXXOperatorMemberCallee(E, MD, This);
  return EmitCXXMemberCall(MD, E->getExprLoc(), Callee, ReturnValue, This,
                           /*VTT=*/0, E->arg_begin() + 1, E->arg_end());
}

RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
                                               ReturnValueSlot ReturnValue) {
  return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
}

static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
                                            llvm::Value *DestPtr,
                                            const CXXRecordDecl *Base) {
  if (Base->isEmpty())
    return;

  DestPtr = CGF.EmitCastToVoidPtr(DestPtr);

  const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
  CharUnits Size = Layout.getNonVirtualSize();
  CharUnits Align = Layout.getNonVirtualAlign();

  llvm::Value *SizeVal = CGF.CGM.getSize(Size);

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  // TODO: isZeroInitializable can be over-conservative in the case where a
  // virtual base contains a member pointer.
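  //
  // For example (illustrative), a base like
  //   struct B { int B::*mp; };
  // is not zero-initializable under the Itanium ABI (the null data member
  // pointer is all-ones), so the copy-from-constant path below is used.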
  if (!CGF.CGM.getTypes().isZeroInitializable(Base)) {
    llvm::Constant *NullConstant = CGF.CGM.EmitNullConstantForBase(Base);

    llvm::GlobalVariable *NullVariable =
      new llvm::GlobalVariable(CGF.CGM.getModule(), NullConstant->getType(),
                               /*isConstant=*/true,
                               llvm::GlobalVariable::PrivateLinkage,
                               NullConstant, Twine());
    NullVariable->setAlignment(Align.getQuantity());
    llvm::Value *SrcPtr = CGF.EmitCastToVoidPtr(NullVariable);

    // Get and call the appropriate llvm.memcpy overload.
    CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity());
    return;
  }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  CGF.Builder.CreateMemSet(DestPtr, CGF.Builder.getInt8(0), SizeVal,
                           Align.getQuantity());
}

void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
                                      AggValueSlot Dest) {
  assert(!Dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now, unless destination is
  // already zeroed.
  if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
    case CXXConstructExpr::CK_Complete:
      EmitNullInitialization(Dest.getAddr(), E->getType());
      break;
    case CXXConstructExpr::CK_VirtualBase:
    case CXXConstructExpr::CK_NonVirtualBase:
      EmitNullBaseClassInitialization(*this, Dest.getAddr(), CD->getParent());
      break;
    }
  }

  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Elide the constructor if we're constructing from a temporary.
  // The temporary check is required because Sema sets this on NRVO
  // returns.
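  //
  // (Illustrative: in 'T t = T(x);' the copy from the temporary 'T(x)' is
  // elidable, so we emit that argument directly into 't' below.)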
  if (getContext().getLangOpts().ElideConstructors && E->isElidable()) {
    assert(getContext().hasSameUnqualifiedType(E->getType(),
                                               E->getArg(0)->getType()));
    if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
      EmitAggExpr(E->getArg(0), Dest);
      return;
    }
  }

  if (const ConstantArrayType *arrayType
        = getContext().getAsConstantArrayType(E->getType())) {
    EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddr(),
                               E->arg_begin(), E->arg_end());
  } else {
    CXXCtorType Type = Ctor_Complete;
    bool ForVirtualBase = false;

    switch (E->getConstructionKind()) {
     case CXXConstructExpr::CK_Delegating:
      // We should be emitting a constructor; GlobalDecl will assert this.
      Type = CurGD.getCtorType();
      break;

     case CXXConstructExpr::CK_Complete:
      Type = Ctor_Complete;
      break;

     case CXXConstructExpr::CK_VirtualBase:
      ForVirtualBase = true;
      // fall-through

     case CXXConstructExpr::CK_NonVirtualBase:
      Type = Ctor_Base;
    }

    // Call the constructor.
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest.getAddr(),
                           E->arg_begin(), E->arg_end());
  }
}

void
CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
                                            llvm::Value *Src,
                                            const Expr *Exp) {
  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
    Exp = E->getSubExpr();
  assert(isa<CXXConstructExpr>(Exp) &&
         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
  const CXXConstructorDecl *CD = E->getConstructor();
  RunCleanupsScope Scope(*this);

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  // FIXME. Do I still need this for a copy ctor synthesis?
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest, E->getType());

  assert(!getContext().getAsConstantArrayType(E->getType())
         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src,
                                 E->arg_begin(), E->arg_end());
}

static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
                                        const CXXNewExpr *E) {
  if (!E->isArray())
    return CharUnits::Zero();

  // No cookie is required if the operator new[] being used is the
  // reserved placement operator new[].
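  //
  // (Illustrative: 'new (buf) S[n]' using the reserved
  // '::operator new[](size_t, void*)' needs no cookie, while a plain
  // 'new S[n]' typically does when S has a non-trivial destructor.)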
  if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
    return CharUnits::Zero();

  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
}

static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
                                        const CXXNewExpr *e,
                                        unsigned minElements,
                                        llvm::Value *&numElements,
                                        llvm::Value *&sizeWithoutCookie) {
  QualType type = e->getAllocatedType();

  if (!e->isArray()) {
    CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
    sizeWithoutCookie
      = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
    return sizeWithoutCookie;
  }

  // The width of size_t.
  unsigned sizeWidth = CGF.SizeTy->getBitWidth();

  // Figure out the cookie size.
  llvm::APInt cookieSize(sizeWidth,
                         CalculateCookiePadding(CGF, e).getQuantity());

  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  numElements = CGF.EmitScalarExpr(e->getArraySize());
  assert(isa<llvm::IntegerType>(numElements->getType()));

  // The number of elements can have an arbitrary integer type;
  // essentially, we need to multiply it by a constant factor, add a
  // cookie size, and verify that the result is representable as a
  // size_t.  That's just a gloss, though, and it's wrong in one
  // important way: if the count is negative, it's an error even if
  // the cookie size would bring the total size >= 0.
  bool isSigned
    = e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
  llvm::IntegerType *numElementsType
    = cast<llvm::IntegerType>(numElements->getType());
  unsigned numElementsWidth = numElementsType->getBitWidth();

  // Compute the constant factor.
  llvm::APInt arraySizeMultiplier(sizeWidth, 1);
  while (const ConstantArrayType *CAT
             = CGF.getContext().getAsConstantArrayType(type)) {
    type = CAT->getElementType();
    arraySizeMultiplier *= CAT->getSize();
  }

  CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
  llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
  typeSizeMultiplier *= arraySizeMultiplier;

  // This will be a size_t.
  llvm::Value *size;

  // If someone is doing 'new int[42]', there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
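  // (Illustrative: for 'new int[42]' on a typical 64-bit target everything
  // below folds to the constant 168, and no overflow branch is emitted.)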
  if (llvm::ConstantInt *numElementsC =
        dyn_cast<llvm::ConstantInt>(numElements)) {
    const llvm::APInt &count = numElementsC->getValue();

    bool hasAnyOverflow = false;

    // If 'count' was a negative number, it's an overflow.
    if (isSigned && count.isNegative())
      hasAnyOverflow = true;

    // We want to do all this arithmetic in size_t.  If numElements is
    // wider than that, check whether it's already too big, and if so,
    // overflow.
    else if (numElementsWidth > sizeWidth &&
             numElementsWidth - sizeWidth > count.countLeadingZeros())
      hasAnyOverflow = true;

    // Okay, compute a count at the right width.
    llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);

    // If there is a brace-initializer, we cannot allocate fewer elements than
    // there are initializers. If we do, that's treated like an overflow.
    if (adjustedCount.ult(minElements))
      hasAnyOverflow = true;

    // Scale numElements by that.  This might overflow, but we don't
    // care because it only overflows if allocationSize does, too, and
    // if that overflows then we shouldn't use this.
    numElements = llvm::ConstantInt::get(CGF.SizeTy,
                                         adjustedCount * arraySizeMultiplier);

    // Compute the size before cookie, and track whether it overflowed.
    bool overflow;
    llvm::APInt allocationSize
      = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
    hasAnyOverflow |= overflow;

    // Add in the cookie, and check whether it's overflowed.
    if (cookieSize != 0) {
      // Save the current size without a cookie.  This shouldn't be
      // used if there was overflow.
      sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);

      allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
      hasAnyOverflow |= overflow;
    }

    // On overflow, produce a -1 so operator new will fail.
    if (hasAnyOverflow) {
      size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
    } else {
      size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
    }

  // Otherwise, we might need to use the overflow intrinsics.
  } else {
    // There are up to five conditions we need to test for:
    // 1) if isSigned, we need to check whether numElements is negative;
    // 2) if numElementsWidth > sizeWidth, we need to check whether
    //    numElements is larger than something representable in size_t;
    // 3) if minElements > 0, we need to check whether numElements is smaller
    //    than that;
    // 4) we need to compute
    //      sizeWithoutCookie := numElements * typeSizeMultiplier
    //    and check whether it overflows; and
    // 5) if we need a cookie, we need to compute
    //      size := sizeWithoutCookie + cookieSize
    //    and check whether it overflows.
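    //
    // For example (illustrative): for 'new S[n]' with 'short n' and
    // sizeof(S) == 8, n is sign-extended to size_t and step (4) uses
    // llvm.umul.with.overflow; a negative n makes that unsigned multiply
    // overflow, so the select at the end produces an all-ones size and
    // operator new fails.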

    llvm::Value *hasOverflow = 0;

    // If numElementsWidth > sizeWidth, then one way or another, we're
    // going to have to do a comparison for (2), and this happens to
    // take care of (1), too.
    if (numElementsWidth > sizeWidth) {
      llvm::APInt threshold(numElementsWidth, 1);
      threshold <<= sizeWidth;

      llvm::Value *thresholdV
        = llvm::ConstantInt::get(numElementsType, threshold);

      hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
      numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);

    // Otherwise, if we're signed, we want to sext up to size_t.
    } else if (isSigned) {
      if (numElementsWidth < sizeWidth)
        numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);

      // If there's a non-1 type size multiplier, then we can do the
      // signedness check at the same time as we do the multiply
      // because a negative number times anything will cause an
      // unsigned overflow.  Otherwise, we have to do it here. But at least
      // in this case, we can subsume the >= minElements check.
      if (typeSizeMultiplier == 1)
        hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));

    // Otherwise, zext up to size_t if necessary.
    } else if (numElementsWidth < sizeWidth) {
      numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
    }

    assert(numElements->getType() == CGF.SizeTy);

    if (minElements) {
      // Don't allow allocation of fewer elements than we have initializers.
      if (!hasOverflow) {
        hasOverflow = CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));
      } else if (numElementsWidth > sizeWidth) {
        // The other existing overflow subsumes this check.
        // We do an unsigned comparison, since any signed value < -1 is
        // taken care of either above or below.
        hasOverflow = CGF.Builder.CreateOr(hasOverflow,
                          CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements)));
      }
    }

    size = numElements;

    // Multiply by the type size if necessary.  This multiplier
    // includes all the factors for nested arrays.
    //
    // This step also causes numElements to be scaled up by the
    // nested-array factor if necessary.  Overflow on this computation
    // can be ignored because the result shouldn't be used if
    // allocation fails.
    if (typeSizeMultiplier != 1) {
      llvm::Value *umul_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);

      llvm::Value *tsmV =
        llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
      llvm::Value *result =
        CGF.Builder.CreateCall2(umul_with_overflow, size, tsmV);

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);

      // Also scale up numElements by the array size multiplier.
      if (arraySizeMultiplier != 1) {
        // If the base element type size is 1, then we can re-use the
        // multiply we just did.
        if (typeSize.isOne()) {
          assert(arraySizeMultiplier == typeSizeMultiplier);
          numElements = size;

        // Otherwise we need a separate multiply.
        } else {
          llvm::Value *asmV =
            llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
          numElements = CGF.Builder.CreateMul(numElements, asmV);
        }
      }
    } else {
      // numElements doesn't need to be scaled.
      assert(arraySizeMultiplier == 1);
    }

    // Add in the cookie size if necessary.
    if (cookieSize != 0) {
      sizeWithoutCookie = size;

      llvm::Value *uadd_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);

      llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
      llvm::Value *result =
        CGF.Builder.CreateCall2(uadd_with_overflow, size, cookieSizeV);

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);
    }

    // If we had any possibility of dynamic overflow, make a select to
    // overwrite 'size' with an all-ones value, which should cause
    // operator new to throw.
    if (hasOverflow)
      size = CGF.Builder.CreateSelect(hasOverflow,
                                 llvm::Constant::getAllOnesValue(CGF.SizeTy),
                                      size);
  }

  if (cookieSize == 0)
    sizeWithoutCookie = size;
  else
    assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");

  return size;
}

static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
                                    QualType AllocType, llvm::Value *NewPtr) {

  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(AllocType);
  if (!CGF.hasAggregateLLVMType(AllocType))
    CGF.EmitScalarInit(Init, 0, CGF.MakeAddrLValue(NewPtr, AllocType,
                                                   Alignment),
                       false);
  else if (AllocType->isAnyComplexType())
    CGF.EmitComplexExprIntoAddr(Init, NewPtr,
                                AllocType.isVolatileQualified());
  else {
    AggValueSlot Slot
      = AggValueSlot::forAddr(NewPtr, Alignment, AllocType.getQualifiers(),
                              AggValueSlot::IsDestructed,
                              AggValueSlot::DoesNotNeedGCBarriers,
                              AggValueSlot::IsNotAliased);
    CGF.EmitAggExpr(Init, Slot);

    CGF.MaybeEmitStdInitializerListCleanup(NewPtr, Init);
  }
}

void
CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
                                         QualType elementType,
                                         llvm::Value *beginPtr,
                                         llvm::Value *numElements) {
  if (!E->hasInitializer())
    return; // We have a POD type.

  llvm::Value *explicitPtr = beginPtr;
  // Find the end of the array, hoisted out of the loop.
  llvm::Value *endPtr =
    Builder.CreateInBoundsGEP(beginPtr, numElements, "array.end");

  unsigned initializerElements = 0;

  const Expr *Init = E->getInitializer();
  llvm::AllocaInst *endOfInit = 0;
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  EHScopeStack::stable_iterator cleanup;
  llvm::Instruction *cleanupDominator = 0;
  // If the initializer is an initializer list, first do the explicit elements.
  if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
    initializerElements = ILE->getNumInits();

    // Enter a partial-destruction cleanup if necessary.
    if (needsEHCleanup(dtorKind)) {
      // In principle we could tell the cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex.  Therefore we go through an
      // alloca.
      endOfInit = CreateTempAlloca(beginPtr->getType(), "array.endOfInit");
      cleanupDominator = Builder.CreateStore(beginPtr, endOfInit);
      pushIrregularPartialArrayCleanup(beginPtr, endOfInit, elementType,
                                       getDestroyer(dtorKind));
      cleanup = EHStack.stable_begin();
    }

    for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (endOfInit) Builder.CreateStore(explicitPtr, endOfInit);
      StoreAnyExprIntoOneUnit(*this, ILE->getInit(i), elementType, explicitPtr);
      explicitPtr = Builder.CreateConstGEP1_32(explicitPtr, 1,
                                               "array.exp.next");
    }

    // The remaining elements are filled with the array filler expression.
    Init = ILE->getArrayFiller();
  }
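
  // (Illustrative: for 'new int[n]{1, 2, 3}', the three explicit elements
  // were just stored; the loop below fills the remaining n - 3 elements
  // with the array filler.)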

  // Create the continuation block.
  llvm::BasicBlock *contBB = createBasicBlock("new.loop.end");

  // If the number of elements isn't constant, we have to now check if there is
  // anything left to initialize.
  if (llvm::ConstantInt *constNum = dyn_cast<llvm::ConstantInt>(numElements)) {
    // If all elements have already been initialized, skip the whole loop.
    if (constNum->getZExtValue() <= initializerElements) {
      // If there was a cleanup, deactivate it.
      if (cleanupDominator)
        DeactivateCleanupBlock(cleanup, cleanupDominator);
      return;
    }
  } else {
    llvm::BasicBlock *nonEmptyBB = createBasicBlock("new.loop.nonempty");
    llvm::Value *isEmpty = Builder.CreateICmpEQ(explicitPtr, endPtr,
                                                "array.isempty");
    Builder.CreateCondBr(isEmpty, contBB, nonEmptyBB);
    EmitBlock(nonEmptyBB);
  }

  // Enter the loop.
  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = createBasicBlock("new.loop");

  EmitBlock(loopBB);

  // Set up the current-element phi.
  llvm::PHINode *curPtr =
    Builder.CreatePHI(explicitPtr->getType(), 2, "array.cur");
  curPtr->addIncoming(explicitPtr, entryBB);

  // Store the new cleanup position for irregular cleanups.
  if (endOfInit) Builder.CreateStore(curPtr, endOfInit);

  // Enter a partial-destruction cleanup if necessary.
  if (!cleanupDominator && needsEHCleanup(dtorKind)) {
    pushRegularPartialArrayCleanup(beginPtr, curPtr, elementType,
                                   getDestroyer(dtorKind));
    cleanup = EHStack.stable_begin();
    cleanupDominator = Builder.CreateUnreachable();
  }

  // Emit the initializer into this element.
  StoreAnyExprIntoOneUnit(*this, Init, E->getAllocatedType(), curPtr);

  // Leave the cleanup if we entered one.
  if (cleanupDominator) {
    DeactivateCleanupBlock(cleanup, cleanupDominator);
    cleanupDominator->eraseFromParent();
  }

  // Advance to the next element.
  llvm::Value *nextPtr = Builder.CreateConstGEP1_32(curPtr, 1, "array.next");

  // Check whether we've gotten to the end of the array and, if so,
  // exit the loop.
  llvm::Value *isEnd = Builder.CreateICmpEQ(nextPtr, endPtr, "array.atend");
  Builder.CreateCondBr(isEnd, contBB, loopBB);
  curPtr->addIncoming(nextPtr, Builder.GetInsertBlock());

  EmitBlock(contBB);
}

static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
                           llvm::Value *NewPtr, llvm::Value *Size) {
  CGF.EmitCastToVoidPtr(NewPtr);
  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(T);
  CGF.Builder.CreateMemSet(NewPtr, CGF.Builder.getInt8(0), Size,
                           Alignment.getQuantity(), false);
}

static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               QualType ElementType,
                               llvm::Value *NewPtr,
                               llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie) {
  const Expr *Init = E->getInitializer();
  if (E->isArray()) {
    if (const CXXConstructExpr *CCE = dyn_cast_or_null<CXXConstructExpr>(Init)){
      CXXConstructorDecl *Ctor = CCE->getConstructor();
      if (Ctor->isTrivial()) {
        // If the new expression did not specify value-initialization, then
        // there is no initialization.
        if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
          return;

        if (CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
          // Optimization: since zero initialization will just set the memory
          // to all zeroes, generate a single memset to do it in one shot.
          EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
          return;
        }
      }

      CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
                                     CCE->arg_begin(), CCE->arg_end(),
                                     CCE->requiresZeroInitialization());
      return;
    } else if (Init && isa<ImplicitValueInitExpr>(Init) &&
               CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
      // Optimization: since zero initialization will just set the memory
      // to all zeroes, generate a single memset to do it in one shot.
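      // (Illustrative: 'new int[n]()' takes this path.)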
      EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
      return;
    }
    CGF.EmitNewArrayInitializer(E, ElementType, NewPtr, NumElements);
    return;
  }

  if (!Init)
    return;

  StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr);
}

namespace {
  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression.
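  ///
  /// For example (illustrative): in 'new (arena) T(f())', if 'f' or T's
  /// constructor throws after the allocation succeeds, the matching
  /// placement 'operator delete(void*, Arena&)' must be called with the
  /// saved placement argument; this cleanup arranges that.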
  class CallDeleteDuringNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    llvm::Value *Ptr;
    llvm::Value *AllocSize;

    RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(RValue);
    }

    CallDeleteDuringNew(size_t NumPlacementArgs,
                        const FunctionDecl *OperatorDelete,
                        llvm::Value *Ptr,
                        llvm::Value *AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    void setPlacementArg(unsigned I, RValue Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.add(RValue::get(Ptr), *AI++);

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2)
        DeleteArgs.add(RValue::get(AllocSize), *AI++);

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I)
        DeleteArgs.add(getPlacementArgs()[I], *AI++);

      // Call 'operator delete'.
      CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(DeleteArgs, FPT),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
    }
  };

  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression when the new expression is
  /// conditional.
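  ///
  /// (Illustrative: 'p = cond ? new T(x) : q;' evaluates the new on only
  /// one branch, so the pointer, size, and placement arguments must be
  /// saved as dominating values and restored when the cleanup fires.)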
  class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    DominatingValue<RValue>::saved_type Ptr;
    DominatingValue<RValue>::saved_type AllocSize;

    DominatingValue<RValue>::saved_type *getPlacementArgs() {
      return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
    }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
    }

    CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
                                   const FunctionDecl *OperatorDelete,
                                   DominatingValue<RValue>::saved_type Ptr,
                              DominatingValue<RValue>::saved_type AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.add(Ptr.restore(CGF), *AI++);

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2) {
        RValue RV = AllocSize.restore(CGF);
        DeleteArgs.add(RV, *AI++);
      }

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
        RValue RV = getPlacementArgs()[I].restore(CGF);
        DeleteArgs.add(RV, *AI++);
      }

      // Call 'operator delete'.
      CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(DeleteArgs, FPT),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
    }
  };
}

/// Enter a cleanup to call 'operator delete' if the initializer in a
/// new-expression throws.
static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
                                  const CXXNewExpr *E,
                                  llvm::Value *NewPtr,
                                  llvm::Value *AllocSize,
                                  const CallArgList &NewArgs) {
  // If we're not inside a conditional branch, then the cleanup will
  // dominate and we can do the easier (and more efficient) thing.
  if (!CGF.isInConditionalBranch()) {
    CallDeleteDuringNew *Cleanup = CGF.EHStack
      .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
                                                 E->getNumPlacementArgs(),
                                                 E->getOperatorDelete(),
                                                 NewPtr, AllocSize);
    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
      Cleanup->setPlacementArg(I, NewArgs[I+1].RV);

    return;
  }

  // Otherwise, we need to save all this stuff.
  DominatingValue<RValue>::saved_type SavedNewPtr =
    DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
  DominatingValue<RValue>::saved_type SavedAllocSize =
    DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));

  CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
    .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(EHCleanup,
                                                 E->getNumPlacementArgs(),
                                                 E->getOperatorDelete(),
                                                 SavedNewPtr,
                                                 SavedAllocSize);
  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
    Cleanup->setPlacementArg(I,
                     DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV));

  CGF.initFullExprCleanup();
}

llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  // The element type being allocated.
  QualType allocType = getContext().getBaseElementType(E->getAllocatedType());

  // 1. Build a call to the allocation function.
  FunctionDecl *allocator = E->getOperatorNew();
  const FunctionProtoType *allocatorType =
    allocator->getType()->castAs<FunctionProtoType>();

  CallArgList allocatorArgs;

  // The allocation size is the first argument.
  QualType sizeType = getContext().getSizeType();

  // If there is a brace-initializer, we cannot allocate fewer elements than
  // there are initializers.
  unsigned minElements = 0;
  if (E->isArray() && E->hasInitializer()) {
    if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer()))
      minElements = ILE->getNumInits();
  }

  llvm::Value *numElements = 0;
  llvm::Value *allocSizeWithoutCookie = 0;
  llvm::Value *allocSize =
    EmitCXXNewAllocSize(*this, E, minElements, numElements,
                        allocSizeWithoutCookie);

  allocatorArgs.add(RValue::get(allocSize), sizeType);

  // Emit the rest of the arguments.
  // FIXME: Ideally, this should just use EmitCallArgs.
  CXXNewExpr::const_arg_iterator placementArg = E->placement_arg_begin();

  // First, use the types from the function type.
  // We start at 1 here because the first argument (the allocation size)
  // has already been emitted.
  for (unsigned i = 1, e = allocatorType->getNumArgs(); i != e;
       ++i, ++placementArg) {
    QualType argType = allocatorType->getArgType(i);

    assert(getContext().hasSameUnqualifiedType(argType.getNonReferenceType(),
                                               placementArg->getType()) &&
           "type mismatch in call argument!");

    EmitCallArg(allocatorArgs, *placementArg, argType);
  }

  // Either we've emitted all the call args, or we have a call to a
  // variadic function.
  assert((placementArg == E->placement_arg_end() ||
          allocatorType->isVariadic()) &&
         "Extra arguments to non-variadic function!");

  // If we still have any arguments, emit them using the type of the argument.
  for (CXXNewExpr::const_arg_iterator placementArgsEnd = E->placement_arg_end();
       placementArg != placementArgsEnd; ++placementArg) {
    EmitCallArg(allocatorArgs, *placementArg, placementArg->getType());
  }

  // Emit the allocation call.  If the allocator is a global placement
  // operator, just "inline" it directly.
  RValue RV;
  if (allocator->isReservedGlobalPlacementOperator()) {
    assert(allocatorArgs.size() == 2);
    RV = allocatorArgs[1].RV;
    // TODO: kill any unnecessary computations done for the size
    // argument.
  } else {
    RV = EmitCall(CGM.getTypes().arrangeFreeFunctionCall(allocatorArgs,
                                                         allocatorType),
                  CGM.GetAddrOfFunction(allocator), ReturnValueSlot(),
                  allocatorArgs, allocator);
  }

  // Emit a null check on the allocation result if the allocation
  // function is allowed to return null (because it has a non-throwing
  // exception spec; for this part, we inline
  // CXXNewExpr::shouldNullCheckAllocation()) and we have an
  // interesting initializer.
  bool nullCheck = allocatorType->isNothrow(getContext()) &&
    (!allocType.isPODType(getContext()) || E->hasInitializer());
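  // (Illustrative: 'new (std::nothrow) S(...)' reaches this path; we
  // branch around the initializer when the allocation returns null, and
  // the PHI at the end yields a null result.)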

  llvm::BasicBlock *nullCheckBB = 0;
  llvm::BasicBlock *contBB = 0;

  llvm::Value *allocation = RV.getScalarVal();
  unsigned AS =
    cast<llvm::PointerType>(allocation->getType())->getAddressSpace();

  // The null-check means that the initializer is conditionally
  // evaluated.
  ConditionalEvaluation conditional(*this);

  if (nullCheck) {
    conditional.begin(*this);

    nullCheckBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
    contBB = createBasicBlock("new.cont");

    llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
    Builder.CreateCondBr(isNull, contBB, notNullBB);
    EmitBlock(notNullBB);
  }

  // If there's an operator delete, enter a cleanup to call it if an
  // exception is thrown.
  EHScopeStack::stable_iterator operatorDeleteCleanup;
  llvm::Instruction *cleanupDominator = 0;
  if (E->getOperatorDelete() &&
      !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
    EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
    operatorDeleteCleanup = EHStack.stable_begin();
    cleanupDominator = Builder.CreateUnreachable();
  }

  assert((allocSize == allocSizeWithoutCookie) ==
         CalculateCookiePadding(*this, E).isZero());
  if (allocSize != allocSizeWithoutCookie) {
    assert(E->isArray());
    allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
                                                       numElements,
                                                       E, allocType);
  }

  llvm::Type *elementPtrTy
    = ConvertTypeForMem(allocType)->getPointerTo(AS);
  llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy);

  EmitNewInitializer(*this, E, allocType, result, numElements,
                     allocSizeWithoutCookie);
  if (E->isArray()) {
    // NewPtr is a pointer to the base element type.  If we're
    // allocating an array of arrays, we'll need to cast back to the
    // array pointer type.
    llvm::Type *resultType = ConvertTypeForMem(E->getType());
    if (result->getType() != resultType)
      result = Builder.CreateBitCast(result, resultType);
  }

  // Deactivate the 'operator delete' cleanup if we finished
  // initialization.
  if (operatorDeleteCleanup.isValid()) {
    DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
    cleanupDominator->eraseFromParent();
  }

  if (nullCheck) {
    conditional.end(*this);

    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    EmitBlock(contBB);

    llvm::PHINode *PHI = Builder.CreatePHI(result->getType(), 2);
    PHI->addIncoming(result, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(result->getType()),
                     nullCheckBB);

    result = PHI;
  }

  return result;
}
1317 
1318 void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
1319                                      llvm::Value *Ptr,
1320                                      QualType DeleteTy) {
1321   assert(DeleteFD->getOverloadedOperator() == OO_Delete);
1322 
1323   const FunctionProtoType *DeleteFTy =
1324     DeleteFD->getType()->getAs<FunctionProtoType>();
1325 
1326   CallArgList DeleteArgs;
1327 
1328   // Check if we need to pass the size to the delete operator.
1329   llvm::Value *Size = 0;
1330   QualType SizeTy;
1331   if (DeleteFTy->getNumArgs() == 2) {
1332     SizeTy = DeleteFTy->getArgType(1);
1333     CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
1334     Size = llvm::ConstantInt::get(ConvertType(SizeTy),
1335                                   DeleteTypeSize.getQuantity());
1336   }
1337 
1338   QualType ArgTy = DeleteFTy->getArgType(0);
1339   llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
1340   DeleteArgs.add(RValue::get(DeletePtr), ArgTy);
1341 
1342   if (Size)
1343     DeleteArgs.add(RValue::get(Size), SizeTy);
1344 
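  // Illustrative (assuming a typical LP64 Itanium target): for a sized
  //   void operator delete(void*, std::size_t);
  // the call emitted below looks roughly like
  //   call void @_ZdlPvm(i8* %ptr, i64 <sizeof(DeleteTy)>)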
  // Emit the call to delete.
  EmitCall(CGM.getTypes().arrangeFreeFunctionCall(DeleteArgs, DeleteFTy),
           CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
           DeleteArgs, DeleteFD);
}

namespace {
  /// Calls the given 'operator delete' on a single object.
  struct CallObjectDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    QualType ElementType;

    CallObjectDelete(llvm::Value *Ptr,
                     const FunctionDecl *OperatorDelete,
                     QualType ElementType)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
    }
  };
}

/// Emit the code for deleting a single object.
static void EmitObjectDelete(CodeGenFunction &CGF,
                             const FunctionDecl *OperatorDelete,
                             llvm::Value *Ptr,
                             QualType ElementType,
                             bool UseGlobalDelete) {
  // Find the destructor for the type, if applicable.  If the
  // destructor is virtual, we'll just emit the vcall and return.
  const CXXDestructorDecl *Dtor = 0;
  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
      Dtor = RD->getDestructor();

      if (Dtor->isVirtual()) {
        if (UseGlobalDelete) {
          // If we're supposed to call the global delete, make sure we do so
          // even if the destructor throws.

          // Derive the complete-object pointer, which is what we need
          // to pass to the deallocation function.
          llvm::Value *completePtr =
            CGF.CGM.getCXXABI().adjustToCompleteObject(CGF, Ptr, ElementType);

          CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                                    completePtr, OperatorDelete,
                                                    ElementType);
        }

        llvm::Type *Ty =
          CGF.getTypes().GetFunctionType(
                         CGF.getTypes().arrangeCXXDestructor(Dtor, Dtor_Complete));

        llvm::Value *Callee
          = CGF.BuildVirtualCall(Dtor,
                                 UseGlobalDelete? Dtor_Complete : Dtor_Deleting,
                                 Ptr, Ty);
        // FIXME: Provide a source location here.
        CGF.EmitCXXMemberCall(Dtor, SourceLocation(), Callee, ReturnValueSlot(),
                              Ptr, /*VTT=*/0, 0, 0);
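        // In the Itanium ABI the deleting (D0) destructor both runs the
        // destructor body and calls the class's operator delete, which is
        // why the Dtor_Deleting path needs no separate deallocation call.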

        if (UseGlobalDelete) {
          CGF.PopCleanupBlock();
        }

        return;
      }
    }
  }

  // Make sure that we call delete even if the dtor throws.
  // This doesn't have to be a conditional cleanup because we're going
  // to pop it off in a second.
  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                            Ptr, OperatorDelete, ElementType);

  if (Dtor)
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false, Ptr);
  else if (CGF.getLangOpts().ObjCAutoRefCount &&
           ElementType->isObjCLifetimeType()) {
    switch (ElementType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong: {
      // Load the pointer value.
      llvm::Value *PtrValue = CGF.Builder.CreateLoad(Ptr,
                                             ElementType.isVolatileQualified());

      CGF.EmitARCRelease(PtrValue, /*precise*/ true);
      break;
    }

    case Qualifiers::OCL_Weak:
      CGF.EmitARCDestroyWeak(Ptr);
      break;
    }
  }

  CGF.PopCleanupBlock();
}

namespace {
  /// Calls the given 'operator delete' on an array of objects.
  struct CallArrayDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    llvm::Value *NumElements;
    QualType ElementType;
    CharUnits CookieSize;

    CallArrayDelete(llvm::Value *Ptr,
                    const FunctionDecl *OperatorDelete,
                    llvm::Value *NumElements,
                    QualType ElementType,
                    CharUnits CookieSize)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
        ElementType(ElementType), CookieSize(CookieSize) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const FunctionProtoType *DeleteFTy =
        OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);

      CallArgList Args;

      // Pass the pointer as the first argument.
      QualType VoidPtrTy = DeleteFTy->getArgType(0);
      llvm::Value *DeletePtr
        = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
      Args.add(RValue::get(DeletePtr), VoidPtrTy);

      // Pass the original requested size as the second argument.
      if (DeleteFTy->getNumArgs() == 2) {
        QualType size_t = DeleteFTy->getArgType(1);
        llvm::IntegerType *SizeTy
          = cast<llvm::IntegerType>(CGF.ConvertType(size_t));

        CharUnits ElementTypeSize =
          CGF.CGM.getContext().getTypeSizeInChars(ElementType);

        // The size of an element, multiplied by the number of elements.
        llvm::Value *Size
          = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
        Size = CGF.Builder.CreateMul(Size, NumElements);

        // Plus the size of the cookie if applicable.
        if (!CookieSize.isZero()) {
          llvm::Value *CookieSizeV
            = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
          Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
        }

        Args.add(RValue::get(Size), size_t);
      }
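      // Illustrative: for 'delete[] p' where p came from 'new A[n]', the
      // Size computed above is n * sizeof(A) plus the array cookie (if
      // any), i.e. the amount originally requested from operator new[].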

      // Emit the call to delete.
      CGF.EmitCall(CGF.getTypes().arrangeFreeFunctionCall(Args, DeleteFTy),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), Args, OperatorDelete);
    }
  };
}

/// Emit the code for deleting an array of objects.
static void EmitArrayDelete(CodeGenFunction &CGF,
                            const CXXDeleteExpr *E,
                            llvm::Value *deletedPtr,
                            QualType elementType) {
  llvm::Value *numElements = 0;
  llvm::Value *allocatedPtr = 0;
  CharUnits cookieSize;
  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
                                      numElements, allocatedPtr, cookieSize);

  assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");

  // Make sure that we call delete even if one of the dtors throws.
  const FunctionDecl *operatorDelete = E->getOperatorDelete();
  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
                                           allocatedPtr, operatorDelete,
                                           numElements, elementType,
                                           cookieSize);

  // Destroy the elements.
  if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
    assert(numElements && "no element count for a type with a destructor!");

    llvm::Value *arrayEnd =
      CGF.Builder.CreateInBoundsGEP(deletedPtr, numElements, "delete.end");

    // Note that it is legal to allocate a zero-length array, and we
    // can never fold the check away because the length should always
    // come from a cookie.
    CGF.emitArrayDestroy(deletedPtr, arrayEnd, elementType,
                         CGF.getDestroyer(dtorKind),
                         /*checkZeroLength*/ true,
                         CGF.needsEHCleanup(dtorKind));
  }

  // Pop the cleanup block.
  CGF.PopCleanupBlock();
}

void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
  const Expr *Arg = E->getArgument();
  llvm::Value *Ptr = EmitScalarExpr(Arg);

  // Null check the pointer.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);

  // We might be deleting a pointer to array.  If so, GEP down to the
  // first non-array element.
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
  QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    SmallVector<llvm::Value*,8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr
             = getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();

      // 2. GEP to the first element of the array.
      GEP.push_back(Zero);
    }

    Ptr = Builder.CreateInBoundsGEP(Ptr, GEP, "del.first");
  }
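  // Illustrative: for 'A (*p)[3][7]', the GEP above uses indices
  // (0, 0, 0) to step from [3 x [7 x %A]]* down to %A*, so DeleteTy
  // ends up as A, the true element type.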

  assert(ConvertTypeForMem(DeleteTy) ==
         cast<llvm::PointerType>(Ptr->getType())->getElementType());

  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E, Ptr, DeleteTy);
  } else {
    EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy,
                     E->isGlobalDelete());
  }

  EmitBlock(DeleteEnd);
}

static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
  // void __cxa_bad_typeid();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
}

static void EmitBadTypeidCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadTypeidFn(CGF);
  CGF.EmitCallOrInvoke(Fn).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF,
                                         const Expr *E,
                                         llvm::Type *StdTypeInfoPtrTy) {
  // Get the vtable pointer.
  llvm::Value *ThisPtr = CGF.EmitLValue(E).getAddress();

  // C++ [expr.typeid]p2:
  //   If the glvalue expression is obtained by applying the unary * operator to
  //   a pointer and the pointer is a null pointer value, the typeid expression
  //   throws the std::bad_typeid exception.
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E->IgnoreParens())) {
    if (UO->getOpcode() == UO_Deref) {
      llvm::BasicBlock *BadTypeidBlock =
        CGF.createBasicBlock("typeid.bad_typeid");
      llvm::BasicBlock *EndBlock =
        CGF.createBasicBlock("typeid.end");

      llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr);
      CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);

      CGF.EmitBlock(BadTypeidBlock);
      EmitBadTypeidCall(CGF);
      CGF.EmitBlock(EndBlock);
    }
  }
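  // Illustrative trigger for the check above:
  //   A *p = 0;
  //   typeid(*p);   // must throw std::bad_typeid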

  llvm::Value *Value = CGF.GetVTablePtr(ThisPtr,
                                        StdTypeInfoPtrTy->getPointerTo());

  // Load the type info; the Itanium ABI keeps the std::type_info pointer
  // at offset -1 (in pointer-sized units) from the vtable address point.
  Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
  return CGF.Builder.CreateLoad(Value);
}

llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  llvm::Type *StdTypeInfoPtrTy =
    ConvertType(E->getType())->getPointerTo();

  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
      CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
    return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
  }

  // C++ [expr.typeid]p2:
  //   When typeid is applied to a glvalue expression whose type is a
  //   polymorphic class type, the result refers to a std::type_info object
  //   representing the type of the most derived object (that is, the dynamic
  //   type) to which the glvalue refers.
  if (E->isPotentiallyEvaluated())
    return EmitTypeidFromVTable(*this, E->getExprOperand(),
                                StdTypeInfoPtrTy);

  QualType OperandTy = E->getExprOperand()->getType();
  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
                               StdTypeInfoPtrTy);
}

static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
  // void *__dynamic_cast(const void *sub,
  //                      const abi::__class_type_info *src,
  //                      const abi::__class_type_info *dst,
  //                      std::ptrdiff_t src2dst_offset);

  llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
  llvm::Type *PtrDiffTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());

  llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };

  llvm::FunctionType *FTy =
    llvm::FunctionType::get(Int8PtrTy, Args, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast");
}

static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
  // void __cxa_bad_cast();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
}

static void EmitBadCastCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadCastFn(CGF);
  CGF.EmitCallOrInvoke(Fn).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

static llvm::Value *
EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
                    QualType SrcTy, QualType DestTy,
                    llvm::BasicBlock *CastEnd) {
  llvm::Type *PtrDiffLTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  if (const PointerType *PTy = DestTy->getAs<PointerType>()) {
    if (PTy->getPointeeType()->isVoidType()) {
      // C++ [expr.dynamic.cast]p7:
      //   If T is "pointer to cv void," then the result is a pointer to the
      //   most derived object pointed to by v.

      // Get the vtable pointer.
      llvm::Value *VTable = CGF.GetVTablePtr(Value, PtrDiffLTy->getPointerTo());

      // Get the offset-to-top from the vtable; the Itanium ABI stores it
      // at offset -2 (in pointer-sized units) from the address point.
      llvm::Value *OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
      OffsetToTop = CGF.Builder.CreateLoad(OffsetToTop, "offset.to.top");

      // Finally, add the offset to the pointer.
      Value = CGF.EmitCastToVoidPtr(Value);
      Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);

      return CGF.Builder.CreateBitCast(Value, DestLTy);
    }
  }

  QualType SrcRecordTy;
  QualType DestRecordTy;

  if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) {
    SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
    DestRecordTy = DestPTy->getPointeeType();
  } else {
    SrcRecordTy = SrcTy;
    DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
  }

  assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
  assert(DestRecordTy->isRecordType() && "dest type must be a record type!");

  llvm::Value *SrcRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // FIXME: Actually compute a hint here.
  llvm::Value *OffsetHint = llvm::ConstantInt::get(PtrDiffLTy, -1ULL);
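  // Per the Itanium ABI, a src2dst_offset hint of -1 means "no hint";
  // a non-negative value would give the static offset of the source
  // subobject within the destination type.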

  // Emit the call to __dynamic_cast.
  Value = CGF.EmitCastToVoidPtr(Value);
  Value = CGF.Builder.CreateCall4(getDynamicCastFn(CGF), Value,
                                  SrcRTTI, DestRTTI, OffsetHint);
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);

  // C++ [expr.dynamic.cast]p9:
  //   A failed cast to reference type throws std::bad_cast
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
      CGF.createBasicBlock("dynamic_cast.bad_cast");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}

static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
                                          QualType DestTy) {
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  if (DestTy->isPointerType())
    return llvm::Constant::getNullValue(DestLTy);

  // C++ [expr.dynamic.cast]p9:
  //   A failed cast to reference type throws std::bad_cast
  EmitBadCastCall(CGF);
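  // __cxa_bad_cast never returns, so the block started below is
  // unreachable; the undef result merely keeps the IR type-correct.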

  CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
  return llvm::UndefValue::get(DestLTy);
}

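// Illustrative shape of the IR emitted below for 'dynamic_cast<T*>(p)'
// (a pointer source, so a null check is required):
//   %isnull = icmp eq %T1* %p, null
//   br i1 %isnull, label %dynamic_cast.null, label %dynamic_cast.notnull
//   ; notnull: call i8* @__dynamic_cast(...), branch to %dynamic_cast.end
//   ; end: phi of the cast result and null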
llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
                                              const CXXDynamicCastExpr *DCE) {
  QualType DestTy = DCE->getTypeAsWritten();

  if (DCE->isAlwaysNull())
    return EmitDynamicCastToNull(*this, DestTy);

  QualType SrcTy = DCE->getSubExpr()->getType();

  // C++ [expr.dynamic.cast]p4:
  //   If the value of v is a null pointer value in the pointer case, the result
  //   is the null pointer value of type T.
  bool ShouldNullCheckSrcValue = SrcTy->isPointerType();

  llvm::BasicBlock *CastNull = 0;
  llvm::BasicBlock *CastNotNull = 0;
  llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");

  if (ShouldNullCheckSrcValue) {
    CastNull = createBasicBlock("dynamic_cast.null");
    CastNotNull = createBasicBlock("dynamic_cast.notnull");

    llvm::Value *IsNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  Value = EmitDynamicCastCall(*this, Value, SrcTy, DestTy, CastEnd);

  if (ShouldNullCheckSrcValue) {
    EmitBranch(CastEnd);

    EmitBlock(CastNull);
    EmitBranch(CastEnd);
  }

  EmitBlock(CastEnd);

  if (ShouldNullCheckSrcValue) {
    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);

    Value = PHI;
  }

  return Value;
}

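/// Emit a lambda expression by initializing the fields of its closure
/// class from the captures. Illustrative (names are examples only): for
/// '[x, &y](int a) { ... }' the closure type has one field per capture,
/// and the loop below emits an initializer for each ('x' by copy, 'y'
/// bound as a reference).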
void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
  RunCleanupsScope Scope(*this);
  LValue SlotLV = MakeAddrLValue(Slot.getAddr(), E->getType(),
                                 Slot.getAlignment());

  CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
  for (LambdaExpr::capture_init_iterator i = E->capture_init_begin(),
                                         e = E->capture_init_end();
       i != e; ++i, ++CurField) {
    // Emit the initializer for this capture field.

    LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
    ArrayRef<VarDecl *> ArrayIndexes;
    if (CurField->getType()->isArrayType())
      ArrayIndexes = E->getCaptureInitIndexVars(i);
    EmitInitializerForField(*CurField, LV, *i, ArrayIndexes);
  }
}