1 //===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code dealing with code generation of C++ expressions
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CodeGenFunction.h"
15 #include "CGCUDARuntime.h"
16 #include "CGCXXABI.h"
17 #include "CGDebugInfo.h"
18 #include "CGObjCRuntime.h"
19 #include "clang/Frontend/CodeGenOptions.h"
20 #include "llvm/IR/Intrinsics.h"
21 #include "llvm/Support/CallSite.h"
22 
23 using namespace clang;
24 using namespace CodeGen;
25 
26 RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
27                                           SourceLocation CallLoc,
28                                           llvm::Value *Callee,
29                                           ReturnValueSlot ReturnValue,
30                                           llvm::Value *This,
31                                           llvm::Value *ImplicitParam,
32                                           QualType ImplicitParamTy,
33                                           CallExpr::const_arg_iterator ArgBeg,
34                                           CallExpr::const_arg_iterator ArgEnd) {
35   assert(MD->isInstance() &&
36          "Trying to emit a member call expr on a static method!");
37 
38   // C++11 [class.mfct.non-static]p2:
39   //   If a non-static member function of a class X is called for an object that
40   //   is not of type X, or of a type derived from X, the behavior is undefined.
41   EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall
42                                             : TCK_MemberCall,
43                 CallLoc, This, getContext().getRecordType(MD->getParent()));
44 
45   CallArgList Args;
46 
47   // Push the this ptr.
48   Args.add(RValue::get(This), MD->getThisType(getContext()));
49 
50   // If there is an implicit parameter (e.g. VTT), emit it.
51   if (ImplicitParam) {
52     Args.add(RValue::get(ImplicitParam), ImplicitParamTy);
53   }
54 
55   const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
56   RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());
57 
58   // And the rest of the call args.
59   EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
60 
61   return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
62                   Callee, ReturnValue, Args, MD);
63 }
64 
65 static CXXRecordDecl *getCXXRecord(const Expr *E) {
66   QualType T = E->getType();
67   if (const PointerType *PTy = T->getAs<PointerType>())
68     T = PTy->getPointeeType();
69   const RecordType *Ty = T->castAs<RecordType>();
70   return cast<CXXRecordDecl>(Ty->getDecl());
71 }
72 
73 // Note: This function also emit constructor calls to support a MSVC
74 // extensions allowing explicit constructor function call.
75 RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
76                                               ReturnValueSlot ReturnValue) {
77   const Expr *callee = CE->getCallee()->IgnoreParens();
78 
79   if (isa<BinaryOperator>(callee))
80     return EmitCXXMemberPointerCallExpr(CE, ReturnValue);
81 
82   const MemberExpr *ME = cast<MemberExpr>(callee);
83   const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
84 
85   if (MD->isStatic()) {
86     // The method is static, emit it as we would a regular call.
87     llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
88     return EmitCall(getContext().getPointerType(MD->getType()), Callee,
89                     CE->getLocStart(), ReturnValue, CE->arg_begin(),
90                     CE->arg_end());
91   }
92 
93   // Compute the object pointer.
94   const Expr *Base = ME->getBase();
95   bool CanUseVirtualCall = MD->isVirtual() && !ME->hasQualifier();
96 
97   const CXXMethodDecl *DevirtualizedMethod = NULL;
98   if (CanUseVirtualCall && CanDevirtualizeMemberFunctionCall(Base, MD)) {
99     const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
100     DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
101     assert(DevirtualizedMethod);
102     const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
103     const Expr *Inner = Base->ignoreParenBaseCasts();
104     if (getCXXRecord(Inner) == DevirtualizedClass)
105       // If the class of the Inner expression is where the dynamic method
106       // is defined, build the this pointer from it.
107       Base = Inner;
108     else if (getCXXRecord(Base) != DevirtualizedClass) {
109       // If the method is defined in a class that is not the best dynamic
110       // one or the one of the full expression, we would have to build
111       // a derived-to-base cast to compute the correct this pointer, but
112       // we don't have support for that yet, so do a virtual call.
113       DevirtualizedMethod = NULL;
114     }
115     // If the return types are not the same, this might be a case where more
116     // code needs to run to compensate for it. For example, the derived
117     // method might return a type that inherits form from the return
118     // type of MD and has a prefix.
119     // For now we just avoid devirtualizing these covariant cases.
120     if (DevirtualizedMethod &&
121         DevirtualizedMethod->getResultType().getCanonicalType() !=
122         MD->getResultType().getCanonicalType())
123       DevirtualizedMethod = NULL;
124   }
125 
126   llvm::Value *This;
127   if (ME->isArrow())
128     This = EmitScalarExpr(Base);
129   else
130     This = EmitLValue(Base).getAddress();
131 
132 
133   if (MD->isTrivial()) {
134     if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
135     if (isa<CXXConstructorDecl>(MD) &&
136         cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
137       return RValue::get(0);
138 
139     if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
140       // We don't like to generate the trivial copy/move assignment operator
141       // when it isn't necessary; just produce the proper effect here.
142       llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
143       EmitAggregateAssign(This, RHS, CE->getType());
144       return RValue::get(This);
145     }
146 
147     if (isa<CXXConstructorDecl>(MD) &&
148         cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
149       // Trivial move and copy ctor are the same.
150       llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
151       EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
152                                      CE->arg_begin(), CE->arg_end());
153       return RValue::get(This);
154     }
155     llvm_unreachable("unknown trivial member function");
156   }
157 
158   // Compute the function type we're calling.
159   const CXXMethodDecl *CalleeDecl = DevirtualizedMethod ? DevirtualizedMethod : MD;
160   const CGFunctionInfo *FInfo = 0;
161   if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
162     FInfo = &CGM.getTypes().arrangeCXXDestructor(Dtor,
163                                                  Dtor_Complete);
164   else if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(CalleeDecl))
165     FInfo = &CGM.getTypes().arrangeCXXConstructorDeclaration(Ctor,
166                                                              Ctor_Complete);
167   else
168     FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);
169 
170   llvm::FunctionType *Ty = CGM.getTypes().GetFunctionType(*FInfo);
171 
172   // C++ [class.virtual]p12:
173   //   Explicit qualification with the scope operator (5.1) suppresses the
174   //   virtual call mechanism.
175   //
176   // We also don't emit a virtual call if the base expression has a record type
177   // because then we know what the type is.
178   bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;
179   llvm::Value *Callee;
180 
181   if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
182     assert(CE->arg_begin() == CE->arg_end() &&
183            "Destructor shouldn't have explicit parameters");
184     assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
185     if (UseVirtualCall) {
186       CGM.getCXXABI().EmitVirtualDestructorCall(*this, Dtor, Dtor_Complete,
187                                                 CE->getExprLoc(), This);
188     } else {
189       if (getLangOpts().AppleKext &&
190           MD->isVirtual() &&
191           ME->hasQualifier())
192         Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
193       else if (!DevirtualizedMethod)
194         Callee = CGM.GetAddrOfCXXDestructor(Dtor, Dtor_Complete, FInfo, Ty);
195       else {
196         const CXXDestructorDecl *DDtor =
197           cast<CXXDestructorDecl>(DevirtualizedMethod);
198         Callee = CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty);
199       }
200       EmitCXXMemberCall(MD, CE->getExprLoc(), Callee, ReturnValue, This,
201                         /*ImplicitParam=*/0, QualType(), 0, 0);
202     }
203     return RValue::get(0);
204   }
205 
206   if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
207     Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
208   } else if (UseVirtualCall) {
209     Callee = CGM.getCXXABI().getVirtualFunctionPointer(*this, MD, This, Ty);
210   } else {
211     if (getLangOpts().AppleKext &&
212         MD->isVirtual() &&
213         ME->hasQualifier())
214       Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
215     else if (!DevirtualizedMethod)
216       Callee = CGM.GetAddrOfFunction(MD, Ty);
217     else {
218       Callee = CGM.GetAddrOfFunction(DevirtualizedMethod, Ty);
219     }
220   }
221 
222   if (MD->isVirtual())
223     This = CGM.getCXXABI().adjustThisArgumentForVirtualCall(*this, MD, This);
224 
225   return EmitCXXMemberCall(MD, CE->getExprLoc(), Callee, ReturnValue, This,
226                            /*ImplicitParam=*/0, QualType(),
227                            CE->arg_begin(), CE->arg_end());
228 }
229 
230 RValue
231 CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
232                                               ReturnValueSlot ReturnValue) {
233   const BinaryOperator *BO =
234       cast<BinaryOperator>(E->getCallee()->IgnoreParens());
235   const Expr *BaseExpr = BO->getLHS();
236   const Expr *MemFnExpr = BO->getRHS();
237 
238   const MemberPointerType *MPT =
239     MemFnExpr->getType()->castAs<MemberPointerType>();
240 
241   const FunctionProtoType *FPT =
242     MPT->getPointeeType()->castAs<FunctionProtoType>();
243   const CXXRecordDecl *RD =
244     cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
245 
246   // Get the member function pointer.
247   llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
248 
249   // Emit the 'this' pointer.
250   llvm::Value *This;
251 
252   if (BO->getOpcode() == BO_PtrMemI)
253     This = EmitScalarExpr(BaseExpr);
254   else
255     This = EmitLValue(BaseExpr).getAddress();
256 
257   EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This,
258                 QualType(MPT->getClass(), 0));
259 
260   // Ask the ABI to load the callee.  Note that This is modified.
261   llvm::Value *Callee =
262     CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);
263 
264   CallArgList Args;
265 
266   QualType ThisType =
267     getContext().getPointerType(getContext().getTagDeclType(RD));
268 
269   // Push the this ptr.
270   Args.add(RValue::get(This), ThisType);
271 
272   RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);
273 
274   // And the rest of the call args
275   EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
276   return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
277                   Callee, ReturnValue, Args);
278 }
279 
280 RValue
281 CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
282                                                const CXXMethodDecl *MD,
283                                                ReturnValueSlot ReturnValue) {
284   assert(MD->isInstance() &&
285          "Trying to emit a member call expr on a static method!");
286   LValue LV = EmitLValue(E->getArg(0));
287   llvm::Value *This = LV.getAddress();
288 
289   if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
290       MD->isTrivial()) {
291     llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
292     QualType Ty = E->getType();
293     EmitAggregateAssign(This, Src, Ty);
294     return RValue::get(This);
295   }
296 
297   llvm::Value *Callee = EmitCXXOperatorMemberCallee(E, MD, This);
298   return EmitCXXMemberCall(MD, E->getExprLoc(), Callee, ReturnValue, This,
299                            /*ImplicitParam=*/0, QualType(),
300                            E->arg_begin() + 1, E->arg_end());
301 }
302 
303 RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
304                                                ReturnValueSlot ReturnValue) {
305   return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
306 }
307 
308 static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
309                                             llvm::Value *DestPtr,
310                                             const CXXRecordDecl *Base) {
311   if (Base->isEmpty())
312     return;
313 
314   DestPtr = CGF.EmitCastToVoidPtr(DestPtr);
315 
316   const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
317   CharUnits Size = Layout.getNonVirtualSize();
318   CharUnits Align = Layout.getNonVirtualAlign();
319 
320   llvm::Value *SizeVal = CGF.CGM.getSize(Size);
321 
322   // If the type contains a pointer to data member we can't memset it to zero.
323   // Instead, create a null constant and copy it to the destination.
324   // TODO: there are other patterns besides zero that we can usefully memset,
325   // like -1, which happens to be the pattern used by member-pointers.
326   // TODO: isZeroInitializable can be over-conservative in the case where a
327   // virtual base contains a member pointer.
328   if (!CGF.CGM.getTypes().isZeroInitializable(Base)) {
329     llvm::Constant *NullConstant = CGF.CGM.EmitNullConstantForBase(Base);
330 
331     llvm::GlobalVariable *NullVariable =
332       new llvm::GlobalVariable(CGF.CGM.getModule(), NullConstant->getType(),
333                                /*isConstant=*/true,
334                                llvm::GlobalVariable::PrivateLinkage,
335                                NullConstant, Twine());
336     NullVariable->setAlignment(Align.getQuantity());
337     llvm::Value *SrcPtr = CGF.EmitCastToVoidPtr(NullVariable);
338 
339     // Get and call the appropriate llvm.memcpy overload.
340     CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity());
341     return;
342   }
343 
344   // Otherwise, just memset the whole thing to zero.  This is legal
345   // because in LLVM, all default initializers (other than the ones we just
346   // handled above) are guaranteed to have a bit pattern of all zeros.
347   CGF.Builder.CreateMemSet(DestPtr, CGF.Builder.getInt8(0), SizeVal,
348                            Align.getQuantity());
349 }
350 
351 void
352 CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
353                                       AggValueSlot Dest) {
354   assert(!Dest.isIgnored() && "Must have a destination!");
355   const CXXConstructorDecl *CD = E->getConstructor();
356 
357   // If we require zero initialization before (or instead of) calling the
358   // constructor, as can be the case with a non-user-provided default
359   // constructor, emit the zero initialization now, unless destination is
360   // already zeroed.
361   if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
362     switch (E->getConstructionKind()) {
363     case CXXConstructExpr::CK_Delegating:
364     case CXXConstructExpr::CK_Complete:
365       EmitNullInitialization(Dest.getAddr(), E->getType());
366       break;
367     case CXXConstructExpr::CK_VirtualBase:
368     case CXXConstructExpr::CK_NonVirtualBase:
369       EmitNullBaseClassInitialization(*this, Dest.getAddr(), CD->getParent());
370       break;
371     }
372   }
373 
374   // If this is a call to a trivial default constructor, do nothing.
375   if (CD->isTrivial() && CD->isDefaultConstructor())
376     return;
377 
378   // Elide the constructor if we're constructing from a temporary.
379   // The temporary check is required because Sema sets this on NRVO
380   // returns.
381   if (getLangOpts().ElideConstructors && E->isElidable()) {
382     assert(getContext().hasSameUnqualifiedType(E->getType(),
383                                                E->getArg(0)->getType()));
384     if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
385       EmitAggExpr(E->getArg(0), Dest);
386       return;
387     }
388   }
389 
390   if (const ConstantArrayType *arrayType
391         = getContext().getAsConstantArrayType(E->getType())) {
392     EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddr(),
393                                E->arg_begin(), E->arg_end());
394   } else {
395     CXXCtorType Type = Ctor_Complete;
396     bool ForVirtualBase = false;
397     bool Delegating = false;
398 
399     switch (E->getConstructionKind()) {
400      case CXXConstructExpr::CK_Delegating:
401       // We should be emitting a constructor; GlobalDecl will assert this
402       Type = CurGD.getCtorType();
403       Delegating = true;
404       break;
405 
406      case CXXConstructExpr::CK_Complete:
407       Type = Ctor_Complete;
408       break;
409 
410      case CXXConstructExpr::CK_VirtualBase:
411       ForVirtualBase = true;
412       // fall-through
413 
414      case CXXConstructExpr::CK_NonVirtualBase:
415       Type = Ctor_Base;
416     }
417 
418     // Call the constructor.
419     EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest.getAddr(),
420                            E->arg_begin(), E->arg_end());
421   }
422 }
423 
424 void
425 CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
426                                             llvm::Value *Src,
427                                             const Expr *Exp) {
428   if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
429     Exp = E->getSubExpr();
430   assert(isa<CXXConstructExpr>(Exp) &&
431          "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
432   const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
433   const CXXConstructorDecl *CD = E->getConstructor();
434   RunCleanupsScope Scope(*this);
435 
436   // If we require zero initialization before (or instead of) calling the
437   // constructor, as can be the case with a non-user-provided default
438   // constructor, emit the zero initialization now.
439   // FIXME. Do I still need this for a copy ctor synthesis?
440   if (E->requiresZeroInitialization())
441     EmitNullInitialization(Dest, E->getType());
442 
443   assert(!getContext().getAsConstantArrayType(E->getType())
444          && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
445   EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E->arg_begin(), E->arg_end());
446 }
447 
448 static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
449                                         const CXXNewExpr *E) {
450   if (!E->isArray())
451     return CharUnits::Zero();
452 
453   // No cookie is required if the operator new[] being used is the
454   // reserved placement operator new[].
455   if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
456     return CharUnits::Zero();
457 
458   return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
459 }
460 
461 static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
462                                         const CXXNewExpr *e,
463                                         unsigned minElements,
464                                         llvm::Value *&numElements,
465                                         llvm::Value *&sizeWithoutCookie) {
466   QualType type = e->getAllocatedType();
467 
468   if (!e->isArray()) {
469     CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
470     sizeWithoutCookie
471       = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
472     return sizeWithoutCookie;
473   }
474 
475   // The width of size_t.
476   unsigned sizeWidth = CGF.SizeTy->getBitWidth();
477 
478   // Figure out the cookie size.
479   llvm::APInt cookieSize(sizeWidth,
480                          CalculateCookiePadding(CGF, e).getQuantity());
481 
482   // Emit the array size expression.
483   // We multiply the size of all dimensions for NumElements.
484   // e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
485   numElements = CGF.EmitScalarExpr(e->getArraySize());
486   assert(isa<llvm::IntegerType>(numElements->getType()));
487 
488   // The number of elements can be have an arbitrary integer type;
489   // essentially, we need to multiply it by a constant factor, add a
490   // cookie size, and verify that the result is representable as a
491   // size_t.  That's just a gloss, though, and it's wrong in one
492   // important way: if the count is negative, it's an error even if
493   // the cookie size would bring the total size >= 0.
494   bool isSigned
495     = e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
496   llvm::IntegerType *numElementsType
497     = cast<llvm::IntegerType>(numElements->getType());
498   unsigned numElementsWidth = numElementsType->getBitWidth();
499 
500   // Compute the constant factor.
501   llvm::APInt arraySizeMultiplier(sizeWidth, 1);
502   while (const ConstantArrayType *CAT
503              = CGF.getContext().getAsConstantArrayType(type)) {
504     type = CAT->getElementType();
505     arraySizeMultiplier *= CAT->getSize();
506   }
507 
508   CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
509   llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
510   typeSizeMultiplier *= arraySizeMultiplier;
511 
512   // This will be a size_t.
513   llvm::Value *size;
514 
515   // If someone is doing 'new int[42]' there is no need to do a dynamic check.
516   // Don't bloat the -O0 code.
517   if (llvm::ConstantInt *numElementsC =
518         dyn_cast<llvm::ConstantInt>(numElements)) {
519     const llvm::APInt &count = numElementsC->getValue();
520 
521     bool hasAnyOverflow = false;
522 
523     // If 'count' was a negative number, it's an overflow.
524     if (isSigned && count.isNegative())
525       hasAnyOverflow = true;
526 
527     // We want to do all this arithmetic in size_t.  If numElements is
528     // wider than that, check whether it's already too big, and if so,
529     // overflow.
530     else if (numElementsWidth > sizeWidth &&
531              numElementsWidth - sizeWidth > count.countLeadingZeros())
532       hasAnyOverflow = true;
533 
534     // Okay, compute a count at the right width.
535     llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);
536 
537     // If there is a brace-initializer, we cannot allocate fewer elements than
538     // there are initializers. If we do, that's treated like an overflow.
539     if (adjustedCount.ult(minElements))
540       hasAnyOverflow = true;
541 
542     // Scale numElements by that.  This might overflow, but we don't
543     // care because it only overflows if allocationSize does, too, and
544     // if that overflows then we shouldn't use this.
545     numElements = llvm::ConstantInt::get(CGF.SizeTy,
546                                          adjustedCount * arraySizeMultiplier);
547 
548     // Compute the size before cookie, and track whether it overflowed.
549     bool overflow;
550     llvm::APInt allocationSize
551       = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
552     hasAnyOverflow |= overflow;
553 
554     // Add in the cookie, and check whether it's overflowed.
555     if (cookieSize != 0) {
556       // Save the current size without a cookie.  This shouldn't be
557       // used if there was overflow.
558       sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
559 
560       allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
561       hasAnyOverflow |= overflow;
562     }
563 
564     // On overflow, produce a -1 so operator new will fail.
565     if (hasAnyOverflow) {
566       size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
567     } else {
568       size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
569     }
570 
571   // Otherwise, we might need to use the overflow intrinsics.
572   } else {
573     // There are up to five conditions we need to test for:
574     // 1) if isSigned, we need to check whether numElements is negative;
575     // 2) if numElementsWidth > sizeWidth, we need to check whether
576     //   numElements is larger than something representable in size_t;
577     // 3) if minElements > 0, we need to check whether numElements is smaller
578     //    than that.
579     // 4) we need to compute
580     //      sizeWithoutCookie := numElements * typeSizeMultiplier
581     //    and check whether it overflows; and
582     // 5) if we need a cookie, we need to compute
583     //      size := sizeWithoutCookie + cookieSize
584     //    and check whether it overflows.
585 
586     llvm::Value *hasOverflow = 0;
587 
588     // If numElementsWidth > sizeWidth, then one way or another, we're
589     // going to have to do a comparison for (2), and this happens to
590     // take care of (1), too.
591     if (numElementsWidth > sizeWidth) {
592       llvm::APInt threshold(numElementsWidth, 1);
593       threshold <<= sizeWidth;
594 
595       llvm::Value *thresholdV
596         = llvm::ConstantInt::get(numElementsType, threshold);
597 
598       hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
599       numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);
600 
601     // Otherwise, if we're signed, we want to sext up to size_t.
602     } else if (isSigned) {
603       if (numElementsWidth < sizeWidth)
604         numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);
605 
606       // If there's a non-1 type size multiplier, then we can do the
607       // signedness check at the same time as we do the multiply
608       // because a negative number times anything will cause an
609       // unsigned overflow.  Otherwise, we have to do it here. But at least
610       // in this case, we can subsume the >= minElements check.
611       if (typeSizeMultiplier == 1)
612         hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
613                               llvm::ConstantInt::get(CGF.SizeTy, minElements));
614 
615     // Otherwise, zext up to size_t if necessary.
616     } else if (numElementsWidth < sizeWidth) {
617       numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
618     }
619 
620     assert(numElements->getType() == CGF.SizeTy);
621 
622     if (minElements) {
623       // Don't allow allocation of fewer elements than we have initializers.
624       if (!hasOverflow) {
625         hasOverflow = CGF.Builder.CreateICmpULT(numElements,
626                               llvm::ConstantInt::get(CGF.SizeTy, minElements));
627       } else if (numElementsWidth > sizeWidth) {
628         // The other existing overflow subsumes this check.
629         // We do an unsigned comparison, since any signed value < -1 is
630         // taken care of either above or below.
631         hasOverflow = CGF.Builder.CreateOr(hasOverflow,
632                           CGF.Builder.CreateICmpULT(numElements,
633                               llvm::ConstantInt::get(CGF.SizeTy, minElements)));
634       }
635     }
636 
637     size = numElements;
638 
639     // Multiply by the type size if necessary.  This multiplier
640     // includes all the factors for nested arrays.
641     //
642     // This step also causes numElements to be scaled up by the
643     // nested-array factor if necessary.  Overflow on this computation
644     // can be ignored because the result shouldn't be used if
645     // allocation fails.
646     if (typeSizeMultiplier != 1) {
647       llvm::Value *umul_with_overflow
648         = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);
649 
650       llvm::Value *tsmV =
651         llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
652       llvm::Value *result =
653         CGF.Builder.CreateCall2(umul_with_overflow, size, tsmV);
654 
655       llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
656       if (hasOverflow)
657         hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
658       else
659         hasOverflow = overflowed;
660 
661       size = CGF.Builder.CreateExtractValue(result, 0);
662 
663       // Also scale up numElements by the array size multiplier.
664       if (arraySizeMultiplier != 1) {
665         // If the base element type size is 1, then we can re-use the
666         // multiply we just did.
667         if (typeSize.isOne()) {
668           assert(arraySizeMultiplier == typeSizeMultiplier);
669           numElements = size;
670 
671         // Otherwise we need a separate multiply.
672         } else {
673           llvm::Value *asmV =
674             llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
675           numElements = CGF.Builder.CreateMul(numElements, asmV);
676         }
677       }
678     } else {
679       // numElements doesn't need to be scaled.
680       assert(arraySizeMultiplier == 1);
681     }
682 
683     // Add in the cookie size if necessary.
684     if (cookieSize != 0) {
685       sizeWithoutCookie = size;
686 
687       llvm::Value *uadd_with_overflow
688         = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);
689 
690       llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
691       llvm::Value *result =
692         CGF.Builder.CreateCall2(uadd_with_overflow, size, cookieSizeV);
693 
694       llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
695       if (hasOverflow)
696         hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
697       else
698         hasOverflow = overflowed;
699 
700       size = CGF.Builder.CreateExtractValue(result, 0);
701     }
702 
703     // If we had any possibility of dynamic overflow, make a select to
704     // overwrite 'size' with an all-ones value, which should cause
705     // operator new to throw.
706     if (hasOverflow)
707       size = CGF.Builder.CreateSelect(hasOverflow,
708                                  llvm::Constant::getAllOnesValue(CGF.SizeTy),
709                                       size);
710   }
711 
712   if (cookieSize == 0)
713     sizeWithoutCookie = size;
714   else
715     assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");
716 
717   return size;
718 }
719 
720 static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
721                                     QualType AllocType, llvm::Value *NewPtr) {
722 
723   CharUnits Alignment = CGF.getContext().getTypeAlignInChars(AllocType);
724   switch (CGF.getEvaluationKind(AllocType)) {
725   case TEK_Scalar:
726     CGF.EmitScalarInit(Init, 0, CGF.MakeAddrLValue(NewPtr, AllocType,
727                                                    Alignment),
728                        false);
729     return;
730   case TEK_Complex:
731     CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType,
732                                                            Alignment),
733                                   /*isInit*/ true);
734     return;
735   case TEK_Aggregate: {
736     AggValueSlot Slot
737       = AggValueSlot::forAddr(NewPtr, Alignment, AllocType.getQualifiers(),
738                               AggValueSlot::IsDestructed,
739                               AggValueSlot::DoesNotNeedGCBarriers,
740                               AggValueSlot::IsNotAliased);
741     CGF.EmitAggExpr(Init, Slot);
742     return;
743   }
744   }
745   llvm_unreachable("bad evaluation kind");
746 }
747 
748 void
749 CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
750                                          QualType elementType,
751                                          llvm::Value *beginPtr,
752                                          llvm::Value *numElements) {
753   if (!E->hasInitializer())
754     return; // We have a POD type.
755 
756   llvm::Value *explicitPtr = beginPtr;
757   // Find the end of the array, hoisted out of the loop.
758   llvm::Value *endPtr =
759     Builder.CreateInBoundsGEP(beginPtr, numElements, "array.end");
760 
761   unsigned initializerElements = 0;
762 
763   const Expr *Init = E->getInitializer();
764   llvm::AllocaInst *endOfInit = 0;
765   QualType::DestructionKind dtorKind = elementType.isDestructedType();
766   EHScopeStack::stable_iterator cleanup;
767   llvm::Instruction *cleanupDominator = 0;
768   // If the initializer is an initializer list, first do the explicit elements.
769   if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
770     initializerElements = ILE->getNumInits();
771 
772     // Enter a partial-destruction cleanup if necessary.
773     if (needsEHCleanup(dtorKind)) {
774       // In principle we could tell the cleanup where we are more
775       // directly, but the control flow can get so varied here that it
776       // would actually be quite complex.  Therefore we go through an
777       // alloca.
778       endOfInit = CreateTempAlloca(beginPtr->getType(), "array.endOfInit");
779       cleanupDominator = Builder.CreateStore(beginPtr, endOfInit);
780       pushIrregularPartialArrayCleanup(beginPtr, endOfInit, elementType,
781                                        getDestroyer(dtorKind));
782       cleanup = EHStack.stable_begin();
783     }
784 
785     for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
786       // Tell the cleanup that it needs to destroy up to this
787       // element.  TODO: some of these stores can be trivially
788       // observed to be unnecessary.
789       if (endOfInit) Builder.CreateStore(explicitPtr, endOfInit);
790       StoreAnyExprIntoOneUnit(*this, ILE->getInit(i), elementType, explicitPtr);
791       explicitPtr =Builder.CreateConstGEP1_32(explicitPtr, 1, "array.exp.next");
792     }
793 
794     // The remaining elements are filled with the array filler expression.
795     Init = ILE->getArrayFiller();
796   }
797 
798   // Create the continuation block.
799   llvm::BasicBlock *contBB = createBasicBlock("new.loop.end");
800 
801   // If the number of elements isn't constant, we have to now check if there is
802   // anything left to initialize.
803   if (llvm::ConstantInt *constNum = dyn_cast<llvm::ConstantInt>(numElements)) {
804     // If all elements have already been initialized, skip the whole loop.
805     if (constNum->getZExtValue() <= initializerElements) {
806       // If there was a cleanup, deactivate it.
807       if (cleanupDominator)
808         DeactivateCleanupBlock(cleanup, cleanupDominator);
809       return;
810     }
811   } else {
812     llvm::BasicBlock *nonEmptyBB = createBasicBlock("new.loop.nonempty");
813     llvm::Value *isEmpty = Builder.CreateICmpEQ(explicitPtr, endPtr,
814                                                 "array.isempty");
815     Builder.CreateCondBr(isEmpty, contBB, nonEmptyBB);
816     EmitBlock(nonEmptyBB);
817   }
818 
819   // Enter the loop.
820   llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
821   llvm::BasicBlock *loopBB = createBasicBlock("new.loop");
822 
823   EmitBlock(loopBB);
824 
825   // Set up the current-element phi.
826   llvm::PHINode *curPtr =
827     Builder.CreatePHI(explicitPtr->getType(), 2, "array.cur");
828   curPtr->addIncoming(explicitPtr, entryBB);
829 
830   // Store the new cleanup position for irregular cleanups.
831   if (endOfInit) Builder.CreateStore(curPtr, endOfInit);
832 
833   // Enter a partial-destruction cleanup if necessary.
834   if (!cleanupDominator && needsEHCleanup(dtorKind)) {
835     pushRegularPartialArrayCleanup(beginPtr, curPtr, elementType,
836                                    getDestroyer(dtorKind));
837     cleanup = EHStack.stable_begin();
838     cleanupDominator = Builder.CreateUnreachable();
839   }
840 
841   // Emit the initializer into this element.
842   StoreAnyExprIntoOneUnit(*this, Init, E->getAllocatedType(), curPtr);
843 
844   // Leave the cleanup if we entered one.
845   if (cleanupDominator) {
846     DeactivateCleanupBlock(cleanup, cleanupDominator);
847     cleanupDominator->eraseFromParent();
848   }
849 
850   // Advance to the next element.
851   llvm::Value *nextPtr = Builder.CreateConstGEP1_32(curPtr, 1, "array.next");
852 
853   // Check whether we've gotten to the end of the array and, if so,
854   // exit the loop.
855   llvm::Value *isEnd = Builder.CreateICmpEQ(nextPtr, endPtr, "array.atend");
856   Builder.CreateCondBr(isEnd, contBB, loopBB);
857   curPtr->addIncoming(nextPtr, Builder.GetInsertBlock());
858 
859   EmitBlock(contBB);
860 }
861 
862 static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
863                            llvm::Value *NewPtr, llvm::Value *Size) {
864   CGF.EmitCastToVoidPtr(NewPtr);
865   CharUnits Alignment = CGF.getContext().getTypeAlignInChars(T);
866   CGF.Builder.CreateMemSet(NewPtr, CGF.Builder.getInt8(0), Size,
867                            Alignment.getQuantity(), false);
868 }
869 
870 static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
871                                QualType ElementType,
872                                llvm::Value *NewPtr,
873                                llvm::Value *NumElements,
874                                llvm::Value *AllocSizeWithoutCookie) {
875   const Expr *Init = E->getInitializer();
876   if (E->isArray()) {
877     if (const CXXConstructExpr *CCE = dyn_cast_or_null<CXXConstructExpr>(Init)){
878       CXXConstructorDecl *Ctor = CCE->getConstructor();
879       if (Ctor->isTrivial()) {
880         // If new expression did not specify value-initialization, then there
881         // is no initialization.
882         if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
883           return;
884 
885         if (CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
886           // Optimization: since zero initialization will just set the memory
887           // to all zeroes, generate a single memset to do it in one shot.
888           EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
889           return;
890         }
891       }
892 
893       CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
894                                      CCE->arg_begin(),  CCE->arg_end(),
895                                      CCE->requiresZeroInitialization());
896       return;
897     } else if (Init && isa<ImplicitValueInitExpr>(Init) &&
898                CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
899       // Optimization: since zero initialization will just set the memory
900       // to all zeroes, generate a single memset to do it in one shot.
901       EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
902       return;
903     }
904     CGF.EmitNewArrayInitializer(E, ElementType, NewPtr, NumElements);
905     return;
906   }
907 
908   if (!Init)
909     return;
910 
911   StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr);
912 }
913 
914 /// Emit a call to an operator new or operator delete function, as implicitly
915 /// created by new-expressions and delete-expressions.
916 static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
917                                 const FunctionDecl *Callee,
918                                 const FunctionProtoType *CalleeType,
919                                 const CallArgList &Args) {
920   llvm::Instruction *CallOrInvoke;
921   llvm::Value *CalleeAddr = CGF.CGM.GetAddrOfFunction(Callee);
922   RValue RV =
923       CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(Args, CalleeType),
924                    CalleeAddr, ReturnValueSlot(), Args,
925                    Callee, &CallOrInvoke);
926 
927   /// C++1y [expr.new]p10:
928   ///   [In a new-expression,] an implementation is allowed to omit a call
929   ///   to a replaceable global allocation function.
930   ///
931   /// We model such elidable calls with the 'builtin' attribute.
932   llvm::Function *Fn = dyn_cast<llvm::Function>(CalleeAddr);
933   if (Callee->isReplaceableGlobalAllocationFunction() &&
934       Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
935     // FIXME: Add addAttribute to CallSite.
936     if (llvm::CallInst *CI = dyn_cast<llvm::CallInst>(CallOrInvoke))
937       CI->addAttribute(llvm::AttributeSet::FunctionIndex,
938                        llvm::Attribute::Builtin);
939     else if (llvm::InvokeInst *II = dyn_cast<llvm::InvokeInst>(CallOrInvoke))
940       II->addAttribute(llvm::AttributeSet::FunctionIndex,
941                        llvm::Attribute::Builtin);
942     else
943       llvm_unreachable("unexpected kind of call instruction");
944   }
945 
946   return RV;
947 }
948 
949 namespace {
950   /// A cleanup to call the given 'operator delete' function upon
951   /// abnormal exit from a new expression.
952   class CallDeleteDuringNew : public EHScopeStack::Cleanup {
953     size_t NumPlacementArgs;
954     const FunctionDecl *OperatorDelete;
955     llvm::Value *Ptr;
956     llvm::Value *AllocSize;
957 
958     RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }
959 
960   public:
961     static size_t getExtraSize(size_t NumPlacementArgs) {
962       return NumPlacementArgs * sizeof(RValue);
963     }
964 
965     CallDeleteDuringNew(size_t NumPlacementArgs,
966                         const FunctionDecl *OperatorDelete,
967                         llvm::Value *Ptr,
968                         llvm::Value *AllocSize)
969       : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
970         Ptr(Ptr), AllocSize(AllocSize) {}
971 
972     void setPlacementArg(unsigned I, RValue Arg) {
973       assert(I < NumPlacementArgs && "index out of range");
974       getPlacementArgs()[I] = Arg;
975     }
976 
977     void Emit(CodeGenFunction &CGF, Flags flags) {
978       const FunctionProtoType *FPT
979         = OperatorDelete->getType()->getAs<FunctionProtoType>();
980       assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
981              (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));
982 
983       CallArgList DeleteArgs;
984 
985       // The first argument is always a void*.
986       FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
987       DeleteArgs.add(RValue::get(Ptr), *AI++);
988 
989       // A member 'operator delete' can take an extra 'size_t' argument.
990       if (FPT->getNumArgs() == NumPlacementArgs + 2)
991         DeleteArgs.add(RValue::get(AllocSize), *AI++);
992 
993       // Pass the rest of the arguments, which must match exactly.
994       for (unsigned I = 0; I != NumPlacementArgs; ++I)
995         DeleteArgs.add(getPlacementArgs()[I], *AI++);
996 
997       // Call 'operator delete'.
998       EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
999     }
1000   };
1001 
1002   /// A cleanup to call the given 'operator delete' function upon
1003   /// abnormal exit from a new expression when the new expression is
1004   /// conditional.
1005   class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
1006     size_t NumPlacementArgs;
1007     const FunctionDecl *OperatorDelete;
1008     DominatingValue<RValue>::saved_type Ptr;
1009     DominatingValue<RValue>::saved_type AllocSize;
1010 
1011     DominatingValue<RValue>::saved_type *getPlacementArgs() {
1012       return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
1013     }
1014 
1015   public:
1016     static size_t getExtraSize(size_t NumPlacementArgs) {
1017       return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
1018     }
1019 
1020     CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
1021                                    const FunctionDecl *OperatorDelete,
1022                                    DominatingValue<RValue>::saved_type Ptr,
1023                               DominatingValue<RValue>::saved_type AllocSize)
1024       : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
1025         Ptr(Ptr), AllocSize(AllocSize) {}
1026 
1027     void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
1028       assert(I < NumPlacementArgs && "index out of range");
1029       getPlacementArgs()[I] = Arg;
1030     }
1031 
1032     void Emit(CodeGenFunction &CGF, Flags flags) {
1033       const FunctionProtoType *FPT
1034         = OperatorDelete->getType()->getAs<FunctionProtoType>();
1035       assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
1036              (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));
1037 
1038       CallArgList DeleteArgs;
1039 
1040       // The first argument is always a void*.
1041       FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
1042       DeleteArgs.add(Ptr.restore(CGF), *AI++);
1043 
1044       // A member 'operator delete' can take an extra 'size_t' argument.
1045       if (FPT->getNumArgs() == NumPlacementArgs + 2) {
1046         RValue RV = AllocSize.restore(CGF);
1047         DeleteArgs.add(RV, *AI++);
1048       }
1049 
1050       // Pass the rest of the arguments, which must match exactly.
1051       for (unsigned I = 0; I != NumPlacementArgs; ++I) {
1052         RValue RV = getPlacementArgs()[I].restore(CGF);
1053         DeleteArgs.add(RV, *AI++);
1054       }
1055 
1056       // Call 'operator delete'.
1057       EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
1058     }
1059   };
1060 }
1061 
1062 /// Enter a cleanup to call 'operator delete' if the initializer in a
1063 /// new-expression throws.
1064 static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
1065                                   const CXXNewExpr *E,
1066                                   llvm::Value *NewPtr,
1067                                   llvm::Value *AllocSize,
1068                                   const CallArgList &NewArgs) {
1069   // If we're not inside a conditional branch, then the cleanup will
1070   // dominate and we can do the easier (and more efficient) thing.
1071   if (!CGF.isInConditionalBranch()) {
1072     CallDeleteDuringNew *Cleanup = CGF.EHStack
1073       .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
1074                                                  E->getNumPlacementArgs(),
1075                                                  E->getOperatorDelete(),
1076                                                  NewPtr, AllocSize);
1077     for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
1078       Cleanup->setPlacementArg(I, NewArgs[I+1].RV);
1079 
1080     return;
1081   }
1082 
1083   // Otherwise, we need to save all this stuff.
1084   DominatingValue<RValue>::saved_type SavedNewPtr =
1085     DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
1086   DominatingValue<RValue>::saved_type SavedAllocSize =
1087     DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
1088 
1089   CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
1090     .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(EHCleanup,
1091                                                  E->getNumPlacementArgs(),
1092                                                  E->getOperatorDelete(),
1093                                                  SavedNewPtr,
1094                                                  SavedAllocSize);
1095   for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
1096     Cleanup->setPlacementArg(I,
1097                      DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV));
1098 
1099   CGF.initFullExprCleanup();
1100 }
1101 
1102 llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
1103   // The element type being allocated.
1104   QualType allocType = getContext().getBaseElementType(E->getAllocatedType());
1105 
1106   // 1. Build a call to the allocation function.
1107   FunctionDecl *allocator = E->getOperatorNew();
1108   const FunctionProtoType *allocatorType =
1109     allocator->getType()->castAs<FunctionProtoType>();
1110 
1111   CallArgList allocatorArgs;
1112 
1113   // The allocation size is the first argument.
1114   QualType sizeType = getContext().getSizeType();
1115 
1116   // If there is a brace-initializer, cannot allocate fewer elements than inits.
1117   unsigned minElements = 0;
1118   if (E->isArray() && E->hasInitializer()) {
1119     if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer()))
1120       minElements = ILE->getNumInits();
1121   }
1122 
1123   llvm::Value *numElements = 0;
1124   llvm::Value *allocSizeWithoutCookie = 0;
1125   llvm::Value *allocSize =
1126     EmitCXXNewAllocSize(*this, E, minElements, numElements,
1127                         allocSizeWithoutCookie);
1128 
1129   allocatorArgs.add(RValue::get(allocSize), sizeType);
1130 
1131   // Emit the rest of the arguments.
1132   // FIXME: Ideally, this should just use EmitCallArgs.
1133   CXXNewExpr::const_arg_iterator placementArg = E->placement_arg_begin();
1134 
1135   // First, use the types from the function type.
1136   // We start at 1 here because the first argument (the allocation size)
1137   // has already been emitted.
1138   for (unsigned i = 1, e = allocatorType->getNumArgs(); i != e;
1139        ++i, ++placementArg) {
1140     QualType argType = allocatorType->getArgType(i);
1141 
1142     assert(getContext().hasSameUnqualifiedType(argType.getNonReferenceType(),
1143                                                placementArg->getType()) &&
1144            "type mismatch in call argument!");
1145 
1146     EmitCallArg(allocatorArgs, *placementArg, argType);
1147   }
1148 
1149   // Either we've emitted all the call args, or we have a call to a
1150   // variadic function.
1151   assert((placementArg == E->placement_arg_end() ||
1152           allocatorType->isVariadic()) &&
1153          "Extra arguments to non-variadic function!");
1154 
1155   // If we still have any arguments, emit them using the type of the argument.
1156   for (CXXNewExpr::const_arg_iterator placementArgsEnd = E->placement_arg_end();
1157        placementArg != placementArgsEnd; ++placementArg) {
1158     EmitCallArg(allocatorArgs, *placementArg, placementArg->getType());
1159   }
1160 
1161   // Emit the allocation call.  If the allocator is a global placement
1162   // operator, just "inline" it directly.
1163   RValue RV;
1164   if (allocator->isReservedGlobalPlacementOperator()) {
1165     assert(allocatorArgs.size() == 2);
1166     RV = allocatorArgs[1].RV;
1167     // TODO: kill any unnecessary computations done for the size
1168     // argument.
1169   } else {
1170     RV = EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
1171   }
1172 
1173   // Emit a null check on the allocation result if the allocation
1174   // function is allowed to return null (because it has a non-throwing
1175   // exception spec; for this part, we inline
1176   // CXXNewExpr::shouldNullCheckAllocation()) and we have an
1177   // interesting initializer.
1178   bool nullCheck = allocatorType->isNothrow(getContext()) &&
1179     (!allocType.isPODType(getContext()) || E->hasInitializer());
1180 
1181   llvm::BasicBlock *nullCheckBB = 0;
1182   llvm::BasicBlock *contBB = 0;
1183 
1184   llvm::Value *allocation = RV.getScalarVal();
1185   unsigned AS = allocation->getType()->getPointerAddressSpace();
1186 
1187   // The null-check means that the initializer is conditionally
1188   // evaluated.
1189   ConditionalEvaluation conditional(*this);
1190 
1191   if (nullCheck) {
1192     conditional.begin(*this);
1193 
1194     nullCheckBB = Builder.GetInsertBlock();
1195     llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
1196     contBB = createBasicBlock("new.cont");
1197 
1198     llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
1199     Builder.CreateCondBr(isNull, contBB, notNullBB);
1200     EmitBlock(notNullBB);
1201   }
1202 
1203   // If there's an operator delete, enter a cleanup to call it if an
1204   // exception is thrown.
1205   EHScopeStack::stable_iterator operatorDeleteCleanup;
1206   llvm::Instruction *cleanupDominator = 0;
1207   if (E->getOperatorDelete() &&
1208       !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
1209     EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
1210     operatorDeleteCleanup = EHStack.stable_begin();
1211     cleanupDominator = Builder.CreateUnreachable();
1212   }
1213 
1214   assert((allocSize == allocSizeWithoutCookie) ==
1215          CalculateCookiePadding(*this, E).isZero());
1216   if (allocSize != allocSizeWithoutCookie) {
1217     assert(E->isArray());
1218     allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
1219                                                        numElements,
1220                                                        E, allocType);
1221   }
1222 
1223   llvm::Type *elementPtrTy
1224     = ConvertTypeForMem(allocType)->getPointerTo(AS);
1225   llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy);
1226 
1227   EmitNewInitializer(*this, E, allocType, result, numElements,
1228                      allocSizeWithoutCookie);
1229   if (E->isArray()) {
1230     // NewPtr is a pointer to the base element type.  If we're
1231     // allocating an array of arrays, we'll need to cast back to the
1232     // array pointer type.
1233     llvm::Type *resultType = ConvertTypeForMem(E->getType());
1234     if (result->getType() != resultType)
1235       result = Builder.CreateBitCast(result, resultType);
1236   }
1237 
1238   // Deactivate the 'operator delete' cleanup if we finished
1239   // initialization.
1240   if (operatorDeleteCleanup.isValid()) {
1241     DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
1242     cleanupDominator->eraseFromParent();
1243   }
1244 
1245   if (nullCheck) {
1246     conditional.end(*this);
1247 
1248     llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
1249     EmitBlock(contBB);
1250 
1251     llvm::PHINode *PHI = Builder.CreatePHI(result->getType(), 2);
1252     PHI->addIncoming(result, notNullBB);
1253     PHI->addIncoming(llvm::Constant::getNullValue(result->getType()),
1254                      nullCheckBB);
1255 
1256     result = PHI;
1257   }
1258 
1259   return result;
1260 }
1261 
1262 void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
1263                                      llvm::Value *Ptr,
1264                                      QualType DeleteTy) {
1265   assert(DeleteFD->getOverloadedOperator() == OO_Delete);
1266 
1267   const FunctionProtoType *DeleteFTy =
1268     DeleteFD->getType()->getAs<FunctionProtoType>();
1269 
1270   CallArgList DeleteArgs;
1271 
1272   // Check if we need to pass the size to the delete operator.
1273   llvm::Value *Size = 0;
1274   QualType SizeTy;
1275   if (DeleteFTy->getNumArgs() == 2) {
1276     SizeTy = DeleteFTy->getArgType(1);
1277     CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
1278     Size = llvm::ConstantInt::get(ConvertType(SizeTy),
1279                                   DeleteTypeSize.getQuantity());
1280   }
1281 
1282   QualType ArgTy = DeleteFTy->getArgType(0);
1283   llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
1284   DeleteArgs.add(RValue::get(DeletePtr), ArgTy);
1285 
1286   if (Size)
1287     DeleteArgs.add(RValue::get(Size), SizeTy);
1288 
1289   // Emit the call to delete.
1290   EmitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs);
1291 }
1292 
1293 namespace {
1294   /// Calls the given 'operator delete' on a single object.
1295   struct CallObjectDelete : EHScopeStack::Cleanup {
1296     llvm::Value *Ptr;
1297     const FunctionDecl *OperatorDelete;
1298     QualType ElementType;
1299 
1300     CallObjectDelete(llvm::Value *Ptr,
1301                      const FunctionDecl *OperatorDelete,
1302                      QualType ElementType)
1303       : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
1304 
1305     void Emit(CodeGenFunction &CGF, Flags flags) {
1306       CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
1307     }
1308   };
1309 }
1310 
1311 /// Emit the code for deleting a single object.
1312 static void EmitObjectDelete(CodeGenFunction &CGF,
1313                              const FunctionDecl *OperatorDelete,
1314                              llvm::Value *Ptr,
1315                              QualType ElementType,
1316                              bool UseGlobalDelete) {
1317   // Find the destructor for the type, if applicable.  If the
1318   // destructor is virtual, we'll just emit the vcall and return.
1319   const CXXDestructorDecl *Dtor = 0;
1320   if (const RecordType *RT = ElementType->getAs<RecordType>()) {
1321     CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1322     if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
1323       Dtor = RD->getDestructor();
1324 
1325       if (Dtor->isVirtual()) {
1326         if (UseGlobalDelete) {
1327           // If we're supposed to call the global delete, make sure we do so
1328           // even if the destructor throws.
1329 
1330           // Derive the complete-object pointer, which is what we need
1331           // to pass to the deallocation function.
1332           llvm::Value *completePtr =
1333             CGF.CGM.getCXXABI().adjustToCompleteObject(CGF, Ptr, ElementType);
1334 
1335           CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
1336                                                     completePtr, OperatorDelete,
1337                                                     ElementType);
1338         }
1339 
1340         // FIXME: Provide a source location here.
1341         CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
1342         CGF.CGM.getCXXABI().EmitVirtualDestructorCall(CGF, Dtor, DtorType,
1343                                                       SourceLocation(), Ptr);
1344 
1345         if (UseGlobalDelete) {
1346           CGF.PopCleanupBlock();
1347         }
1348 
1349         return;
1350       }
1351     }
1352   }
1353 
1354   // Make sure that we call delete even if the dtor throws.
1355   // This doesn't have to a conditional cleanup because we're going
1356   // to pop it off in a second.
1357   CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
1358                                             Ptr, OperatorDelete, ElementType);
1359 
1360   if (Dtor)
1361     CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
1362                               /*ForVirtualBase=*/false,
1363                               /*Delegating=*/false,
1364                               Ptr);
1365   else if (CGF.getLangOpts().ObjCAutoRefCount &&
1366            ElementType->isObjCLifetimeType()) {
1367     switch (ElementType.getObjCLifetime()) {
1368     case Qualifiers::OCL_None:
1369     case Qualifiers::OCL_ExplicitNone:
1370     case Qualifiers::OCL_Autoreleasing:
1371       break;
1372 
1373     case Qualifiers::OCL_Strong: {
1374       // Load the pointer value.
1375       llvm::Value *PtrValue = CGF.Builder.CreateLoad(Ptr,
1376                                              ElementType.isVolatileQualified());
1377 
1378       CGF.EmitARCRelease(PtrValue, ARCPreciseLifetime);
1379       break;
1380     }
1381 
1382     case Qualifiers::OCL_Weak:
1383       CGF.EmitARCDestroyWeak(Ptr);
1384       break;
1385     }
1386   }
1387 
1388   CGF.PopCleanupBlock();
1389 }
1390 
1391 namespace {
1392   /// Calls the given 'operator delete' on an array of objects.
1393   struct CallArrayDelete : EHScopeStack::Cleanup {
1394     llvm::Value *Ptr;
1395     const FunctionDecl *OperatorDelete;
1396     llvm::Value *NumElements;
1397     QualType ElementType;
1398     CharUnits CookieSize;
1399 
1400     CallArrayDelete(llvm::Value *Ptr,
1401                     const FunctionDecl *OperatorDelete,
1402                     llvm::Value *NumElements,
1403                     QualType ElementType,
1404                     CharUnits CookieSize)
1405       : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
1406         ElementType(ElementType), CookieSize(CookieSize) {}
1407 
1408     void Emit(CodeGenFunction &CGF, Flags flags) {
1409       const FunctionProtoType *DeleteFTy =
1410         OperatorDelete->getType()->getAs<FunctionProtoType>();
1411       assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);
1412 
1413       CallArgList Args;
1414 
1415       // Pass the pointer as the first argument.
1416       QualType VoidPtrTy = DeleteFTy->getArgType(0);
1417       llvm::Value *DeletePtr
1418         = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
1419       Args.add(RValue::get(DeletePtr), VoidPtrTy);
1420 
1421       // Pass the original requested size as the second argument.
1422       if (DeleteFTy->getNumArgs() == 2) {
1423         QualType size_t = DeleteFTy->getArgType(1);
1424         llvm::IntegerType *SizeTy
1425           = cast<llvm::IntegerType>(CGF.ConvertType(size_t));
1426 
1427         CharUnits ElementTypeSize =
1428           CGF.CGM.getContext().getTypeSizeInChars(ElementType);
1429 
1430         // The size of an element, multiplied by the number of elements.
1431         llvm::Value *Size
1432           = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
1433         Size = CGF.Builder.CreateMul(Size, NumElements);
1434 
1435         // Plus the size of the cookie if applicable.
1436         if (!CookieSize.isZero()) {
1437           llvm::Value *CookieSizeV
1438             = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
1439           Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
1440         }
1441 
1442         Args.add(RValue::get(Size), size_t);
1443       }
1444 
1445       // Emit the call to delete.
1446       EmitNewDeleteCall(CGF, OperatorDelete, DeleteFTy, Args);
1447     }
1448   };
1449 }
1450 
1451 /// Emit the code for deleting an array of objects.
1452 static void EmitArrayDelete(CodeGenFunction &CGF,
1453                             const CXXDeleteExpr *E,
1454                             llvm::Value *deletedPtr,
1455                             QualType elementType) {
1456   llvm::Value *numElements = 0;
1457   llvm::Value *allocatedPtr = 0;
1458   CharUnits cookieSize;
1459   CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
1460                                       numElements, allocatedPtr, cookieSize);
1461 
1462   assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");
1463 
1464   // Make sure that we call delete even if one of the dtors throws.
1465   const FunctionDecl *operatorDelete = E->getOperatorDelete();
1466   CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
1467                                            allocatedPtr, operatorDelete,
1468                                            numElements, elementType,
1469                                            cookieSize);
1470 
1471   // Destroy the elements.
1472   if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
1473     assert(numElements && "no element count for a type with a destructor!");
1474 
1475     llvm::Value *arrayEnd =
1476       CGF.Builder.CreateInBoundsGEP(deletedPtr, numElements, "delete.end");
1477 
1478     // Note that it is legal to allocate a zero-length array, and we
1479     // can never fold the check away because the length should always
1480     // come from a cookie.
1481     CGF.emitArrayDestroy(deletedPtr, arrayEnd, elementType,
1482                          CGF.getDestroyer(dtorKind),
1483                          /*checkZeroLength*/ true,
1484                          CGF.needsEHCleanup(dtorKind));
1485   }
1486 
1487   // Pop the cleanup block.
1488   CGF.PopCleanupBlock();
1489 }
1490 
1491 void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
1492   const Expr *Arg = E->getArgument();
1493   llvm::Value *Ptr = EmitScalarExpr(Arg);
1494 
1495   // Null check the pointer.
1496   llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
1497   llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");
1498 
1499   llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");
1500 
1501   Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
1502   EmitBlock(DeleteNotNull);
1503 
1504   // We might be deleting a pointer to array.  If so, GEP down to the
1505   // first non-array element.
1506   // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
1507   QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
1508   if (DeleteTy->isConstantArrayType()) {
1509     llvm::Value *Zero = Builder.getInt32(0);
1510     SmallVector<llvm::Value*,8> GEP;
1511 
1512     GEP.push_back(Zero); // point at the outermost array
1513 
1514     // For each layer of array type we're pointing at:
1515     while (const ConstantArrayType *Arr
1516              = getContext().getAsConstantArrayType(DeleteTy)) {
1517       // 1. Unpeel the array type.
1518       DeleteTy = Arr->getElementType();
1519 
1520       // 2. GEP to the first element of the array.
1521       GEP.push_back(Zero);
1522     }
1523 
1524     Ptr = Builder.CreateInBoundsGEP(Ptr, GEP, "del.first");
1525   }
1526 
1527   assert(ConvertTypeForMem(DeleteTy) ==
1528          cast<llvm::PointerType>(Ptr->getType())->getElementType());
1529 
1530   if (E->isArrayForm()) {
1531     EmitArrayDelete(*this, E, Ptr, DeleteTy);
1532   } else {
1533     EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy,
1534                      E->isGlobalDelete());
1535   }
1536 
1537   EmitBlock(DeleteEnd);
1538 }
1539 
1540 static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
1541   // void __cxa_bad_typeid();
1542   llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1543 
1544   return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1545 }
1546 
1547 static void EmitBadTypeidCall(CodeGenFunction &CGF) {
1548   llvm::Value *Fn = getBadTypeidFn(CGF);
1549   CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
1550   CGF.Builder.CreateUnreachable();
1551 }
1552 
1553 static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF,
1554                                          const Expr *E,
1555                                          llvm::Type *StdTypeInfoPtrTy) {
1556   // Get the vtable pointer.
1557   llvm::Value *ThisPtr = CGF.EmitLValue(E).getAddress();
1558 
1559   // C++ [expr.typeid]p2:
1560   //   If the glvalue expression is obtained by applying the unary * operator to
1561   //   a pointer and the pointer is a null pointer value, the typeid expression
1562   //   throws the std::bad_typeid exception.
1563   if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E->IgnoreParens())) {
1564     if (UO->getOpcode() == UO_Deref) {
1565       llvm::BasicBlock *BadTypeidBlock =
1566         CGF.createBasicBlock("typeid.bad_typeid");
1567       llvm::BasicBlock *EndBlock =
1568         CGF.createBasicBlock("typeid.end");
1569 
1570       llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr);
1571       CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);
1572 
1573       CGF.EmitBlock(BadTypeidBlock);
1574       EmitBadTypeidCall(CGF);
1575       CGF.EmitBlock(EndBlock);
1576     }
1577   }
1578 
1579   llvm::Value *Value = CGF.GetVTablePtr(ThisPtr,
1580                                         StdTypeInfoPtrTy->getPointerTo());
1581 
1582   // Load the type info.
1583   Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
1584   return CGF.Builder.CreateLoad(Value);
1585 }
1586 
1587 llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
1588   llvm::Type *StdTypeInfoPtrTy =
1589     ConvertType(E->getType())->getPointerTo();
1590 
1591   if (E->isTypeOperand()) {
1592     llvm::Constant *TypeInfo =
1593         CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand(getContext()));
1594     return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
1595   }
1596 
1597   // C++ [expr.typeid]p2:
1598   //   When typeid is applied to a glvalue expression whose type is a
1599   //   polymorphic class type, the result refers to a std::type_info object
1600   //   representing the type of the most derived object (that is, the dynamic
1601   //   type) to which the glvalue refers.
1602   if (E->isPotentiallyEvaluated())
1603     return EmitTypeidFromVTable(*this, E->getExprOperand(),
1604                                 StdTypeInfoPtrTy);
1605 
1606   QualType OperandTy = E->getExprOperand()->getType();
1607   return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
1608                                StdTypeInfoPtrTy);
1609 }
1610 
1611 static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
1612   // void *__dynamic_cast(const void *sub,
1613   //                      const abi::__class_type_info *src,
1614   //                      const abi::__class_type_info *dst,
1615   //                      std::ptrdiff_t src2dst_offset);
1616 
1617   llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1618   llvm::Type *PtrDiffTy =
1619     CGF.ConvertType(CGF.getContext().getPointerDiffType());
1620 
1621   llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
1622 
1623   llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
1624 
1625   // Mark the function as nounwind readonly.
1626   llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
1627                                             llvm::Attribute::ReadOnly };
1628   llvm::AttributeSet Attrs = llvm::AttributeSet::get(
1629       CGF.getLLVMContext(), llvm::AttributeSet::FunctionIndex, FuncAttrs);
1630 
1631   return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
1632 }
1633 
1634 static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
1635   // void __cxa_bad_cast();
1636   llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1637   return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1638 }
1639 
1640 static void EmitBadCastCall(CodeGenFunction &CGF) {
1641   llvm::Value *Fn = getBadCastFn(CGF);
1642   CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
1643   CGF.Builder.CreateUnreachable();
1644 }
1645 
1646 /// \brief Compute the src2dst_offset hint as described in the
1647 /// Itanium C++ ABI [2.9.7]
1648 static CharUnits computeOffsetHint(ASTContext &Context,
1649                                    const CXXRecordDecl *Src,
1650                                    const CXXRecordDecl *Dst) {
1651   CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1652                      /*DetectVirtual=*/false);
1653 
1654   // If Dst is not derived from Src we can skip the whole computation below and
1655   // return that Src is not a public base of Dst.  Record all inheritance paths.
1656   if (!Dst->isDerivedFrom(Src, Paths))
1657     return CharUnits::fromQuantity(-2ULL);
1658 
1659   unsigned NumPublicPaths = 0;
1660   CharUnits Offset;
1661 
1662   // Now walk all possible inheritance paths.
1663   for (CXXBasePaths::paths_iterator I = Paths.begin(), E = Paths.end();
1664        I != E; ++I) {
1665     if (I->Access != AS_public) // Ignore non-public inheritance.
1666       continue;
1667 
1668     ++NumPublicPaths;
1669 
1670     for (CXXBasePath::iterator J = I->begin(), JE = I->end(); J != JE; ++J) {
1671       // If the path contains a virtual base class we can't give any hint.
1672       // -1: no hint.
1673       if (J->Base->isVirtual())
1674         return CharUnits::fromQuantity(-1ULL);
1675 
1676       if (NumPublicPaths > 1) // Won't use offsets, skip computation.
1677         continue;
1678 
1679       // Accumulate the base class offsets.
1680       const ASTRecordLayout &L = Context.getASTRecordLayout(J->Class);
1681       Offset += L.getBaseClassOffset(J->Base->getType()->getAsCXXRecordDecl());
1682     }
1683   }
1684 
1685   // -2: Src is not a public base of Dst.
1686   if (NumPublicPaths == 0)
1687     return CharUnits::fromQuantity(-2ULL);
1688 
1689   // -3: Src is a multiple public base type but never a virtual base type.
1690   if (NumPublicPaths > 1)
1691     return CharUnits::fromQuantity(-3ULL);
1692 
1693   // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
1694   // Return the offset of Src from the origin of Dst.
1695   return Offset;
1696 }
1697 
1698 static llvm::Value *
1699 EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
1700                     QualType SrcTy, QualType DestTy,
1701                     llvm::BasicBlock *CastEnd) {
1702   llvm::Type *PtrDiffLTy =
1703     CGF.ConvertType(CGF.getContext().getPointerDiffType());
1704   llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1705 
1706   if (const PointerType *PTy = DestTy->getAs<PointerType>()) {
1707     if (PTy->getPointeeType()->isVoidType()) {
1708       // C++ [expr.dynamic.cast]p7:
1709       //   If T is "pointer to cv void," then the result is a pointer to the
1710       //   most derived object pointed to by v.
1711 
1712       // Get the vtable pointer.
1713       llvm::Value *VTable = CGF.GetVTablePtr(Value, PtrDiffLTy->getPointerTo());
1714 
1715       // Get the offset-to-top from the vtable.
1716       llvm::Value *OffsetToTop =
1717         CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
1718       OffsetToTop = CGF.Builder.CreateLoad(OffsetToTop, "offset.to.top");
1719 
1720       // Finally, add the offset to the pointer.
1721       Value = CGF.EmitCastToVoidPtr(Value);
1722       Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);
1723 
1724       return CGF.Builder.CreateBitCast(Value, DestLTy);
1725     }
1726   }
1727 
1728   QualType SrcRecordTy;
1729   QualType DestRecordTy;
1730 
1731   if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) {
1732     SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
1733     DestRecordTy = DestPTy->getPointeeType();
1734   } else {
1735     SrcRecordTy = SrcTy;
1736     DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
1737   }
1738 
1739   assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
1740   assert(DestRecordTy->isRecordType() && "dest type must be a record type!");
1741 
1742   llvm::Value *SrcRTTI =
1743     CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
1744   llvm::Value *DestRTTI =
1745     CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());
1746 
1747   // Compute the offset hint.
1748   const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
1749   const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
1750   llvm::Value *OffsetHint =
1751     llvm::ConstantInt::get(PtrDiffLTy,
1752                            computeOffsetHint(CGF.getContext(), SrcDecl,
1753                                              DestDecl).getQuantity());
1754 
1755   // Emit the call to __dynamic_cast.
1756   Value = CGF.EmitCastToVoidPtr(Value);
1757 
1758   llvm::Value *args[] = { Value, SrcRTTI, DestRTTI, OffsetHint };
1759   Value = CGF.EmitNounwindRuntimeCall(getDynamicCastFn(CGF), args);
1760   Value = CGF.Builder.CreateBitCast(Value, DestLTy);
1761 
1762   /// C++ [expr.dynamic.cast]p9:
1763   ///   A failed cast to reference type throws std::bad_cast
1764   if (DestTy->isReferenceType()) {
1765     llvm::BasicBlock *BadCastBlock =
1766       CGF.createBasicBlock("dynamic_cast.bad_cast");
1767 
1768     llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
1769     CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);
1770 
1771     CGF.EmitBlock(BadCastBlock);
1772     EmitBadCastCall(CGF);
1773   }
1774 
1775   return Value;
1776 }
1777 
1778 static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
1779                                           QualType DestTy) {
1780   llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1781   if (DestTy->isPointerType())
1782     return llvm::Constant::getNullValue(DestLTy);
1783 
1784   /// C++ [expr.dynamic.cast]p9:
1785   ///   A failed cast to reference type throws std::bad_cast
1786   EmitBadCastCall(CGF);
1787 
1788   CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
1789   return llvm::UndefValue::get(DestLTy);
1790 }
1791 
1792 llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
1793                                               const CXXDynamicCastExpr *DCE) {
1794   QualType DestTy = DCE->getTypeAsWritten();
1795 
1796   if (DCE->isAlwaysNull())
1797     return EmitDynamicCastToNull(*this, DestTy);
1798 
1799   QualType SrcTy = DCE->getSubExpr()->getType();
1800 
1801   // C++ [expr.dynamic.cast]p4:
1802   //   If the value of v is a null pointer value in the pointer case, the result
1803   //   is the null pointer value of type T.
1804   bool ShouldNullCheckSrcValue = SrcTy->isPointerType();
1805 
1806   llvm::BasicBlock *CastNull = 0;
1807   llvm::BasicBlock *CastNotNull = 0;
1808   llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");
1809 
1810   if (ShouldNullCheckSrcValue) {
1811     CastNull = createBasicBlock("dynamic_cast.null");
1812     CastNotNull = createBasicBlock("dynamic_cast.notnull");
1813 
1814     llvm::Value *IsNull = Builder.CreateIsNull(Value);
1815     Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
1816     EmitBlock(CastNotNull);
1817   }
1818 
1819   Value = EmitDynamicCastCall(*this, Value, SrcTy, DestTy, CastEnd);
1820 
1821   if (ShouldNullCheckSrcValue) {
1822     EmitBranch(CastEnd);
1823 
1824     EmitBlock(CastNull);
1825     EmitBranch(CastEnd);
1826   }
1827 
1828   EmitBlock(CastEnd);
1829 
1830   if (ShouldNullCheckSrcValue) {
1831     llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
1832     PHI->addIncoming(Value, CastNotNull);
1833     PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);
1834 
1835     Value = PHI;
1836   }
1837 
1838   return Value;
1839 }
1840 
1841 void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
1842   RunCleanupsScope Scope(*this);
1843   LValue SlotLV = MakeAddrLValue(Slot.getAddr(), E->getType(),
1844                                  Slot.getAlignment());
1845 
1846   CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
1847   for (LambdaExpr::capture_init_iterator i = E->capture_init_begin(),
1848                                          e = E->capture_init_end();
1849        i != e; ++i, ++CurField) {
1850     // Emit initialization
1851 
1852     LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
1853     ArrayRef<VarDecl *> ArrayIndexes;
1854     if (CurField->getType()->isArrayType())
1855       ArrayIndexes = E->getCaptureInitIndexVars(i);
1856     EmitInitializerForField(*CurField, LV, *i, ArrayIndexes);
1857   }
1858 }
1859