1 //===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code dealing with code generation of C++ expressions
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "clang/Frontend/CodeGenOptions.h"
15 #include "CodeGenFunction.h"
16 #include "CGCXXABI.h"
17 #include "CGObjCRuntime.h"
18 #include "CGDebugInfo.h"
19 #include "llvm/Intrinsics.h"
20 #include "llvm/Support/CallSite.h"
21 
22 using namespace clang;
23 using namespace CodeGen;
24 
25 RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
26                                           llvm::Value *Callee,
27                                           ReturnValueSlot ReturnValue,
28                                           llvm::Value *This,
29                                           llvm::Value *VTT,
30                                           CallExpr::const_arg_iterator ArgBeg,
31                                           CallExpr::const_arg_iterator ArgEnd) {
32   assert(MD->isInstance() &&
33          "Trying to emit a member call expr on a static method!");
34 
35   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
36 
37   CallArgList Args;
38 
39   // Push the this ptr.
40   Args.push_back(std::make_pair(RValue::get(This),
41                                 MD->getThisType(getContext())));
42 
43   // If there is a VTT parameter, emit it.
44   if (VTT) {
45     QualType T = getContext().getPointerType(getContext().VoidPtrTy);
46     Args.push_back(std::make_pair(RValue::get(VTT), T));
47   }
48 
49   // And the rest of the call args
50   EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
51 
52   QualType ResultType = FPT->getResultType();
53   return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args,
54                                                  FPT->getExtInfo()),
55                   Callee, ReturnValue, Args, MD);
56 }
57 
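/// getMostDerivedClassDecl - Determine the most derived class that the given
/// base expression is statically known to refer to, looking through
/// parentheses and derived-to-base or no-op casts.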
58 static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
59   const Expr *E = Base;
60 
61   while (true) {
62     E = E->IgnoreParens();
63     if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
64       if (CE->getCastKind() == CK_DerivedToBase ||
65           CE->getCastKind() == CK_UncheckedDerivedToBase ||
66           CE->getCastKind() == CK_NoOp) {
67         E = CE->getSubExpr();
68         continue;
69       }
70     }
71 
72     break;
73   }
74 
75   QualType DerivedType = E->getType();
76   if (const PointerType *PTy = DerivedType->getAs<PointerType>())
77     DerivedType = PTy->getPointeeType();
78 
79   return cast<CXXRecordDecl>(DerivedType->castAs<RecordType>()->getDecl());
80 }
81 
82 // FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
83 // quite what we want.
84 static const Expr *skipNoOpCastsAndParens(const Expr *E) {
85   while (true) {
86     if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
87       E = PE->getSubExpr();
88       continue;
89     }
90 
91     if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
92       if (CE->getCastKind() == CK_NoOp) {
93         E = CE->getSubExpr();
94         continue;
95       }
96     }
97     if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
98       if (UO->getOpcode() == UO_Extension) {
99         E = UO->getSubExpr();
100         continue;
101       }
102     }
103     return E;
104   }
105 }
106 
/// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on the
/// given expr can be devirtualized.
109 static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
110                                                const Expr *Base,
111                                                const CXXMethodDecl *MD) {
112 
113   // When building with -fapple-kext, all calls must go through the vtable since
114   // the kernel linker can do runtime patching of vtables.
115   if (Context.getLangOptions().AppleKext)
116     return false;
117 
118   // If the most derived class is marked final, we know that no subclass can
119   // override this member function and so we can devirtualize it. For example:
120   //
  // struct A { virtual void f(); };
122   // struct B final : A { };
123   //
124   // void f(B *b) {
125   //   b->f();
126   // }
127   //
128   const CXXRecordDecl *MostDerivedClassDecl = getMostDerivedClassDecl(Base);
129   if (MostDerivedClassDecl->hasAttr<FinalAttr>())
130     return true;
131 
  // If the member function is marked 'final', we know that it can't be
  // overridden, and we can therefore devirtualize calls to it.
134   if (MD->hasAttr<FinalAttr>())
135     return true;
136 
  // Similarly, if the class itself is marked 'final', it can't be derived
  // from, so the method can't be overridden and we can therefore devirtualize
  // the member function call.
139   if (MD->getParent()->hasAttr<FinalAttr>())
140     return true;
141 
142   Base = skipNoOpCastsAndParens(Base);
143   if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
144     if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
      // If the variable has record type (it isn't a pointer or reference),
      // we know its dynamic type exactly and can devirtualize the call.
146       return VD->getType()->isRecordType();
147     }
148 
149     return false;
150   }
151 
152   // We can always devirtualize calls on temporary object expressions.
153   if (isa<CXXConstructExpr>(Base))
154     return true;
155 
156   // And calls on bound temporaries.
157   if (isa<CXXBindTemporaryExpr>(Base))
158     return true;
159 
160   // Check if this is a call expr that returns a record type.
161   if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
162     return CE->getCallReturnType()->isRecordType();
163 
164   // We can't devirtualize the call.
165   return false;
166 }
167 
// Note: This function also emits constructor calls to support an MSVC
// extension allowing explicit constructor function calls.
170 RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
171                                               ReturnValueSlot ReturnValue) {
172   const Expr *callee = CE->getCallee()->IgnoreParens();
173 
174   if (isa<BinaryOperator>(callee))
175     return EmitCXXMemberPointerCallExpr(CE, ReturnValue);
176 
177   const MemberExpr *ME = cast<MemberExpr>(callee);
178   const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
179 
180   CGDebugInfo *DI = getDebugInfo();
181   if (DI && CGM.getCodeGenOpts().LimitDebugInfo
182       && !isa<CallExpr>(ME->getBase())) {
183     QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
184     if (const PointerType * PTy = dyn_cast<PointerType>(PQTy)) {
185       DI->getOrCreateRecordType(PTy->getPointeeType(),
186                                 MD->getParent()->getLocation());
187     }
188   }
189 
190   if (MD->isStatic()) {
    // The method is static; emit it as we would a regular call.
192     llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
193     return EmitCall(getContext().getPointerType(MD->getType()), Callee,
194                     ReturnValue, CE->arg_begin(), CE->arg_end());
195   }
196 
197   // Compute the object pointer.
198   llvm::Value *This;
199   if (ME->isArrow())
200     This = EmitScalarExpr(ME->getBase());
201   else
202     This = EmitLValue(ME->getBase()).getAddress();
203 
204   if (MD->isTrivial()) {
205     if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
206     if (isa<CXXConstructorDecl>(MD) &&
207         cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
208       return RValue::get(0);
209 
210     if (MD->isCopyAssignmentOperator()) {
211       // We don't like to generate the trivial copy assignment operator when
212       // it isn't necessary; just produce the proper effect here.
213       llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
214       EmitAggregateCopy(This, RHS, CE->getType());
215       return RValue::get(This);
216     }
217 
218     if (isa<CXXConstructorDecl>(MD) &&
219         cast<CXXConstructorDecl>(MD)->isCopyConstructor()) {
220       llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
221       EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
222                                      CE->arg_begin(), CE->arg_end());
223       return RValue::get(This);
224     }
225     llvm_unreachable("unknown trivial member function");
226   }
227 
228   // Compute the function type we're calling.
229   const CGFunctionInfo *FInfo = 0;
230   if (isa<CXXDestructorDecl>(MD))
231     FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXDestructorDecl>(MD),
232                                            Dtor_Complete);
233   else if (isa<CXXConstructorDecl>(MD))
234     FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXConstructorDecl>(MD),
235                                             Ctor_Complete);
236   else
237     FInfo = &CGM.getTypes().getFunctionInfo(MD);
238 
239   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
240   const llvm::Type *Ty
241     = CGM.getTypes().GetFunctionType(*FInfo, FPT->isVariadic());
242 
243   // C++ [class.virtual]p12:
244   //   Explicit qualification with the scope operator (5.1) suppresses the
245   //   virtual call mechanism.
246   //
247   // We also don't emit a virtual call if the base expression has a record type
248   // because then we know what the type is.
  bool UseVirtualCall = MD->isVirtual() && !ME->hasQualifier() &&
                        !canDevirtualizeMemberFunctionCalls(getContext(),
                                                            ME->getBase(), MD);
253   llvm::Value *Callee;
254   if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
255     if (UseVirtualCall) {
256       Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
257     } else {
258       if (getContext().getLangOptions().AppleKext &&
259           MD->isVirtual() &&
260           ME->hasQualifier())
261         Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
262       else
263         Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
264     }
265   } else if (const CXXConstructorDecl *Ctor =
266                dyn_cast<CXXConstructorDecl>(MD)) {
267     Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
268   } else if (UseVirtualCall) {
    Callee = BuildVirtualCall(MD, This, Ty);
270   } else {
271     if (getContext().getLangOptions().AppleKext &&
272         MD->isVirtual() &&
273         ME->hasQualifier())
274       Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
275     else
276       Callee = CGM.GetAddrOfFunction(MD, Ty);
277   }
278 
279   return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
280                            CE->arg_begin(), CE->arg_end());
281 }
282 
283 RValue
284 CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
285                                               ReturnValueSlot ReturnValue) {
286   const BinaryOperator *BO =
287       cast<BinaryOperator>(E->getCallee()->IgnoreParens());
288   const Expr *BaseExpr = BO->getLHS();
289   const Expr *MemFnExpr = BO->getRHS();
290 
291   const MemberPointerType *MPT =
292     MemFnExpr->getType()->getAs<MemberPointerType>();
293 
294   const FunctionProtoType *FPT =
295     MPT->getPointeeType()->getAs<FunctionProtoType>();
296   const CXXRecordDecl *RD =
297     cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
298 
299   // Get the member function pointer.
300   llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
301 
302   // Emit the 'this' pointer.
303   llvm::Value *This;
304 
305   if (BO->getOpcode() == BO_PtrMemI)
306     This = EmitScalarExpr(BaseExpr);
307   else
308     This = EmitLValue(BaseExpr).getAddress();
309 
310   // Ask the ABI to load the callee.  Note that This is modified.
311   llvm::Value *Callee =
312     CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);
313 
314   CallArgList Args;
315 
316   QualType ThisType =
317     getContext().getPointerType(getContext().getTagDeclType(RD));
318 
319   // Push the this ptr.
320   Args.push_back(std::make_pair(RValue::get(This), ThisType));
321 
322   // And the rest of the call args
323   EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
324   const FunctionType *BO_FPT = BO->getType()->getAs<FunctionProtoType>();
325   return EmitCall(CGM.getTypes().getFunctionInfo(Args, BO_FPT), Callee,
326                   ReturnValue, Args);
327 }
328 
329 RValue
330 CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
331                                                const CXXMethodDecl *MD,
332                                                ReturnValueSlot ReturnValue) {
333   assert(MD->isInstance() &&
334          "Trying to emit a member call expr on a static method!");
335   LValue LV = EmitLValue(E->getArg(0));
336   llvm::Value *This = LV.getAddress();
337 
338   if (MD->isCopyAssignmentOperator()) {
339     const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(MD->getDeclContext());
340     if (ClassDecl->hasTrivialCopyAssignment()) {
341       assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
342              "EmitCXXOperatorMemberCallExpr - user declared copy assignment");
343       llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
344       QualType Ty = E->getType();
345       EmitAggregateCopy(This, Src, Ty);
346       return RValue::get(This);
347     }
348   }
349 
350   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
351   const llvm::Type *Ty =
352     CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
353                                    FPT->isVariadic());
354   llvm::Value *Callee;
355   if (MD->isVirtual() &&
356       !canDevirtualizeMemberFunctionCalls(getContext(),
357                                            E->getArg(0), MD))
358     Callee = BuildVirtualCall(MD, This, Ty);
359   else
360     Callee = CGM.GetAddrOfFunction(MD, Ty);
361 
362   return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
363                            E->arg_begin() + 1, E->arg_end());
364 }
365 
366 void
367 CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
368                                       AggValueSlot Dest) {
369   assert(!Dest.isIgnored() && "Must have a destination!");
370   const CXXConstructorDecl *CD = E->getConstructor();
371 
372   // If we require zero initialization before (or instead of) calling the
373   // constructor, as can be the case with a non-user-provided default
374   // constructor, emit the zero initialization now.
375   if (E->requiresZeroInitialization())
376     EmitNullInitialization(Dest.getAddr(), E->getType());
377 
378   // If this is a call to a trivial default constructor, do nothing.
379   if (CD->isTrivial() && CD->isDefaultConstructor())
380     return;
381 
382   // Elide the constructor if we're constructing from a temporary.
383   // The temporary check is required because Sema sets this on NRVO
384   // returns.
385   if (getContext().getLangOptions().ElideConstructors && E->isElidable()) {
386     assert(getContext().hasSameUnqualifiedType(E->getType(),
387                                                E->getArg(0)->getType()));
388     if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
389       EmitAggExpr(E->getArg(0), Dest);
390       return;
391     }
392   }
393 
394   const ConstantArrayType *Array
395     = getContext().getAsConstantArrayType(E->getType());
396   if (Array) {
397     QualType BaseElementTy = getContext().getBaseElementType(Array);
398     const llvm::Type *BasePtr = ConvertType(BaseElementTy);
399     BasePtr = llvm::PointerType::getUnqual(BasePtr);
400     llvm::Value *BaseAddrPtr =
401       Builder.CreateBitCast(Dest.getAddr(), BasePtr);
402 
403     EmitCXXAggrConstructorCall(CD, Array, BaseAddrPtr,
404                                E->arg_begin(), E->arg_end());
405   }
406   else {
407     CXXCtorType Type =
408       (E->getConstructionKind() == CXXConstructExpr::CK_Complete)
409       ? Ctor_Complete : Ctor_Base;
410     bool ForVirtualBase =
411       E->getConstructionKind() == CXXConstructExpr::CK_VirtualBase;
412 
413     // Call the constructor.
414     EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest.getAddr(),
415                            E->arg_begin(), E->arg_end());
416   }
417 }
418 
419 void
420 CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
421                                             llvm::Value *Src,
422                                             const Expr *Exp) {
423   if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
424     Exp = E->getSubExpr();
425   assert(isa<CXXConstructExpr>(Exp) &&
426          "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
427   const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
428   const CXXConstructorDecl *CD = E->getConstructor();
429   RunCleanupsScope Scope(*this);
430 
431   // If we require zero initialization before (or instead of) calling the
432   // constructor, as can be the case with a non-user-provided default
433   // constructor, emit the zero initialization now.
434   // FIXME. Do I still need this for a copy ctor synthesis?
435   if (E->requiresZeroInitialization())
436     EmitNullInitialization(Dest, E->getType());
437 
438   assert(!getContext().getAsConstantArrayType(E->getType())
439          && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
440   EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src,
441                                  E->arg_begin(), E->arg_end());
442 }
443 
444 /// Check whether the given operator new[] is the global placement
445 /// operator new[].
446 static bool IsPlacementOperatorNewArray(ASTContext &Ctx,
447                                         const FunctionDecl *Fn) {
448   // Must be in global scope.  Note that allocation functions can't be
449   // declared in namespaces.
450   if (!Fn->getDeclContext()->getRedeclContext()->isFileContext())
451     return false;
452 
453   // Signature must be void *operator new[](size_t, void*).
454   // The size_t is common to all operator new[]s.
455   if (Fn->getNumParams() != 2)
456     return false;
457 
458   CanQualType ParamType = Ctx.getCanonicalType(Fn->getParamDecl(1)->getType());
459   return (ParamType == Ctx.VoidPtrTy);
460 }
461 
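/// CalculateCookiePadding - Determine how many bytes of array cookie the
/// given new-expression needs: zero for non-array news and for the global
/// placement operator new[], otherwise whatever the target C++ ABI requires.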
462 static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
463                                         const CXXNewExpr *E) {
464   if (!E->isArray())
465     return CharUnits::Zero();
466 
467   // No cookie is required if the new operator being used is
468   // ::operator new[](size_t, void*).
469   const FunctionDecl *OperatorNew = E->getOperatorNew();
470   if (IsPlacementOperatorNewArray(CGF.getContext(), OperatorNew))
471     return CharUnits::Zero();
472 
473   return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
474 }
475 
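/// EmitCXXNewAllocSize - Compute the size to pass to the allocation function,
/// adding any array cookie and guarding the arithmetic against unsigned
/// overflow (on overflow the size becomes -1 so the allocator fails).  For
/// array news this also computes the flattened element count (NumElements)
/// and the allocation size excluding the cookie (SizeWithoutCookie).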
476 static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context,
477                                         CodeGenFunction &CGF,
478                                         const CXXNewExpr *E,
479                                         llvm::Value *&NumElements,
480                                         llvm::Value *&SizeWithoutCookie) {
481   QualType ElemType = E->getAllocatedType();
482 
483   const llvm::IntegerType *SizeTy =
484     cast<llvm::IntegerType>(CGF.ConvertType(CGF.getContext().getSizeType()));
485 
486   CharUnits TypeSize = CGF.getContext().getTypeSizeInChars(ElemType);
487 
488   if (!E->isArray()) {
489     SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());
490     return SizeWithoutCookie;
491   }
492 
493   // Figure out the cookie size.
494   CharUnits CookieSize = CalculateCookiePadding(CGF, E);
495 
496   // Emit the array size expression.
  // We multiply together the extents of all array dimensions to get
  // NumElements; e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
499   NumElements = CGF.EmitScalarExpr(E->getArraySize());
500   assert(NumElements->getType() == SizeTy && "element count not a size_t");
501 
502   uint64_t ArraySizeMultiplier = 1;
503   while (const ConstantArrayType *CAT
504              = CGF.getContext().getAsConstantArrayType(ElemType)) {
505     ElemType = CAT->getElementType();
506     ArraySizeMultiplier *= CAT->getSize().getZExtValue();
507   }
508 
509   llvm::Value *Size;
510 
511   // If someone is doing 'new int[42]' there is no need to do a dynamic check.
512   // Don't bloat the -O0 code.
513   if (llvm::ConstantInt *NumElementsC =
514         dyn_cast<llvm::ConstantInt>(NumElements)) {
515     llvm::APInt NEC = NumElementsC->getValue();
516     unsigned SizeWidth = NEC.getBitWidth();
517 
518     // Determine if there is an overflow here by doing an extended multiply.
519     NEC = NEC.zext(SizeWidth*2);
520     llvm::APInt SC(SizeWidth*2, TypeSize.getQuantity());
521     SC *= NEC;
522 
523     if (!CookieSize.isZero()) {
524       // Save the current size without a cookie.  We don't care if an
525       // overflow's already happened because SizeWithoutCookie isn't
526       // used if the allocator returns null or throws, as it should
527       // always do on an overflow.
528       llvm::APInt SWC = SC.trunc(SizeWidth);
529       SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, SWC);
530 
531       // Add the cookie size.
532       SC += llvm::APInt(SizeWidth*2, CookieSize.getQuantity());
533     }
534 
535     if (SC.countLeadingZeros() >= SizeWidth) {
536       SC = SC.trunc(SizeWidth);
537       Size = llvm::ConstantInt::get(SizeTy, SC);
538     } else {
539       // On overflow, produce a -1 so operator new throws.
540       Size = llvm::Constant::getAllOnesValue(SizeTy);
541     }
542 
543     // Scale NumElements while we're at it.
544     uint64_t N = NEC.getZExtValue() * ArraySizeMultiplier;
545     NumElements = llvm::ConstantInt::get(SizeTy, N);
546 
547   // Otherwise, we don't need to do an overflow-checked multiplication if
548   // we're multiplying by one.
549   } else if (TypeSize.isOne()) {
550     assert(ArraySizeMultiplier == 1);
551 
552     Size = NumElements;
553 
554     // If we need a cookie, add its size in with an overflow check.
555     // This is maybe a little paranoid.
556     if (!CookieSize.isZero()) {
557       SizeWithoutCookie = Size;
558 
559       llvm::Value *CookieSizeV
560         = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
561 
562       const llvm::Type *Types[] = { SizeTy };
563       llvm::Value *UAddF
564         = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
565       llvm::Value *AddRes
566         = CGF.Builder.CreateCall2(UAddF, Size, CookieSizeV);
567 
568       Size = CGF.Builder.CreateExtractValue(AddRes, 0);
569       llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
570       Size = CGF.Builder.CreateSelect(DidOverflow,
571                                       llvm::ConstantInt::get(SizeTy, -1),
572                                       Size);
573     }
574 
  // Otherwise, use the llvm.umul.with.overflow intrinsic.
576   } else {
577     llvm::Value *OutermostElementSize
578       = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());
579 
580     llvm::Value *NumOutermostElements = NumElements;
581 
582     // Scale NumElements by the array size multiplier.  This might
583     // overflow, but only if the multiplication below also overflows,
584     // in which case this multiplication isn't used.
585     if (ArraySizeMultiplier != 1)
586       NumElements = CGF.Builder.CreateMul(NumElements,
587                          llvm::ConstantInt::get(SizeTy, ArraySizeMultiplier));
588 
589     // The requested size of the outermost array is non-constant.
590     // Multiply that by the static size of the elements of that array;
591     // on unsigned overflow, set the size to -1 to trigger an
592     // exception from the allocation routine.  This is sufficient to
593     // prevent buffer overruns from the allocator returning a
594     // seemingly valid pointer to insufficient space.  This idea comes
595     // originally from MSVC, and GCC has an open bug requesting
596     // similar behavior:
597     //   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=19351
598     //
599     // This will not be sufficient for C++0x, which requires a
600     // specific exception class (std::bad_array_new_length).
601     // That will require ABI support that has not yet been specified.
602     const llvm::Type *Types[] = { SizeTy };
603     llvm::Value *UMulF
604       = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, Types, 1);
605     llvm::Value *MulRes = CGF.Builder.CreateCall2(UMulF, NumOutermostElements,
606                                                   OutermostElementSize);
607 
608     // The overflow bit.
609     llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(MulRes, 1);
610 
611     // The result of the multiplication.
612     Size = CGF.Builder.CreateExtractValue(MulRes, 0);
613 
614     // If we have a cookie, we need to add that size in, too.
615     if (!CookieSize.isZero()) {
616       SizeWithoutCookie = Size;
617 
618       llvm::Value *CookieSizeV
619         = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
620       llvm::Value *UAddF
621         = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
622       llvm::Value *AddRes
623         = CGF.Builder.CreateCall2(UAddF, SizeWithoutCookie, CookieSizeV);
624 
625       Size = CGF.Builder.CreateExtractValue(AddRes, 0);
626 
627       llvm::Value *AddDidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
628       DidOverflow = CGF.Builder.CreateOr(DidOverflow, AddDidOverflow);
629     }
630 
631     Size = CGF.Builder.CreateSelect(DidOverflow,
632                                     llvm::ConstantInt::get(SizeTy, -1),
633                                     Size);
634   }
635 
636   if (CookieSize.isZero())
637     SizeWithoutCookie = Size;
638   else
639     assert(SizeWithoutCookie && "didn't set SizeWithoutCookie?");
640 
641   return Size;
642 }
643 
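/// StoreAnyExprIntoOneUnit - Emit the single initializer of a new-expression
/// into one element of the allocation, using the scalar, complex, or
/// aggregate path as appropriate for the allocated type.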
644 static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
645                                     llvm::Value *NewPtr) {
646 
647   assert(E->getNumConstructorArgs() == 1 &&
648          "Can only have one argument to initializer of POD type.");
649 
650   const Expr *Init = E->getConstructorArg(0);
651   QualType AllocType = E->getAllocatedType();
652 
653   unsigned Alignment =
654     CGF.getContext().getTypeAlignInChars(AllocType).getQuantity();
655   if (!CGF.hasAggregateLLVMType(AllocType))
656     CGF.EmitStoreOfScalar(CGF.EmitScalarExpr(Init), NewPtr,
657                           AllocType.isVolatileQualified(), Alignment,
658                           AllocType);
659   else if (AllocType->isAnyComplexType())
660     CGF.EmitComplexExprIntoAddr(Init, NewPtr,
661                                 AllocType.isVolatileQualified());
662   else {
663     AggValueSlot Slot
664       = AggValueSlot::forAddr(NewPtr, AllocType.isVolatileQualified(), true);
665     CGF.EmitAggExpr(Init, Slot);
666   }
667 }
668 
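// Emit a loop that stores the new-expression's initializer into each of the
// NumElements elements of the allocation.  Does nothing if the new-expression
// has no initializer arguments.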
669 void
670 CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
671                                          llvm::Value *NewPtr,
672                                          llvm::Value *NumElements) {
673   // We have a POD type.
674   if (E->getNumConstructorArgs() == 0)
675     return;
676 
677   const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
678 
679   // Create a temporary for the loop index and initialize it with 0.
680   llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index");
681   llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
682   Builder.CreateStore(Zero, IndexPtr);
683 
684   // Start the loop with a block that tests the condition.
685   llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
686   llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
687 
688   EmitBlock(CondBlock);
689 
690   llvm::BasicBlock *ForBody = createBasicBlock("for.body");
691 
  // Generate: if (loop-index < number-of-elements), fall through to the loop
  // body; otherwise, go to the block after the for-loop.
694   llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
695   llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElements, "isless");
696   // If the condition is true, execute the body.
697   Builder.CreateCondBr(IsLess, ForBody, AfterFor);
698 
699   EmitBlock(ForBody);
700 
701   llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
702   // Inside the loop body, emit the constructor call on the array element.
703   Counter = Builder.CreateLoad(IndexPtr);
704   llvm::Value *Address = Builder.CreateInBoundsGEP(NewPtr, Counter,
705                                                    "arrayidx");
706   StoreAnyExprIntoOneUnit(*this, E, Address);
707 
708   EmitBlock(ContinueBlock);
709 
710   // Emit the increment of the loop counter.
711   llvm::Value *NextVal = llvm::ConstantInt::get(SizeTy, 1);
712   Counter = Builder.CreateLoad(IndexPtr);
713   NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
714   Builder.CreateStore(NextVal, IndexPtr);
715 
716   // Finally, branch back up to the condition for the next iteration.
717   EmitBranch(CondBlock);
718 
719   // Emit the fall-through block.
720   EmitBlock(AfterFor, true);
721 }
722 
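/// EmitZeroMemSet - Zero-initialize the given allocation with a single memset
/// of the requested size, using the natural alignment of the type.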
723 static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
724                            llvm::Value *NewPtr, llvm::Value *Size) {
  // Cast the pointer to i8* before emitting the memset.
  NewPtr = CGF.EmitCastToVoidPtr(NewPtr);
726   CharUnits Alignment = CGF.getContext().getTypeAlignInChars(T);
727   CGF.Builder.CreateMemSet(NewPtr, CGF.Builder.getInt8(0), Size,
728                            Alignment.getQuantity(), false);
729 }
730 
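/// EmitNewInitializer - Emit the initialization of newly allocated storage:
/// a constructor call (per-element for arrays), a single memset when
/// zero-filling is sufficient, or a direct store of the initializer for
/// non-class types.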
731 static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
732                                llvm::Value *NewPtr,
733                                llvm::Value *NumElements,
734                                llvm::Value *AllocSizeWithoutCookie) {
735   if (E->isArray()) {
736     if (CXXConstructorDecl *Ctor = E->getConstructor()) {
737       bool RequiresZeroInitialization = false;
738       if (Ctor->getParent()->hasTrivialConstructor()) {
        // If the new-expression did not specify value-initialization, then
        // there is no initialization.
741         if (!E->hasInitializer() || Ctor->getParent()->isEmpty())
742           return;
743 
744         if (CGF.CGM.getTypes().isZeroInitializable(E->getAllocatedType())) {
745           // Optimization: since zero initialization will just set the memory
746           // to all zeroes, generate a single memset to do it in one shot.
747           EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
748                          AllocSizeWithoutCookie);
749           return;
750         }
751 
752         RequiresZeroInitialization = true;
753       }
754 
755       CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
756                                      E->constructor_arg_begin(),
757                                      E->constructor_arg_end(),
758                                      RequiresZeroInitialization);
759       return;
760     } else if (E->getNumConstructorArgs() == 1 &&
761                isa<ImplicitValueInitExpr>(E->getConstructorArg(0))) {
762       // Optimization: since zero initialization will just set the memory
763       // to all zeroes, generate a single memset to do it in one shot.
764       EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
765                      AllocSizeWithoutCookie);
766       return;
767     } else {
768       CGF.EmitNewArrayInitializer(E, NewPtr, NumElements);
769       return;
770     }
771   }
772 
773   if (CXXConstructorDecl *Ctor = E->getConstructor()) {
774     // Per C++ [expr.new]p15, if we have an initializer, then we're performing
775     // direct initialization. C++ [dcl.init]p5 requires that we
776     // zero-initialize storage if there are no user-declared constructors.
777     if (E->hasInitializer() &&
778         !Ctor->getParent()->hasUserDeclaredConstructor() &&
779         !Ctor->getParent()->isEmpty())
780       CGF.EmitNullInitialization(NewPtr, E->getAllocatedType());
781 
782     CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
783                                NewPtr, E->constructor_arg_begin(),
784                                E->constructor_arg_end());
785 
786     return;
787   }
788   // We have a POD type.
789   if (E->getNumConstructorArgs() == 0)
790     return;
791 
792   StoreAnyExprIntoOneUnit(CGF, E, NewPtr);
793 }
794 
795 namespace {
796   /// A cleanup to call the given 'operator delete' function upon
797   /// abnormal exit from a new expression.
798   class CallDeleteDuringNew : public EHScopeStack::Cleanup {
799     size_t NumPlacementArgs;
800     const FunctionDecl *OperatorDelete;
801     llvm::Value *Ptr;
802     llvm::Value *AllocSize;
803 
804     RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }
805 
806   public:
807     static size_t getExtraSize(size_t NumPlacementArgs) {
808       return NumPlacementArgs * sizeof(RValue);
809     }
810 
811     CallDeleteDuringNew(size_t NumPlacementArgs,
812                         const FunctionDecl *OperatorDelete,
813                         llvm::Value *Ptr,
814                         llvm::Value *AllocSize)
815       : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
816         Ptr(Ptr), AllocSize(AllocSize) {}
817 
818     void setPlacementArg(unsigned I, RValue Arg) {
819       assert(I < NumPlacementArgs && "index out of range");
820       getPlacementArgs()[I] = Arg;
821     }
822 
823     void Emit(CodeGenFunction &CGF, bool IsForEH) {
824       const FunctionProtoType *FPT
825         = OperatorDelete->getType()->getAs<FunctionProtoType>();
826       assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
827              (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));
828 
829       CallArgList DeleteArgs;
830 
831       // The first argument is always a void*.
832       FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
833       DeleteArgs.push_back(std::make_pair(RValue::get(Ptr), *AI++));
834 
835       // A member 'operator delete' can take an extra 'size_t' argument.
836       if (FPT->getNumArgs() == NumPlacementArgs + 2)
837         DeleteArgs.push_back(std::make_pair(RValue::get(AllocSize), *AI++));
838 
839       // Pass the rest of the arguments, which must match exactly.
840       for (unsigned I = 0; I != NumPlacementArgs; ++I)
841         DeleteArgs.push_back(std::make_pair(getPlacementArgs()[I], *AI++));
842 
843       // Call 'operator delete'.
844       CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
845                    CGF.CGM.GetAddrOfFunction(OperatorDelete),
846                    ReturnValueSlot(), DeleteArgs, OperatorDelete);
847     }
848   };
849 
850   /// A cleanup to call the given 'operator delete' function upon
851   /// abnormal exit from a new expression when the new expression is
852   /// conditional.
853   class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
854     size_t NumPlacementArgs;
855     const FunctionDecl *OperatorDelete;
856     DominatingValue<RValue>::saved_type Ptr;
857     DominatingValue<RValue>::saved_type AllocSize;
858 
859     DominatingValue<RValue>::saved_type *getPlacementArgs() {
860       return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
861     }
862 
863   public:
864     static size_t getExtraSize(size_t NumPlacementArgs) {
865       return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
866     }
867 
868     CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
869                                    const FunctionDecl *OperatorDelete,
870                                    DominatingValue<RValue>::saved_type Ptr,
871                               DominatingValue<RValue>::saved_type AllocSize)
872       : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
873         Ptr(Ptr), AllocSize(AllocSize) {}
874 
875     void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
876       assert(I < NumPlacementArgs && "index out of range");
877       getPlacementArgs()[I] = Arg;
878     }
879 
880     void Emit(CodeGenFunction &CGF, bool IsForEH) {
881       const FunctionProtoType *FPT
882         = OperatorDelete->getType()->getAs<FunctionProtoType>();
883       assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
884              (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));
885 
886       CallArgList DeleteArgs;
887 
888       // The first argument is always a void*.
889       FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
890       DeleteArgs.push_back(std::make_pair(Ptr.restore(CGF), *AI++));
891 
892       // A member 'operator delete' can take an extra 'size_t' argument.
893       if (FPT->getNumArgs() == NumPlacementArgs + 2) {
894         RValue RV = AllocSize.restore(CGF);
895         DeleteArgs.push_back(std::make_pair(RV, *AI++));
896       }
897 
898       // Pass the rest of the arguments, which must match exactly.
899       for (unsigned I = 0; I != NumPlacementArgs; ++I) {
900         RValue RV = getPlacementArgs()[I].restore(CGF);
901         DeleteArgs.push_back(std::make_pair(RV, *AI++));
902       }
903 
904       // Call 'operator delete'.
905       CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
906                    CGF.CGM.GetAddrOfFunction(OperatorDelete),
907                    ReturnValueSlot(), DeleteArgs, OperatorDelete);
908     }
909   };
910 }
911 
912 /// Enter a cleanup to call 'operator delete' if the initializer in a
913 /// new-expression throws.
914 static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
915                                   const CXXNewExpr *E,
916                                   llvm::Value *NewPtr,
917                                   llvm::Value *AllocSize,
918                                   const CallArgList &NewArgs) {
919   // If we're not inside a conditional branch, then the cleanup will
920   // dominate and we can do the easier (and more efficient) thing.
921   if (!CGF.isInConditionalBranch()) {
922     CallDeleteDuringNew *Cleanup = CGF.EHStack
923       .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
924                                                  E->getNumPlacementArgs(),
925                                                  E->getOperatorDelete(),
926                                                  NewPtr, AllocSize);
927     for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
928       Cleanup->setPlacementArg(I, NewArgs[I+1].first);
929 
930     return;
931   }
932 
933   // Otherwise, we need to save all this stuff.
934   DominatingValue<RValue>::saved_type SavedNewPtr =
935     DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
936   DominatingValue<RValue>::saved_type SavedAllocSize =
937     DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
938 
939   CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
940     .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(InactiveEHCleanup,
941                                                  E->getNumPlacementArgs(),
942                                                  E->getOperatorDelete(),
943                                                  SavedNewPtr,
944                                                  SavedAllocSize);
945   for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
946     Cleanup->setPlacementArg(I,
947                      DominatingValue<RValue>::save(CGF, NewArgs[I+1].first));
948 
949   CGF.ActivateCleanupBlock(CGF.EHStack.stable_begin());
950 }
951 
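// Emit a new-expression: call the allocation function, null-check the result
// if the allocator may return null, set up the array cookie and the
// 'operator delete' cleanup, run the initializer, and produce a pointer to
// the allocated object(s).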
952 llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
953   // The element type being allocated.
954   QualType allocType = getContext().getBaseElementType(E->getAllocatedType());
955 
956   // 1. Build a call to the allocation function.
957   FunctionDecl *allocator = E->getOperatorNew();
958   const FunctionProtoType *allocatorType =
959     allocator->getType()->castAs<FunctionProtoType>();
960 
961   CallArgList allocatorArgs;
962 
963   // The allocation size is the first argument.
964   QualType sizeType = getContext().getSizeType();
965 
966   llvm::Value *numElements = 0;
967   llvm::Value *allocSizeWithoutCookie = 0;
968   llvm::Value *allocSize =
969     EmitCXXNewAllocSize(getContext(), *this, E, numElements,
970                         allocSizeWithoutCookie);
971 
972   allocatorArgs.push_back(std::make_pair(RValue::get(allocSize), sizeType));
973 
974   // Emit the rest of the arguments.
975   // FIXME: Ideally, this should just use EmitCallArgs.
976   CXXNewExpr::const_arg_iterator placementArg = E->placement_arg_begin();
977 
978   // First, use the types from the function type.
979   // We start at 1 here because the first argument (the allocation size)
980   // has already been emitted.
981   for (unsigned i = 1, e = allocatorType->getNumArgs(); i != e;
982        ++i, ++placementArg) {
983     QualType argType = allocatorType->getArgType(i);
984 
985     assert(getContext().hasSameUnqualifiedType(argType.getNonReferenceType(),
986                                                placementArg->getType()) &&
987            "type mismatch in call argument!");
988 
989     EmitCallArg(allocatorArgs, *placementArg, argType);
990   }
991 
992   // Either we've emitted all the call args, or we have a call to a
993   // variadic function.
994   assert((placementArg == E->placement_arg_end() ||
995           allocatorType->isVariadic()) &&
996          "Extra arguments to non-variadic function!");
997 
998   // If we still have any arguments, emit them using the type of the argument.
999   for (CXXNewExpr::const_arg_iterator placementArgsEnd = E->placement_arg_end();
1000        placementArg != placementArgsEnd; ++placementArg) {
1001     EmitCallArg(allocatorArgs, *placementArg, placementArg->getType());
1002   }
1003 
1004   // Emit the allocation call.
1005   RValue RV =
1006     EmitCall(CGM.getTypes().getFunctionInfo(allocatorArgs, allocatorType),
1007              CGM.GetAddrOfFunction(allocator), ReturnValueSlot(),
1008              allocatorArgs, allocator);
1009 
1010   // Emit a null check on the allocation result if the allocation
1011   // function is allowed to return null (because it has a non-throwing
1012   // exception spec; for this part, we inline
1013   // CXXNewExpr::shouldNullCheckAllocation()) and we have an
1014   // interesting initializer.
1015   bool nullCheck = allocatorType->isNothrow(getContext()) &&
1016     !(allocType->isPODType() && !E->hasInitializer());
1017 
1018   llvm::BasicBlock *nullCheckBB = 0;
1019   llvm::BasicBlock *contBB = 0;
1020 
1021   llvm::Value *allocation = RV.getScalarVal();
1022   unsigned AS =
1023     cast<llvm::PointerType>(allocation->getType())->getAddressSpace();
1024 
1025   // The null-check means that the initializer is conditionally
1026   // evaluated.
1027   ConditionalEvaluation conditional(*this);
1028 
1029   if (nullCheck) {
1030     conditional.begin(*this);
1031 
1032     nullCheckBB = Builder.GetInsertBlock();
1033     llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
1034     contBB = createBasicBlock("new.cont");
1035 
1036     llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
1037     Builder.CreateCondBr(isNull, contBB, notNullBB);
1038     EmitBlock(notNullBB);
1039   }
1040 
1041   assert((allocSize == allocSizeWithoutCookie) ==
1042          CalculateCookiePadding(*this, E).isZero());
1043   if (allocSize != allocSizeWithoutCookie) {
1044     assert(E->isArray());
1045     allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
1046                                                        numElements,
1047                                                        E, allocType);
1048   }
1049 
1050   // If there's an operator delete, enter a cleanup to call it if an
1051   // exception is thrown.
1052   EHScopeStack::stable_iterator operatorDeleteCleanup;
1053   if (E->getOperatorDelete()) {
1054     EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
1055     operatorDeleteCleanup = EHStack.stable_begin();
1056   }
1057 
1058   const llvm::Type *elementPtrTy
1059     = ConvertTypeForMem(allocType)->getPointerTo(AS);
1060   llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy);
1061 
  EmitNewInitializer(*this, E, result, numElements, allocSizeWithoutCookie);

  if (E->isArray()) {
    // 'result' is a pointer to the base element type.  If we're allocating an
    // array of arrays, we'll need to cast back to the array pointer type.
    const llvm::Type *resultType = ConvertTypeForMem(E->getType());
    if (result->getType() != resultType)
      result = Builder.CreateBitCast(result, resultType);
  }
1074 
1075   // Deactivate the 'operator delete' cleanup if we finished
1076   // initialization.
1077   if (operatorDeleteCleanup.isValid())
1078     DeactivateCleanupBlock(operatorDeleteCleanup);
1079 
1080   if (nullCheck) {
1081     conditional.end(*this);
1082 
1083     llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
1084     EmitBlock(contBB);
1085 
1086     llvm::PHINode *PHI = Builder.CreatePHI(result->getType(), 2);
1087     PHI->addIncoming(result, notNullBB);
1088     PHI->addIncoming(llvm::Constant::getNullValue(result->getType()),
1089                      nullCheckBB);
1090 
1091     result = PHI;
1092   }
1093 
1094   return result;
1095 }
1096 
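// Emit a call to the given 'operator delete', passing the pointer (cast to
// the expected parameter type) and, for the two-argument form, the size of
// the type being deleted.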
1097 void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
1098                                      llvm::Value *Ptr,
1099                                      QualType DeleteTy) {
1100   assert(DeleteFD->getOverloadedOperator() == OO_Delete);
1101 
1102   const FunctionProtoType *DeleteFTy =
1103     DeleteFD->getType()->getAs<FunctionProtoType>();
1104 
1105   CallArgList DeleteArgs;
1106 
1107   // Check if we need to pass the size to the delete operator.
1108   llvm::Value *Size = 0;
1109   QualType SizeTy;
1110   if (DeleteFTy->getNumArgs() == 2) {
1111     SizeTy = DeleteFTy->getArgType(1);
1112     CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
1113     Size = llvm::ConstantInt::get(ConvertType(SizeTy),
1114                                   DeleteTypeSize.getQuantity());
1115   }
1116 
1117   QualType ArgTy = DeleteFTy->getArgType(0);
1118   llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
1119   DeleteArgs.push_back(std::make_pair(RValue::get(DeletePtr), ArgTy));
1120 
1121   if (Size)
1122     DeleteArgs.push_back(std::make_pair(RValue::get(Size), SizeTy));
1123 
1124   // Emit the call to delete.
1125   EmitCall(CGM.getTypes().getFunctionInfo(DeleteArgs, DeleteFTy),
1126            CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
1127            DeleteArgs, DeleteFD);
1128 }
1129 
1130 namespace {
1131   /// Calls the given 'operator delete' on a single object.
1132   struct CallObjectDelete : EHScopeStack::Cleanup {
1133     llvm::Value *Ptr;
1134     const FunctionDecl *OperatorDelete;
1135     QualType ElementType;
1136 
1137     CallObjectDelete(llvm::Value *Ptr,
1138                      const FunctionDecl *OperatorDelete,
1139                      QualType ElementType)
1140       : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
1141 
1142     void Emit(CodeGenFunction &CGF, bool IsForEH) {
1143       CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
1144     }
1145   };
1146 }
1147 
1148 /// Emit the code for deleting a single object.
1149 static void EmitObjectDelete(CodeGenFunction &CGF,
1150                              const FunctionDecl *OperatorDelete,
1151                              llvm::Value *Ptr,
1152                              QualType ElementType) {
1153   // Find the destructor for the type, if applicable.  If the
1154   // destructor is virtual, we'll just emit the vcall and return.
1155   const CXXDestructorDecl *Dtor = 0;
1156   if (const RecordType *RT = ElementType->getAs<RecordType>()) {
1157     CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1158     if (!RD->hasTrivialDestructor()) {
1159       Dtor = RD->getDestructor();
1160 
1161       if (Dtor->isVirtual()) {
1162         const llvm::Type *Ty =
1163           CGF.getTypes().GetFunctionType(CGF.getTypes().getFunctionInfo(Dtor,
1164                                                                Dtor_Complete),
1165                                          /*isVariadic=*/false);
1166 
1167         llvm::Value *Callee
1168           = CGF.BuildVirtualCall(Dtor, Dtor_Deleting, Ptr, Ty);
1169         CGF.EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
1170                               0, 0);
1171 
1172         // The dtor took care of deleting the object.
1173         return;
1174       }
1175     }
1176   }
1177 
1178   // Make sure that we call delete even if the dtor throws.
  // This doesn't have to be a conditional cleanup because we're going
1180   // to pop it off in a second.
1181   CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
1182                                             Ptr, OperatorDelete, ElementType);
1183 
1184   if (Dtor)
1185     CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
1186                               /*ForVirtualBase=*/false, Ptr);
1187 
1188   CGF.PopCleanupBlock();
1189 }
1190 
1191 namespace {
1192   /// Calls the given 'operator delete' on an array of objects.
1193   struct CallArrayDelete : EHScopeStack::Cleanup {
1194     llvm::Value *Ptr;
1195     const FunctionDecl *OperatorDelete;
1196     llvm::Value *NumElements;
1197     QualType ElementType;
1198     CharUnits CookieSize;
1199 
1200     CallArrayDelete(llvm::Value *Ptr,
1201                     const FunctionDecl *OperatorDelete,
1202                     llvm::Value *NumElements,
1203                     QualType ElementType,
1204                     CharUnits CookieSize)
1205       : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
1206         ElementType(ElementType), CookieSize(CookieSize) {}
1207 
1208     void Emit(CodeGenFunction &CGF, bool IsForEH) {
1209       const FunctionProtoType *DeleteFTy =
1210         OperatorDelete->getType()->getAs<FunctionProtoType>();
1211       assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);
1212 
1213       CallArgList Args;
1214 
1215       // Pass the pointer as the first argument.
1216       QualType VoidPtrTy = DeleteFTy->getArgType(0);
1217       llvm::Value *DeletePtr
1218         = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
1219       Args.push_back(std::make_pair(RValue::get(DeletePtr), VoidPtrTy));
1220 
1221       // Pass the original requested size as the second argument.
1222       if (DeleteFTy->getNumArgs() == 2) {
1223         QualType size_t = DeleteFTy->getArgType(1);
1224         const llvm::IntegerType *SizeTy
1225           = cast<llvm::IntegerType>(CGF.ConvertType(size_t));
1226 
1227         CharUnits ElementTypeSize =
1228           CGF.CGM.getContext().getTypeSizeInChars(ElementType);
1229 
1230         // The size of an element, multiplied by the number of elements.
1231         llvm::Value *Size
1232           = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
1233         Size = CGF.Builder.CreateMul(Size, NumElements);
1234 
1235         // Plus the size of the cookie if applicable.
1236         if (!CookieSize.isZero()) {
1237           llvm::Value *CookieSizeV
1238             = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
1239           Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
1240         }
1241 
1242         Args.push_back(std::make_pair(RValue::get(Size), size_t));
1243       }
1244 
1245       // Emit the call to delete.
1246       CGF.EmitCall(CGF.getTypes().getFunctionInfo(Args, DeleteFTy),
1247                    CGF.CGM.GetAddrOfFunction(OperatorDelete),
1248                    ReturnValueSlot(), Args, OperatorDelete);
1249     }
1250   };
1251 }
1252 
1253 /// Emit the code for deleting an array of objects.
1254 static void EmitArrayDelete(CodeGenFunction &CGF,
1255                             const CXXDeleteExpr *E,
1256                             llvm::Value *Ptr,
1257                             QualType ElementType) {
1258   llvm::Value *NumElements = 0;
1259   llvm::Value *AllocatedPtr = 0;
1260   CharUnits CookieSize;
1261   CGF.CGM.getCXXABI().ReadArrayCookie(CGF, Ptr, E, ElementType,
1262                                       NumElements, AllocatedPtr, CookieSize);
1263 
1264   assert(AllocatedPtr && "ReadArrayCookie didn't set AllocatedPtr");
1265 
1266   // Make sure that we call delete even if one of the dtors throws.
1267   const FunctionDecl *OperatorDelete = E->getOperatorDelete();
1268   CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
1269                                            AllocatedPtr, OperatorDelete,
1270                                            NumElements, ElementType,
1271                                            CookieSize);
1272 
1273   if (const CXXRecordDecl *RD = ElementType->getAsCXXRecordDecl()) {
1274     if (!RD->hasTrivialDestructor()) {
1275       assert(NumElements && "ReadArrayCookie didn't find element count"
1276                             " for a class with destructor");
1277       CGF.EmitCXXAggrDestructorCall(RD->getDestructor(), NumElements, Ptr);
1278     }
1279   }
1280 
1281   CGF.PopCleanupBlock();
1282 }
1283 
1284 void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
1285 
1286   // Get at the argument before we performed the implicit conversion
1287   // to void*.
1288   const Expr *Arg = E->getArgument();
1289   while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
1290     if (ICE->getCastKind() != CK_UserDefinedConversion &&
1291         ICE->getType()->isVoidPointerType())
1292       Arg = ICE->getSubExpr();
1293     else
1294       break;
1295   }
1296 
1297   llvm::Value *Ptr = EmitScalarExpr(Arg);
1298 
1299   // Null check the pointer.
1300   llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
1301   llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");
1302 
1303   llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");
1304 
1305   Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
1306   EmitBlock(DeleteNotNull);
1307 
1308   // We might be deleting a pointer to array.  If so, GEP down to the
1309   // first non-array element.
1310   // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
1311   QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
1312   if (DeleteTy->isConstantArrayType()) {
1313     llvm::Value *Zero = Builder.getInt32(0);
1314     llvm::SmallVector<llvm::Value*,8> GEP;
1315 
1316     GEP.push_back(Zero); // point at the outermost array
1317 
1318     // For each layer of array type we're pointing at:
1319     while (const ConstantArrayType *Arr
1320              = getContext().getAsConstantArrayType(DeleteTy)) {
1321       // 1. Unpeel the array type.
1322       DeleteTy = Arr->getElementType();
1323 
1324       // 2. GEP to the first element of the array.
1325       GEP.push_back(Zero);
1326     }
1327 
1328     Ptr = Builder.CreateInBoundsGEP(Ptr, GEP.begin(), GEP.end(), "del.first");
1329   }
1330 
1331   assert(ConvertTypeForMem(DeleteTy) ==
1332          cast<llvm::PointerType>(Ptr->getType())->getElementType());
1333 
1334   if (E->isArrayForm()) {
1335     EmitArrayDelete(*this, E, Ptr, DeleteTy);
1336   } else {
1337     EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy);
1338   }
1339 
1340   EmitBlock(DeleteEnd);
1341 }
1342 
1343 static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
1344   // void __cxa_bad_typeid();
1345 
1346   const llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
1347   const llvm::FunctionType *FTy =
1348   llvm::FunctionType::get(VoidTy, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
}

static void EmitBadTypeidCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadTypeidFn(CGF);
  CGF.EmitCallOrInvoke(Fn, 0, 0).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

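/// Emit a typeid expression.  For a type operand such as 'typeid(int)' the
/// address of the RTTI descriptor is returned directly.  For an expression
/// operand of polymorphic class type, e.g. 'typeid(*p)', the std::type_info
/// pointer is loaded from the vtable slot just before the address point, as
/// the Itanium C++ ABI lays it out.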
llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  QualType Ty = E->getType();
  const llvm::Type *LTy = ConvertType(Ty)->getPointerTo();

  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
      CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
    return Builder.CreateBitCast(TypeInfo, LTy);
  }

  Expr *subE = E->getExprOperand();
  Ty = subE->getType();
  CanQualType CanTy = CGM.getContext().getCanonicalType(Ty);
  Ty = CanTy.getUnqualifiedType().getNonReferenceType();
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->isPolymorphic()) {
      // FIXME: if subE is an lvalue do
      LValue Obj = EmitLValue(subE);
      llvm::Value *This = Obj.getAddress();
      // We need to do a zero check for *p, unless it has NonNullAttr.
      // FIXME: PointerType->hasAttr<NonNullAttr>()
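      // For a dereference such as 'typeid(*p)', [expr.typeid]p2 requires that
      // a null 'p' result in a std::bad_typeid exception, so we branch to
      // __cxa_bad_typeid instead of loading through a null vtable pointer.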
      bool CanBeZero = false;
      if (UnaryOperator *UO = dyn_cast<UnaryOperator>(subE->IgnoreParens()))
        if (UO->getOpcode() == UO_Deref)
          CanBeZero = true;
      if (CanBeZero) {
        llvm::BasicBlock *NonZeroBlock = createBasicBlock();
        llvm::BasicBlock *ZeroBlock = createBasicBlock();

        llvm::Value *Zero = llvm::Constant::getNullValue(This->getType());
        Builder.CreateCondBr(Builder.CreateICmpNE(This, Zero),
                             NonZeroBlock, ZeroBlock);
        EmitBlock(ZeroBlock);

        EmitBadTypeidCall(*this);

        EmitBlock(NonZeroBlock);
      }
      llvm::Value *V = GetVTablePtr(This, LTy->getPointerTo());
      V = Builder.CreateConstInBoundsGEP1_64(V, -1ULL);
      V = Builder.CreateLoad(V);
      return V;
    }
  }
  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(Ty), LTy);
}

static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
  // void *__dynamic_cast(const void *sub,
  //                      const abi::__class_type_info *src,
  //                      const abi::__class_type_info *dst,
  //                      std::ptrdiff_t src2dst_offset);
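  //
  // The runtime returns the adjusted pointer to the destination subobject on
  // success and a null pointer on failure (Itanium C++ ABI).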

  const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
  const llvm::Type *PtrDiffTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());

  const llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };

  const llvm::FunctionType *FTy =
    llvm::FunctionType::get(Int8PtrTy, Args, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast");
}

static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
  // void __cxa_bad_cast();

  const llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
  const llvm::FunctionType *FTy =
    llvm::FunctionType::get(VoidTy, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
}

static void EmitBadCastCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadCastFn(CGF);
  CGF.EmitCallOrInvoke(Fn, 0, 0).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

static llvm::Value *
EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
                    QualType SrcTy, QualType DestTy,
                    llvm::BasicBlock *CastEnd) {
  const llvm::Type *PtrDiffLTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());
  const llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  if (const PointerType *PTy = DestTy->getAs<PointerType>()) {
    if (PTy->getPointeeType()->isVoidType()) {
      // C++ [expr.dynamic.cast]p7:
      //   If T is "pointer to cv void," then the result is a pointer to the
      //   most derived object pointed to by v.
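      // For example, 'dynamic_cast<void*>(p)' where 'p' points at a base-class
      // subobject: adding the offset-to-top entry stored two slots before the
      // vtable's address point yields the address of the complete object.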

      // Get the vtable pointer.
      llvm::Value *VTable = CGF.GetVTablePtr(Value, PtrDiffLTy->getPointerTo());

      // Get the offset-to-top from the vtable.
      llvm::Value *OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
      OffsetToTop = CGF.Builder.CreateLoad(OffsetToTop, "offset.to.top");

      // Finally, add the offset to the pointer.
      Value = CGF.EmitCastToVoidPtr(Value);
      Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);

      return CGF.Builder.CreateBitCast(Value, DestLTy);
    }
  }

  QualType SrcRecordTy;
  QualType DestRecordTy;

  if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) {
    SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
    DestRecordTy = DestPTy->getPointeeType();
  } else {
    SrcRecordTy = SrcTy;
    DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
  }

  assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
  assert(DestRecordTy->isRecordType() && "dest type must be a record type!");

  llvm::Value *SrcRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // FIXME: Actually compute a hint here.
  llvm::Value *OffsetHint = llvm::ConstantInt::get(PtrDiffLTy, -1ULL);
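  // A src2dst_offset of -1 tells the runtime that no hint about the static
  // offset of the source subobject within the destination type is available.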

  // Emit the call to __dynamic_cast.
  Value = CGF.EmitCastToVoidPtr(Value);
  Value = CGF.Builder.CreateCall4(getDynamicCastFn(CGF), Value,
                                  SrcRTTI, DestRTTI, OffsetHint);
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);

  // C++ [expr.dynamic.cast]p9:
  //   A failed cast to reference type throws std::bad_cast
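  // For example, 'dynamic_cast<Derived &>(baseRef)' must raise the exception
  // via __cxa_bad_cast when __dynamic_cast returns null, whereas the pointer
  // form simply produces a null pointer result.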
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
      CGF.createBasicBlock("dynamic_cast.bad_cast");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}

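/// Emit a dynamic_cast whose result is known to be null at compile time (for
/// instance, when the operand is a null pointer constant).  A pointer
/// destination simply yields a null pointer; a reference destination cannot
/// bind to a null result, so __cxa_bad_cast is called instead.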
static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
                                          QualType DestTy) {
  const llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  if (DestTy->isPointerType())
    return llvm::Constant::getNullValue(DestLTy);

  // C++ [expr.dynamic.cast]p9:
  //   A failed cast to reference type throws std::bad_cast
  EmitBadCastCall(CGF);

  CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
  return llvm::UndefValue::get(DestLTy);
}

llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
                                              const CXXDynamicCastExpr *DCE) {
  QualType DestTy = DCE->getTypeAsWritten();

  if (DCE->isAlwaysNull())
    return EmitDynamicCastToNull(*this, DestTy);

  QualType SrcTy = DCE->getSubExpr()->getType();

  // C++ [expr.dynamic.cast]p4:
  //   If the value of v is a null pointer value in the pointer case, the result
  //   is the null pointer value of type T.
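  // For example, 'dynamic_cast<D*>(p)' with a null 'p' must produce a null
  // 'D*' without consulting the runtime, so the call emitted by
  // EmitDynamicCastCall is guarded by an explicit null check.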
  bool ShouldNullCheckSrcValue = SrcTy->isPointerType();

  llvm::BasicBlock *CastNull = 0;
  llvm::BasicBlock *CastNotNull = 0;
  llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");

  if (ShouldNullCheckSrcValue) {
    CastNull = createBasicBlock("dynamic_cast.null");
    CastNotNull = createBasicBlock("dynamic_cast.notnull");

    llvm::Value *IsNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  Value = EmitDynamicCastCall(*this, Value, SrcTy, DestTy, CastEnd);

  if (ShouldNullCheckSrcValue) {
    EmitBranch(CastEnd);

    EmitBlock(CastNull);
    EmitBranch(CastEnd);
  }

  EmitBlock(CastEnd);

  if (ShouldNullCheckSrcValue) {
    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);

    Value = PHI;
  }

  return Value;
}