1 //===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code dealing with code generation of C++ expressions
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "clang/Frontend/CodeGenOptions.h"
15 #include "CodeGenFunction.h"
16 #include "CGCXXABI.h"
17 #include "CGObjCRuntime.h"
18 #include "CGDebugInfo.h"
19 #include "llvm/Intrinsics.h"
20 #include "llvm/Support/CallSite.h"
21 
22 using namespace clang;
23 using namespace CodeGen;
24 
25 RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
26                                           llvm::Value *Callee,
27                                           ReturnValueSlot ReturnValue,
28                                           llvm::Value *This,
29                                           llvm::Value *VTT,
30                                           CallExpr::const_arg_iterator ArgBeg,
31                                           CallExpr::const_arg_iterator ArgEnd) {
32   assert(MD->isInstance() &&
33          "Trying to emit a member call expr on a static method!");
34 
35   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
36 
37   CallArgList Args;
38 
39   // Push the this ptr.
40   Args.add(RValue::get(This), MD->getThisType(getContext()));
41 
42   // If there is a VTT parameter, emit it.
43   if (VTT) {
44     QualType T = getContext().getPointerType(getContext().VoidPtrTy);
45     Args.add(RValue::get(VTT), T);
46   }
47 
48   // And the rest of the call args
49   EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
50 
51   QualType ResultType = FPT->getResultType();
52   return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args,
53                                                  FPT->getExtInfo()),
54                   Callee, ReturnValue, Args, MD);
55 }
56 
57 static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
58   const Expr *E = Base;
59 
60   while (true) {
61     E = E->IgnoreParens();
62     if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
63       if (CE->getCastKind() == CK_DerivedToBase ||
64           CE->getCastKind() == CK_UncheckedDerivedToBase ||
65           CE->getCastKind() == CK_NoOp) {
66         E = CE->getSubExpr();
67         continue;
68       }
69     }
70 
71     break;
72   }
73 
74   QualType DerivedType = E->getType();
75   if (const PointerType *PTy = DerivedType->getAs<PointerType>())
76     DerivedType = PTy->getPointeeType();
77 
78   return cast<CXXRecordDecl>(DerivedType->castAs<RecordType>()->getDecl());
79 }
80 
81 // FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
82 // quite what we want.
83 static const Expr *skipNoOpCastsAndParens(const Expr *E) {
84   while (true) {
85     if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
86       E = PE->getSubExpr();
87       continue;
88     }
89 
90     if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
91       if (CE->getCastKind() == CK_NoOp) {
92         E = CE->getSubExpr();
93         continue;
94       }
95     }
96     if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
97       if (UO->getOpcode() == UO_Extension) {
98         E = UO->getSubExpr();
99         continue;
100       }
101     }
102     return E;
103   }
104 }
105 
106 /// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on given
107 /// expr can be devirtualized.
108 static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
109                                                const Expr *Base,
110                                                const CXXMethodDecl *MD) {
111 
112   // When building with -fapple-kext, all calls must go through the vtable since
113   // the kernel linker can do runtime patching of vtables.
114   if (Context.getLangOptions().AppleKext)
115     return false;
116 
117   // If the most derived class is marked final, we know that no subclass can
118   // override this member function and so we can devirtualize it. For example:
119   //
120   // struct A { virtual void f(); }
121   // struct B final : A { };
122   //
123   // void f(B *b) {
124   //   b->f();
125   // }
126   //
127   const CXXRecordDecl *MostDerivedClassDecl = getMostDerivedClassDecl(Base);
128   if (MostDerivedClassDecl->hasAttr<FinalAttr>())
129     return true;
130 
131   // If the member function is marked 'final', we know that it can't be
132   // overridden and can therefore devirtualize it.
133   if (MD->hasAttr<FinalAttr>())
134     return true;
135 
136   // Similarly, if the class itself is marked 'final' it can't be overridden
137   // and we can therefore devirtualize the member function call.
138   if (MD->getParent()->hasAttr<FinalAttr>())
139     return true;
140 
141   Base = skipNoOpCastsAndParens(Base);
142   if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
143     if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
144       // This is a record decl. We know the type and can devirtualize it.
145       return VD->getType()->isRecordType();
146     }
147 
148     return false;
149   }
150 
151   // We can always devirtualize calls on temporary object expressions.
152   if (isa<CXXConstructExpr>(Base))
153     return true;
154 
155   // And calls on bound temporaries.
156   if (isa<CXXBindTemporaryExpr>(Base))
157     return true;
158 
159   // Check if this is a call expr that returns a record type.
160   if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
161     return CE->getCallReturnType()->isRecordType();
162 
163   // We can't devirtualize the call.
164   return false;
165 }
166 
// Note: This function also emits constructor calls to support an MSVC
// extension allowing explicit constructor function calls.
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  const Expr *callee = CE->getCallee()->IgnoreParens();

  // A member-pointer call (.* or ->*) has a BinaryOperator as its callee.
  if (isa<BinaryOperator>(callee))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(callee);
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  // Under -flimit-debug-info, force emission of debug info for the class
  // being called through, since the call implies it must be complete.
  CGDebugInfo *DI = getDebugInfo();
  if (DI && CGM.getCodeGenOpts().LimitDebugInfo
      && !isa<CallExpr>(ME->getBase())) {
    QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
    if (const PointerType * PTy = dyn_cast<PointerType>(PQTy)) {
      DI->getOrCreateRecordType(PTy->getPointeeType(),
                                MD->getParent()->getLocation());
    }
  }

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
    return EmitCall(getContext().getPointerType(MD->getType()), Callee,
                    ReturnValue, CE->arg_begin(), CE->arg_end());
  }

  // Compute the object pointer.
  llvm::Value *This;
  if (ME->isArrow())
    This = EmitScalarExpr(ME->getBase());
  else
    This = EmitLValue(ME->getBase()).getAddress();

  // Trivial member functions need no real call: destructors and default
  // constructors are no-ops, and copy operations reduce to aggregate copies.
  if (MD->isTrivial()) {
    if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
      return RValue::get(0);

    if (MD->isCopyAssignmentOperator()) {
      // We don't like to generate the trivial copy assignment operator when
      // it isn't necessary; just produce the proper effect here.
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitAggregateCopy(This, RHS, CE->getType());
      return RValue::get(This);
    }

    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isCopyConstructor()) {
      // A trivial copy constructor is likewise synthesized as a copy.
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
                                     CE->arg_begin(), CE->arg_end());
      return RValue::get(This);
    }
    llvm_unreachable("unknown trivial member function");
  }

  // Compute the function type we're calling.  Destructors and constructors
  // use the complete-object variant of their function info.
  const CGFunctionInfo *FInfo = 0;
  if (isa<CXXDestructorDecl>(MD))
    FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXDestructorDecl>(MD),
                                           Dtor_Complete);
  else if (isa<CXXConstructorDecl>(MD))
    FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXConstructorDecl>(MD),
                                            Ctor_Complete);
  else
    FInfo = &CGM.getTypes().getFunctionInfo(MD);

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  const llvm::Type *Ty
    = CGM.getTypes().GetFunctionType(*FInfo, FPT->isVariadic());

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
  bool UseVirtualCall;
  UseVirtualCall = MD->isVirtual() && !ME->hasQualifier()
                   && !canDevirtualizeMemberFunctionCalls(getContext(),
                                                          ME->getBase(), MD);
  llvm::Value *Callee;
  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
    if (UseVirtualCall) {
      Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
    } else {
      // Under -fapple-kext even explicitly-qualified virtual calls go
      // through the vtable, since the kext linker may patch vtables.
      if (getContext().getLangOptions().AppleKext &&
          MD->isVirtual() &&
          ME->hasQualifier())
        Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
      else
        Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
    }
  } else if (const CXXConstructorDecl *Ctor =
               dyn_cast<CXXConstructorDecl>(MD)) {
    Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
  } else if (UseVirtualCall) {
      Callee = BuildVirtualCall(MD, This, Ty);
  } else {
    // Same -fapple-kext special case as for destructors above.
    if (getContext().getLangOptions().AppleKext &&
        MD->isVirtual() &&
        ME->hasQualifier())
      Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
    else
      Callee = CGM.GetAddrOfFunction(MD, Ty);
  }

  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
                           CE->arg_begin(), CE->arg_end());
}
281 
/// EmitCXXMemberPointerCallExpr - Emit a call through a pointer to member
/// function, i.e. the callee is a '.*' or '->*' binary operator.
RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  const BinaryOperator *BO =
      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();

  const MemberPointerType *MPT =
    MemFnExpr->getType()->castAs<MemberPointerType>();

  const FunctionProtoType *FPT =
    MPT->getPointeeType()->castAs<FunctionProtoType>();
  const CXXRecordDecl *RD =
    cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Emit the 'this' pointer.  For '->*' the base is already a pointer;
  // for '.*' we take the address of the base lvalue.
  llvm::Value *This;

  if (BO->getOpcode() == BO_PtrMemI)
    This = EmitScalarExpr(BaseExpr);
  else
    This = EmitLValue(BaseExpr).getAddress();

  // Ask the ABI to load the callee.  Note that This is modified.
  llvm::Value *Callee =
    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);

  CallArgList Args;

  QualType ThisType =
    getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.add(RValue::get(This), ThisType);

  // And the rest of the call args
  EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
  return EmitCall(CGM.getTypes().getFunctionInfo(Args, FPT), Callee,
                  ReturnValue, Args);
}
326 
327 RValue
328 CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
329                                                const CXXMethodDecl *MD,
330                                                ReturnValueSlot ReturnValue) {
331   assert(MD->isInstance() &&
332          "Trying to emit a member call expr on a static method!");
333   LValue LV = EmitLValue(E->getArg(0));
334   llvm::Value *This = LV.getAddress();
335 
336   if (MD->isCopyAssignmentOperator()) {
337     const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(MD->getDeclContext());
338     if (ClassDecl->hasTrivialCopyAssignment()) {
339       assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
340              "EmitCXXOperatorMemberCallExpr - user declared copy assignment");
341       llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
342       QualType Ty = E->getType();
343       EmitAggregateCopy(This, Src, Ty);
344       return RValue::get(This);
345     }
346   }
347 
348   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
349   const llvm::Type *Ty =
350     CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
351                                    FPT->isVariadic());
352   llvm::Value *Callee;
353   if (MD->isVirtual() &&
354       !canDevirtualizeMemberFunctionCalls(getContext(),
355                                            E->getArg(0), MD))
356     Callee = BuildVirtualCall(MD, This, Ty);
357   else
358     Callee = CGM.GetAddrOfFunction(MD, Ty);
359 
360   return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
361                            E->arg_begin() + 1, E->arg_end());
362 }
363 
/// EmitCXXConstructExpr - Emit a constructor call into the given aggregate
/// destination, handling zero-initialization, trivial and elided
/// constructors, and array construction.
void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
                                      AggValueSlot Dest) {
  assert(!Dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now, unless destination is
  // already zeroed.
  if (E->requiresZeroInitialization() && !Dest.isZeroed())
    EmitNullInitialization(Dest.getAddr(), E->getType());

  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Elide the constructor if we're constructing from a temporary.
  // The temporary check is required because Sema sets this on NRVO
  // returns.
  if (getContext().getLangOptions().ElideConstructors && E->isElidable()) {
    assert(getContext().hasSameUnqualifiedType(E->getType(),
                                               E->getArg(0)->getType()));
    if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
      // Emit the source expression directly into the destination slot.
      EmitAggExpr(E->getArg(0), Dest);
      return;
    }
  }

  const ConstantArrayType *Array
    = getContext().getAsConstantArrayType(E->getType());
  if (Array) {
    // Array construction: construct each base element through a pointer
    // to the base element type.
    QualType BaseElementTy = getContext().getBaseElementType(Array);
    const llvm::Type *BasePtr = ConvertType(BaseElementTy);
    BasePtr = llvm::PointerType::getUnqual(BasePtr);
    llvm::Value *BaseAddrPtr =
      Builder.CreateBitCast(Dest.getAddr(), BasePtr);

    EmitCXXAggrConstructorCall(CD, Array, BaseAddrPtr,
                               E->arg_begin(), E->arg_end());
  }
  else {
    // Select the constructor variant (complete vs. base object) from the
    // construction kind.
    CXXCtorType Type;
    bool ForVirtualBase = false;

    switch (E->getConstructionKind()) {
     case CXXConstructExpr::CK_Delegating:
      // We should be emitting a constructor; GlobalDecl will assert this
      Type = CurGD.getCtorType();
      break;

     case CXXConstructExpr::CK_Complete:
      Type = Ctor_Complete;
      break;

     case CXXConstructExpr::CK_VirtualBase:
      ForVirtualBase = true;
      // fall-through

     case CXXConstructExpr::CK_NonVirtualBase:
      Type = Ctor_Base;
    }

    // Call the constructor.
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest.getAddr(),
                           E->arg_begin(), E->arg_end());
  }
}
432 
433 void
434 CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
435                                             llvm::Value *Src,
436                                             const Expr *Exp) {
437   if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
438     Exp = E->getSubExpr();
439   assert(isa<CXXConstructExpr>(Exp) &&
440          "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
441   const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
442   const CXXConstructorDecl *CD = E->getConstructor();
443   RunCleanupsScope Scope(*this);
444 
445   // If we require zero initialization before (or instead of) calling the
446   // constructor, as can be the case with a non-user-provided default
447   // constructor, emit the zero initialization now.
448   // FIXME. Do I still need this for a copy ctor synthesis?
449   if (E->requiresZeroInitialization())
450     EmitNullInitialization(Dest, E->getType());
451 
452   assert(!getContext().getAsConstantArrayType(E->getType())
453          && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
454   EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src,
455                                  E->arg_begin(), E->arg_end());
456 }
457 
458 /// Check whether the given operator new[] is the global placement
459 /// operator new[].
460 static bool IsPlacementOperatorNewArray(ASTContext &Ctx,
461                                         const FunctionDecl *Fn) {
462   // Must be in global scope.  Note that allocation functions can't be
463   // declared in namespaces.
464   if (!Fn->getDeclContext()->getRedeclContext()->isFileContext())
465     return false;
466 
467   // Signature must be void *operator new[](size_t, void*).
468   // The size_t is common to all operator new[]s.
469   if (Fn->getNumParams() != 2)
470     return false;
471 
472   CanQualType ParamType = Ctx.getCanonicalType(Fn->getParamDecl(1)->getType());
473   return (ParamType == Ctx.VoidPtrTy);
474 }
475 
476 static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
477                                         const CXXNewExpr *E) {
478   if (!E->isArray())
479     return CharUnits::Zero();
480 
481   // No cookie is required if the new operator being used is
482   // ::operator new[](size_t, void*).
483   const FunctionDecl *OperatorNew = E->getOperatorNew();
484   if (IsPlacementOperatorNewArray(CGF.getContext(), OperatorNew))
485     return CharUnits::Zero();
486 
487   return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
488 }
489 
/// EmitCXXNewAllocSize - Compute the number of bytes to request from
/// operator new for the given new-expression, guarding against unsigned
/// overflow.  On return, NumElements holds the total element count (for
/// array news) and SizeWithoutCookie holds the size minus any array
/// cookie.  On detected overflow the returned size is all-ones so that a
/// conforming allocator will fail the request.
static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context,
                                        CodeGenFunction &CGF,
                                        const CXXNewExpr *E,
                                        llvm::Value *&NumElements,
                                        llvm::Value *&SizeWithoutCookie) {
  QualType ElemType = E->getAllocatedType();

  // size_t as an LLVM integer type.
  const llvm::IntegerType *SizeTy =
    cast<llvm::IntegerType>(CGF.ConvertType(CGF.getContext().getSizeType()));

  CharUnits TypeSize = CGF.getContext().getTypeSizeInChars(ElemType);

  // Scalar new: the size is just the type size; no cookie, no overflow.
  if (!E->isArray()) {
    SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());
    return SizeWithoutCookie;
  }

  // Figure out the cookie size.
  CharUnits CookieSize = CalculateCookiePadding(CGF, E);

  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  NumElements = CGF.EmitScalarExpr(E->getArraySize());
  assert(NumElements->getType() == SizeTy && "element count not a size_t");

  // Fold constant inner array dimensions into a single multiplier, leaving
  // ElemType as the base element type.
  uint64_t ArraySizeMultiplier = 1;
  while (const ConstantArrayType *CAT
             = CGF.getContext().getAsConstantArrayType(ElemType)) {
    ElemType = CAT->getElementType();
    ArraySizeMultiplier *= CAT->getSize().getZExtValue();
  }

  llvm::Value *Size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *NumElementsC =
        dyn_cast<llvm::ConstantInt>(NumElements)) {
    llvm::APInt NEC = NumElementsC->getValue();
    unsigned SizeWidth = NEC.getBitWidth();

    // Determine if there is an overflow here by doing an extended multiply.
    NEC = NEC.zext(SizeWidth*2);
    llvm::APInt SC(SizeWidth*2, TypeSize.getQuantity());
    SC *= NEC;

    if (!CookieSize.isZero()) {
      // Save the current size without a cookie.  We don't care if an
      // overflow's already happened because SizeWithoutCookie isn't
      // used if the allocator returns null or throws, as it should
      // always do on an overflow.
      llvm::APInt SWC = SC.trunc(SizeWidth);
      SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, SWC);

      // Add the cookie size.
      SC += llvm::APInt(SizeWidth*2, CookieSize.getQuantity());
    }

    // The size fits in size_t iff the high half of the double-width
    // product (plus cookie) is zero.
    if (SC.countLeadingZeros() >= SizeWidth) {
      SC = SC.trunc(SizeWidth);
      Size = llvm::ConstantInt::get(SizeTy, SC);
    } else {
      // On overflow, produce a -1 so operator new throws.
      Size = llvm::Constant::getAllOnesValue(SizeTy);
    }

    // Scale NumElements while we're at it.
    uint64_t N = NEC.getZExtValue() * ArraySizeMultiplier;
    NumElements = llvm::ConstantInt::get(SizeTy, N);

  // Otherwise, we don't need to do an overflow-checked multiplication if
  // we're multiplying by one.
  } else if (TypeSize.isOne()) {
    assert(ArraySizeMultiplier == 1);

    Size = NumElements;

    // If we need a cookie, add its size in with an overflow check.
    // This is maybe a little paranoid.
    if (!CookieSize.isZero()) {
      SizeWithoutCookie = Size;

      llvm::Value *CookieSizeV
        = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());

      const llvm::Type *Types[] = { SizeTy };
      llvm::Value *UAddF
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
      llvm::Value *AddRes
        = CGF.Builder.CreateCall2(UAddF, Size, CookieSizeV);

      Size = CGF.Builder.CreateExtractValue(AddRes, 0);
      llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
      // On overflow, substitute -1 so the allocator fails.
      Size = CGF.Builder.CreateSelect(DidOverflow,
                                      llvm::ConstantInt::get(SizeTy, -1),
                                      Size);
    }

  // Otherwise use the int.umul.with.overflow intrinsic.
  } else {
    llvm::Value *OutermostElementSize
      = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());

    llvm::Value *NumOutermostElements = NumElements;

    // Scale NumElements by the array size multiplier.  This might
    // overflow, but only if the multiplication below also overflows,
    // in which case this multiplication isn't used.
    if (ArraySizeMultiplier != 1)
      NumElements = CGF.Builder.CreateMul(NumElements,
                         llvm::ConstantInt::get(SizeTy, ArraySizeMultiplier));

    // The requested size of the outermost array is non-constant.
    // Multiply that by the static size of the elements of that array;
    // on unsigned overflow, set the size to -1 to trigger an
    // exception from the allocation routine.  This is sufficient to
    // prevent buffer overruns from the allocator returning a
    // seemingly valid pointer to insufficient space.  This idea comes
    // originally from MSVC, and GCC has an open bug requesting
    // similar behavior:
    //   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=19351
    //
    // This will not be sufficient for C++0x, which requires a
    // specific exception class (std::bad_array_new_length).
    // That will require ABI support that has not yet been specified.
    const llvm::Type *Types[] = { SizeTy };
    llvm::Value *UMulF
      = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, Types, 1);
    llvm::Value *MulRes = CGF.Builder.CreateCall2(UMulF, NumOutermostElements,
                                                  OutermostElementSize);

    // The overflow bit.
    llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(MulRes, 1);

    // The result of the multiplication.
    Size = CGF.Builder.CreateExtractValue(MulRes, 0);

    // If we have a cookie, we need to add that size in, too.
    if (!CookieSize.isZero()) {
      SizeWithoutCookie = Size;

      llvm::Value *CookieSizeV
        = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
      llvm::Value *UAddF
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
      llvm::Value *AddRes
        = CGF.Builder.CreateCall2(UAddF, SizeWithoutCookie, CookieSizeV);

      Size = CGF.Builder.CreateExtractValue(AddRes, 0);

      // Overflow in either the multiply or the add poisons the result.
      llvm::Value *AddDidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
      DidOverflow = CGF.Builder.CreateOr(DidOverflow, AddDidOverflow);
    }

    Size = CGF.Builder.CreateSelect(DidOverflow,
                                    llvm::ConstantInt::get(SizeTy, -1),
                                    Size);
  }

  if (CookieSize.isZero())
    SizeWithoutCookie = Size;
  else
    assert(SizeWithoutCookie && "didn't set SizeWithoutCookie?");

  return Size;
}
657 
658 static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
659                                     llvm::Value *NewPtr) {
660 
661   assert(E->getNumConstructorArgs() == 1 &&
662          "Can only have one argument to initializer of POD type.");
663 
664   const Expr *Init = E->getConstructorArg(0);
665   QualType AllocType = E->getAllocatedType();
666 
667   unsigned Alignment =
668     CGF.getContext().getTypeAlignInChars(AllocType).getQuantity();
669   if (!CGF.hasAggregateLLVMType(AllocType))
670     CGF.EmitStoreOfScalar(CGF.EmitScalarExpr(Init), NewPtr,
671                           AllocType.isVolatileQualified(), Alignment,
672                           AllocType);
673   else if (AllocType->isAnyComplexType())
674     CGF.EmitComplexExprIntoAddr(Init, NewPtr,
675                                 AllocType.isVolatileQualified());
676   else {
677     AggValueSlot Slot
678       = AggValueSlot::forAddr(NewPtr, AllocType.isVolatileQualified(), true);
679     CGF.EmitAggExpr(Init, Slot);
680   }
681 }
682 
/// EmitNewArrayInitializer - Emit a loop that stores the new-expression's
/// initializer into each of the NumElements array elements starting at
/// NewPtr.  Only used for POD element types (no constructor call).
void
CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
                                         llvm::Value *NewPtr,
                                         llvm::Value *NumElements) {
  // We have a POD type.
  if (E->getNumConstructorArgs() == 0)
    return;

  const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

  // Create a temporary for the loop index and initialize it with 0.
  llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index");
  llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
  Builder.CreateStore(Zero, IndexPtr);

  // Start the loop with a block that tests the condition.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  llvm::BasicBlock *AfterFor = createBasicBlock("for.end");

  EmitBlock(CondBlock);

  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // Generate: if (loop-index < number-of-elements) fall to the loop body,
  // otherwise, go to the block after the for-loop.
  llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
  llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElements, "isless");
  // If the condition is true, execute the body.
  Builder.CreateCondBr(IsLess, ForBody, AfterFor);

  EmitBlock(ForBody);

  llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
  // Inside the loop body, emit the initializer store into the array element.
  Counter = Builder.CreateLoad(IndexPtr);
  llvm::Value *Address = Builder.CreateInBoundsGEP(NewPtr, Counter,
                                                   "arrayidx");
  StoreAnyExprIntoOneUnit(*this, E, Address);

  EmitBlock(ContinueBlock);

  // Emit the increment of the loop counter.
  llvm::Value *NextVal = llvm::ConstantInt::get(SizeTy, 1);
  Counter = Builder.CreateLoad(IndexPtr);
  NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
  Builder.CreateStore(NextVal, IndexPtr);

  // Finally, branch back up to the condition for the next iteration.
  EmitBranch(CondBlock);

  // Emit the fall-through block.
  EmitBlock(AfterFor, true);
}
736 
737 static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
738                            llvm::Value *NewPtr, llvm::Value *Size) {
739   CGF.EmitCastToVoidPtr(NewPtr);
740   CharUnits Alignment = CGF.getContext().getTypeAlignInChars(T);
741   CGF.Builder.CreateMemSet(NewPtr, CGF.Builder.getInt8(0), Size,
742                            Alignment.getQuantity(), false);
743 }
744 
/// EmitNewInitializer - Emit the initialization of the storage returned by
/// operator new, dispatching on array-ness and on whether a constructor
/// is involved.
static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               llvm::Value *NewPtr,
                               llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie) {
  if (E->isArray()) {
    if (CXXConstructorDecl *Ctor = E->getConstructor()) {
      bool RequiresZeroInitialization = false;
      if (Ctor->getParent()->hasTrivialConstructor()) {
        // If new expression did not specify value-initialization, then there
        // is no initialization.
        if (!E->hasInitializer() || Ctor->getParent()->isEmpty())
          return;

        if (CGF.CGM.getTypes().isZeroInitializable(E->getAllocatedType())) {
          // Optimization: since zero initialization will just set the memory
          // to all zeroes, generate a single memset to do it in one shot.
          EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
                         AllocSizeWithoutCookie);
          return;
        }

        RequiresZeroInitialization = true;
      }

      // Emit a constructor-call loop over the array elements.
      CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
                                     E->constructor_arg_begin(),
                                     E->constructor_arg_end(),
                                     RequiresZeroInitialization);
      return;
    } else if (E->getNumConstructorArgs() == 1 &&
               isa<ImplicitValueInitExpr>(E->getConstructorArg(0))) {
      // Optimization: since zero initialization will just set the memory
      // to all zeroes, generate a single memset to do it in one shot.
      EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
                     AllocSizeWithoutCookie);
      return;
    } else {
      // POD array with an initializer: store it element by element.
      CGF.EmitNewArrayInitializer(E, NewPtr, NumElements);
      return;
    }
  }

  if (CXXConstructorDecl *Ctor = E->getConstructor()) {
    // Per C++ [expr.new]p15, if we have an initializer, then we're performing
    // direct initialization. C++ [dcl.init]p5 requires that we
    // zero-initialize storage if there are no user-declared constructors.
    if (E->hasInitializer() &&
        !Ctor->getParent()->hasUserDeclaredConstructor() &&
        !Ctor->getParent()->isEmpty())
      CGF.EmitNullInitialization(NewPtr, E->getAllocatedType());

    CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
                               NewPtr, E->constructor_arg_begin(),
                               E->constructor_arg_end());

    return;
  }
  // We have a POD type.
  if (E->getNumConstructorArgs() == 0)
    return;

  StoreAnyExprIntoOneUnit(CGF, E, NewPtr);
}
808 
namespace {
  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression.
  class CallDeleteDuringNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;            // number of trailing placement args
    const FunctionDecl *OperatorDelete; // the 'operator delete' to call
    llvm::Value *Ptr;                   // the allocation being freed
    llvm::Value *AllocSize;             // the size passed to operator new

    // The placement arguments live in trailing storage allocated
    // immediately after this object; pushCleanupWithExtra reserves
    // getExtraSize() extra bytes for them.
    RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }

  public:
    /// Number of extra trailing bytes to reserve for the placement args.
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(RValue);
    }

    CallDeleteDuringNew(size_t NumPlacementArgs,
                        const FunctionDecl *OperatorDelete,
                        llvm::Value *Ptr,
                        llvm::Value *AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    /// Store placement argument I into the trailing storage.
    void setPlacementArg(unsigned I, RValue Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    // Build and emit the call to 'operator delete', forwarding the pointer,
    // the optional size_t argument, and the saved placement arguments.
    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      // Either the parameters match (void* + placement args), or this is
      // the usual (void*, size_t) member deallocation function.
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.add(RValue::get(Ptr), *AI++);

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2)
        DeleteArgs.add(RValue::get(AllocSize), *AI++);

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I)
        DeleteArgs.add(getPlacementArgs()[I], *AI++);

      // Call 'operator delete'.
      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
    }
  };

  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression when the new expression is
  /// conditional.
  //
  // Same as CallDeleteDuringNew, except every value is stored as a
  // DominatingValue so it can be restored in a block that the original
  // definitions do not dominate.
  class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    DominatingValue<RValue>::saved_type Ptr;
    DominatingValue<RValue>::saved_type AllocSize;

    // Saved placement arguments, in trailing storage (see getExtraSize).
    DominatingValue<RValue>::saved_type *getPlacementArgs() {
      return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
    }

  public:
    /// Number of extra trailing bytes to reserve for the saved args.
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
    }

    CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
                                   const FunctionDecl *OperatorDelete,
                                   DominatingValue<RValue>::saved_type Ptr,
                              DominatingValue<RValue>::saved_type AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    /// Store saved placement argument I into the trailing storage.
    void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    // Restore each saved value in the cleanup's emission context, then
    // emit the call to 'operator delete' exactly as in the
    // unconditional case.
    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.add(Ptr.restore(CGF), *AI++);

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2) {
        RValue RV = AllocSize.restore(CGF);
        DeleteArgs.add(RV, *AI++);
      }

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
        RValue RV = getPlacementArgs()[I].restore(CGF);
        DeleteArgs.add(RV, *AI++);
      }

      // Call 'operator delete'.
      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
    }
  };
}
925 
926 /// Enter a cleanup to call 'operator delete' if the initializer in a
927 /// new-expression throws.
928 static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
929                                   const CXXNewExpr *E,
930                                   llvm::Value *NewPtr,
931                                   llvm::Value *AllocSize,
932                                   const CallArgList &NewArgs) {
933   // If we're not inside a conditional branch, then the cleanup will
934   // dominate and we can do the easier (and more efficient) thing.
935   if (!CGF.isInConditionalBranch()) {
936     CallDeleteDuringNew *Cleanup = CGF.EHStack
937       .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
938                                                  E->getNumPlacementArgs(),
939                                                  E->getOperatorDelete(),
940                                                  NewPtr, AllocSize);
941     for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
942       Cleanup->setPlacementArg(I, NewArgs[I+1].RV);
943 
944     return;
945   }
946 
947   // Otherwise, we need to save all this stuff.
948   DominatingValue<RValue>::saved_type SavedNewPtr =
949     DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
950   DominatingValue<RValue>::saved_type SavedAllocSize =
951     DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
952 
953   CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
954     .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(InactiveEHCleanup,
955                                                  E->getNumPlacementArgs(),
956                                                  E->getOperatorDelete(),
957                                                  SavedNewPtr,
958                                                  SavedAllocSize);
959   for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
960     Cleanup->setPlacementArg(I,
961                      DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV));
962 
963   CGF.ActivateCleanupBlock(CGF.EHStack.stable_begin());
964 }
965 
966 llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
967   // The element type being allocated.
968   QualType allocType = getContext().getBaseElementType(E->getAllocatedType());
969 
970   // 1. Build a call to the allocation function.
971   FunctionDecl *allocator = E->getOperatorNew();
972   const FunctionProtoType *allocatorType =
973     allocator->getType()->castAs<FunctionProtoType>();
974 
975   CallArgList allocatorArgs;
976 
977   // The allocation size is the first argument.
978   QualType sizeType = getContext().getSizeType();
979 
980   llvm::Value *numElements = 0;
981   llvm::Value *allocSizeWithoutCookie = 0;
982   llvm::Value *allocSize =
983     EmitCXXNewAllocSize(getContext(), *this, E, numElements,
984                         allocSizeWithoutCookie);
985 
986   allocatorArgs.add(RValue::get(allocSize), sizeType);
987 
988   // Emit the rest of the arguments.
989   // FIXME: Ideally, this should just use EmitCallArgs.
990   CXXNewExpr::const_arg_iterator placementArg = E->placement_arg_begin();
991 
992   // First, use the types from the function type.
993   // We start at 1 here because the first argument (the allocation size)
994   // has already been emitted.
995   for (unsigned i = 1, e = allocatorType->getNumArgs(); i != e;
996        ++i, ++placementArg) {
997     QualType argType = allocatorType->getArgType(i);
998 
999     assert(getContext().hasSameUnqualifiedType(argType.getNonReferenceType(),
1000                                                placementArg->getType()) &&
1001            "type mismatch in call argument!");
1002 
1003     EmitCallArg(allocatorArgs, *placementArg, argType);
1004   }
1005 
1006   // Either we've emitted all the call args, or we have a call to a
1007   // variadic function.
1008   assert((placementArg == E->placement_arg_end() ||
1009           allocatorType->isVariadic()) &&
1010          "Extra arguments to non-variadic function!");
1011 
1012   // If we still have any arguments, emit them using the type of the argument.
1013   for (CXXNewExpr::const_arg_iterator placementArgsEnd = E->placement_arg_end();
1014        placementArg != placementArgsEnd; ++placementArg) {
1015     EmitCallArg(allocatorArgs, *placementArg, placementArg->getType());
1016   }
1017 
1018   // Emit the allocation call.
1019   RValue RV =
1020     EmitCall(CGM.getTypes().getFunctionInfo(allocatorArgs, allocatorType),
1021              CGM.GetAddrOfFunction(allocator), ReturnValueSlot(),
1022              allocatorArgs, allocator);
1023 
1024   // Emit a null check on the allocation result if the allocation
1025   // function is allowed to return null (because it has a non-throwing
1026   // exception spec; for this part, we inline
1027   // CXXNewExpr::shouldNullCheckAllocation()) and we have an
1028   // interesting initializer.
1029   bool nullCheck = allocatorType->isNothrow(getContext()) &&
1030     !(allocType->isPODType() && !E->hasInitializer());
1031 
1032   llvm::BasicBlock *nullCheckBB = 0;
1033   llvm::BasicBlock *contBB = 0;
1034 
1035   llvm::Value *allocation = RV.getScalarVal();
1036   unsigned AS =
1037     cast<llvm::PointerType>(allocation->getType())->getAddressSpace();
1038 
1039   // The null-check means that the initializer is conditionally
1040   // evaluated.
1041   ConditionalEvaluation conditional(*this);
1042 
1043   if (nullCheck) {
1044     conditional.begin(*this);
1045 
1046     nullCheckBB = Builder.GetInsertBlock();
1047     llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
1048     contBB = createBasicBlock("new.cont");
1049 
1050     llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
1051     Builder.CreateCondBr(isNull, contBB, notNullBB);
1052     EmitBlock(notNullBB);
1053   }
1054 
1055   assert((allocSize == allocSizeWithoutCookie) ==
1056          CalculateCookiePadding(*this, E).isZero());
1057   if (allocSize != allocSizeWithoutCookie) {
1058     assert(E->isArray());
1059     allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
1060                                                        numElements,
1061                                                        E, allocType);
1062   }
1063 
1064   // If there's an operator delete, enter a cleanup to call it if an
1065   // exception is thrown.
1066   EHScopeStack::stable_iterator operatorDeleteCleanup;
1067   if (E->getOperatorDelete()) {
1068     EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
1069     operatorDeleteCleanup = EHStack.stable_begin();
1070   }
1071 
1072   const llvm::Type *elementPtrTy
1073     = ConvertTypeForMem(allocType)->getPointerTo(AS);
1074   llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy);
1075 
1076   if (E->isArray()) {
1077     EmitNewInitializer(*this, E, result, numElements, allocSizeWithoutCookie);
1078 
1079     // NewPtr is a pointer to the base element type.  If we're
1080     // allocating an array of arrays, we'll need to cast back to the
1081     // array pointer type.
1082     const llvm::Type *resultType = ConvertTypeForMem(E->getType());
1083     if (result->getType() != resultType)
1084       result = Builder.CreateBitCast(result, resultType);
1085   } else {
1086     EmitNewInitializer(*this, E, result, numElements, allocSizeWithoutCookie);
1087   }
1088 
1089   // Deactivate the 'operator delete' cleanup if we finished
1090   // initialization.
1091   if (operatorDeleteCleanup.isValid())
1092     DeactivateCleanupBlock(operatorDeleteCleanup);
1093 
1094   if (nullCheck) {
1095     conditional.end(*this);
1096 
1097     llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
1098     EmitBlock(contBB);
1099 
1100     llvm::PHINode *PHI = Builder.CreatePHI(result->getType(), 2);
1101     PHI->addIncoming(result, notNullBB);
1102     PHI->addIncoming(llvm::Constant::getNullValue(result->getType()),
1103                      nullCheckBB);
1104 
1105     result = PHI;
1106   }
1107 
1108   return result;
1109 }
1110 
1111 void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
1112                                      llvm::Value *Ptr,
1113                                      QualType DeleteTy) {
1114   assert(DeleteFD->getOverloadedOperator() == OO_Delete);
1115 
1116   const FunctionProtoType *DeleteFTy =
1117     DeleteFD->getType()->getAs<FunctionProtoType>();
1118 
1119   CallArgList DeleteArgs;
1120 
1121   // Check if we need to pass the size to the delete operator.
1122   llvm::Value *Size = 0;
1123   QualType SizeTy;
1124   if (DeleteFTy->getNumArgs() == 2) {
1125     SizeTy = DeleteFTy->getArgType(1);
1126     CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
1127     Size = llvm::ConstantInt::get(ConvertType(SizeTy),
1128                                   DeleteTypeSize.getQuantity());
1129   }
1130 
1131   QualType ArgTy = DeleteFTy->getArgType(0);
1132   llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
1133   DeleteArgs.add(RValue::get(DeletePtr), ArgTy);
1134 
1135   if (Size)
1136     DeleteArgs.add(RValue::get(Size), SizeTy);
1137 
1138   // Emit the call to delete.
1139   EmitCall(CGM.getTypes().getFunctionInfo(DeleteArgs, DeleteFTy),
1140            CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
1141            DeleteArgs, DeleteFD);
1142 }
1143 
namespace {
  /// Calls the given 'operator delete' on a single object.
  struct CallObjectDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;                   // the object being deleted
    const FunctionDecl *OperatorDelete; // the 'operator delete' to invoke
    QualType ElementType;               // static type of the deleted object

    CallObjectDelete(llvm::Value *Ptr,
                     const FunctionDecl *OperatorDelete,
                     QualType ElementType)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}

    // Emit the delete call.  Runs on both the normal path and the EH path,
    // so the storage is freed even if the destructor throws.
    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
    }
  };
}
1161 
/// Emit the code for deleting a single object.
static void EmitObjectDelete(CodeGenFunction &CGF,
                             const FunctionDecl *OperatorDelete,
                             llvm::Value *Ptr,
                             QualType ElementType) {
  // Find the destructor for the type, if applicable.  If the
  // destructor is virtual, we'll just emit the vcall and return.
  const CXXDestructorDecl *Dtor = 0;
  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (!RD->hasTrivialDestructor()) {
      Dtor = RD->getDestructor();

      if (Dtor->isVirtual()) {
        // Virtual case: dispatch through the vtable to the *deleting*
        // destructor, which is responsible for both destroying the object
        // and calling the appropriate 'operator delete'.
        const llvm::Type *Ty =
          CGF.getTypes().GetFunctionType(CGF.getTypes().getFunctionInfo(Dtor,
                                                               Dtor_Complete),
                                         /*isVariadic=*/false);

        llvm::Value *Callee
          = CGF.BuildVirtualCall(Dtor, Dtor_Deleting, Ptr, Ty);
        CGF.EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
                              0, 0);

        // The dtor took care of deleting the object.
        return;
      }
    }
  }

  // Make sure that we call delete even if the dtor throws.
  // This doesn't have to a conditional cleanup because we're going
  // to pop it off in a second.
  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                            Ptr, OperatorDelete, ElementType);

  // Run the (non-virtual) destructor, if there is one.
  if (Dtor)
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false, Ptr);

  // Pop the cleanup, emitting the delete call on the normal path here.
  CGF.PopCleanupBlock();
}
1204 
1205 namespace {
1206   /// Calls the given 'operator delete' on an array of objects.
1207   struct CallArrayDelete : EHScopeStack::Cleanup {
1208     llvm::Value *Ptr;
1209     const FunctionDecl *OperatorDelete;
1210     llvm::Value *NumElements;
1211     QualType ElementType;
1212     CharUnits CookieSize;
1213 
1214     CallArrayDelete(llvm::Value *Ptr,
1215                     const FunctionDecl *OperatorDelete,
1216                     llvm::Value *NumElements,
1217                     QualType ElementType,
1218                     CharUnits CookieSize)
1219       : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
1220         ElementType(ElementType), CookieSize(CookieSize) {}
1221 
1222     void Emit(CodeGenFunction &CGF, bool IsForEH) {
1223       const FunctionProtoType *DeleteFTy =
1224         OperatorDelete->getType()->getAs<FunctionProtoType>();
1225       assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);
1226 
1227       CallArgList Args;
1228 
1229       // Pass the pointer as the first argument.
1230       QualType VoidPtrTy = DeleteFTy->getArgType(0);
1231       llvm::Value *DeletePtr
1232         = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
1233       Args.add(RValue::get(DeletePtr), VoidPtrTy);
1234 
1235       // Pass the original requested size as the second argument.
1236       if (DeleteFTy->getNumArgs() == 2) {
1237         QualType size_t = DeleteFTy->getArgType(1);
1238         const llvm::IntegerType *SizeTy
1239           = cast<llvm::IntegerType>(CGF.ConvertType(size_t));
1240 
1241         CharUnits ElementTypeSize =
1242           CGF.CGM.getContext().getTypeSizeInChars(ElementType);
1243 
1244         // The size of an element, multiplied by the number of elements.
1245         llvm::Value *Size
1246           = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
1247         Size = CGF.Builder.CreateMul(Size, NumElements);
1248 
1249         // Plus the size of the cookie if applicable.
1250         if (!CookieSize.isZero()) {
1251           llvm::Value *CookieSizeV
1252             = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
1253           Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
1254         }
1255 
1256         Args.add(RValue::get(Size), size_t);
1257       }
1258 
1259       // Emit the call to delete.
1260       CGF.EmitCall(CGF.getTypes().getFunctionInfo(Args, DeleteFTy),
1261                    CGF.CGM.GetAddrOfFunction(OperatorDelete),
1262                    ReturnValueSlot(), Args, OperatorDelete);
1263     }
1264   };
1265 }
1266 
/// Emit the code for deleting an array of objects.
static void EmitArrayDelete(CodeGenFunction &CGF,
                            const CXXDeleteExpr *E,
                            llvm::Value *Ptr,
                            QualType ElementType) {
  // Let the ABI decode the array cookie: it yields the element count
  // (when needed), the pointer originally returned by operator new[],
  // and the cookie size.
  llvm::Value *NumElements = 0;
  llvm::Value *AllocatedPtr = 0;
  CharUnits CookieSize;
  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, Ptr, E, ElementType,
                                      NumElements, AllocatedPtr, CookieSize);

  assert(AllocatedPtr && "ReadArrayCookie didn't set AllocatedPtr");

  // Make sure that we call delete even if one of the dtors throws.
  const FunctionDecl *OperatorDelete = E->getOperatorDelete();
  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
                                           AllocatedPtr, OperatorDelete,
                                           NumElements, ElementType,
                                           CookieSize);

  // Destroy the elements (in reverse order, handled by the aggregate
  // destructor-call emission) if the element type has a real destructor.
  if (const CXXRecordDecl *RD = ElementType->getAsCXXRecordDecl()) {
    if (!RD->hasTrivialDestructor()) {
      assert(NumElements && "ReadArrayCookie didn't find element count"
                            " for a class with destructor");
      CGF.EmitCXXAggrDestructorCall(RD->getDestructor(), NumElements, Ptr);
    }
  }

  // Pop the cleanup, emitting the delete call on the normal path.
  CGF.PopCleanupBlock();
}
1297 
void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {

  // Get at the argument before we performed the implicit conversion
  // to void*.
  const Expr *Arg = E->getArgument();
  while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
    // Only strip casts that merely convert to void*; stop at user-defined
    // conversions, which have their own semantics.
    if (ICE->getCastKind() != CK_UserDefinedConversion &&
        ICE->getType()->isVoidPointerType())
      Arg = ICE->getSubExpr();
    else
      break;
  }

  llvm::Value *Ptr = EmitScalarExpr(Arg);

  // Null check the pointer: deleting a null pointer is a no-op
  // (C++ [expr.delete]p2).
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);

  // We might be deleting a pointer to array.  If so, GEP down to the
  // first non-array element.
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
  QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    llvm::SmallVector<llvm::Value*,8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr
             = getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();

      // 2. GEP to the first element of the array.
      GEP.push_back(Zero);
    }

    Ptr = Builder.CreateInBoundsGEP(Ptr, GEP.begin(), GEP.end(), "del.first");
  }

  // Sanity check: after unpeeling, the pointer must point at DeleteTy.
  assert(ConvertTypeForMem(DeleteTy) ==
         cast<llvm::PointerType>(Ptr->getType())->getElementType());

  // Dispatch to the array or scalar form of delete.
  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E, Ptr, DeleteTy);
  } else {
    EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy);
  }

  EmitBlock(DeleteEnd);
}
1356 
1357 static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
1358   // void __cxa_bad_typeid();
1359 
1360   const llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
1361   const llvm::FunctionType *FTy =
1362   llvm::FunctionType::get(VoidTy, false);
1363 
1364   return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1365 }
1366 
1367 static void EmitBadTypeidCall(CodeGenFunction &CGF) {
1368   llvm::Value *Fn = getBadTypeidFn(CGF);
1369   CGF.EmitCallOrInvoke(Fn, 0, 0).setDoesNotReturn();
1370   CGF.Builder.CreateUnreachable();
1371 }
1372 
/// Emit a typeid of a polymorphic glvalue by loading the type_info
/// pointer out of the object's vtable.
static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF,
                                         const Expr *E,
                                         const llvm::Type *StdTypeInfoPtrTy) {
  // Evaluate the operand to get the object's address.
  llvm::Value *ThisPtr = CGF.EmitLValue(E).getAddress();

  // C++ [expr.typeid]p2:
  //   If the glvalue expression is obtained by applying the unary * operator to
  //   a pointer and the pointer is a null pointer value, the typeid expression
  //   throws the std::bad_typeid exception.
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E->IgnoreParens())) {
    if (UO->getOpcode() == UO_Deref) {
      llvm::BasicBlock *BadTypeidBlock =
        CGF.createBasicBlock("typeid.bad_typeid");
      llvm::BasicBlock *EndBlock =
        CGF.createBasicBlock("typeid.end");

      llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr);
      CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);

      // __cxa_bad_typeid never returns; only the non-null path continues.
      CGF.EmitBlock(BadTypeidBlock);
      EmitBadTypeidCall(CGF);
      CGF.EmitBlock(EndBlock);
    }
  }

  // Load the vtable pointer, viewed as a pointer to type_info* slots.
  llvm::Value *Value = CGF.GetVTablePtr(ThisPtr,
                                        StdTypeInfoPtrTy->getPointerTo());

  // Load the type info.
  // The slot at index -1 relative to the address point holds the
  // type_info pointer (Itanium C++ ABI vtable layout).
  Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
  return CGF.Builder.CreateLoad(Value);
}
1406 
1407 llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
1408   const llvm::Type *StdTypeInfoPtrTy =
1409     ConvertType(E->getType())->getPointerTo();
1410 
1411   if (E->isTypeOperand()) {
1412     llvm::Constant *TypeInfo =
1413       CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
1414     return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
1415   }
1416 
1417   // C++ [expr.typeid]p2:
1418   //   When typeid is applied to a glvalue expression whose type is a
1419   //   polymorphic class type, the result refers to a std::type_info object
1420   //   representing the type of the most derived object (that is, the dynamic
1421   //   type) to which the glvalue refers.
1422   if (E->getExprOperand()->isGLValue()) {
1423     if (const RecordType *RT =
1424           E->getExprOperand()->getType()->getAs<RecordType>()) {
1425       const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1426       if (RD->isPolymorphic())
1427         return EmitTypeidFromVTable(*this, E->getExprOperand(),
1428                                     StdTypeInfoPtrTy);
1429     }
1430   }
1431 
1432   QualType OperandTy = E->getExprOperand()->getType();
1433   return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
1434                                StdTypeInfoPtrTy);
1435 }
1436 
1437 static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
1438   // void *__dynamic_cast(const void *sub,
1439   //                      const abi::__class_type_info *src,
1440   //                      const abi::__class_type_info *dst,
1441   //                      std::ptrdiff_t src2dst_offset);
1442 
1443   const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
1444   const llvm::Type *PtrDiffTy =
1445     CGF.ConvertType(CGF.getContext().getPointerDiffType());
1446 
1447   const llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
1448 
1449   const llvm::FunctionType *FTy =
1450     llvm::FunctionType::get(Int8PtrTy, Args, false);
1451 
1452   return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast");
1453 }
1454 
1455 static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
1456   // void __cxa_bad_cast();
1457 
1458   const llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
1459   const llvm::FunctionType *FTy =
1460     llvm::FunctionType::get(VoidTy, false);
1461 
1462   return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1463 }
1464 
1465 static void EmitBadCastCall(CodeGenFunction &CGF) {
1466   llvm::Value *Fn = getBadCastFn(CGF);
1467   CGF.EmitCallOrInvoke(Fn, 0, 0).setDoesNotReturn();
1468   CGF.Builder.CreateUnreachable();
1469 }
1470 
/// Emit the runtime portion of a dynamic_cast whose source value is
/// known non-null: either the dynamic_cast<void*> fast path, or a call
/// to __dynamic_cast with the source and destination RTTI.
static llvm::Value *
EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
                    QualType SrcTy, QualType DestTy,
                    llvm::BasicBlock *CastEnd) {
  const llvm::Type *PtrDiffLTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());
  const llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  if (const PointerType *PTy = DestTy->getAs<PointerType>()) {
    if (PTy->getPointeeType()->isVoidType()) {
      // C++ [expr.dynamic.cast]p7:
      //   If T is "pointer to cv void," then the result is a pointer to the
      //   most derived object pointed to by v.

      // Get the vtable pointer.
      llvm::Value *VTable = CGF.GetVTablePtr(Value, PtrDiffLTy->getPointerTo());

      // Get the offset-to-top from the vtable.
      // (slot -2 relative to the address point in the Itanium ABI layout)
      llvm::Value *OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
      OffsetToTop = CGF.Builder.CreateLoad(OffsetToTop, "offset.to.top");

      // Finally, add the offset to the pointer.
      Value = CGF.EmitCastToVoidPtr(Value);
      Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);

      return CGF.Builder.CreateBitCast(Value, DestLTy);
    }
  }

  // General case: determine the source and destination record types,
  // unwrapping a pointer or reference as needed.
  QualType SrcRecordTy;
  QualType DestRecordTy;

  if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) {
    SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
    DestRecordTy = DestPTy->getPointeeType();
  } else {
    SrcRecordTy = SrcTy;
    DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
  }

  assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
  assert(DestRecordTy->isRecordType() && "dest type must be a record type!");

  llvm::Value *SrcRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // FIXME: Actually compute a hint here.
  llvm::Value *OffsetHint = llvm::ConstantInt::get(PtrDiffLTy, -1ULL);

  // Emit the call to __dynamic_cast.
  Value = CGF.EmitCastToVoidPtr(Value);
  Value = CGF.Builder.CreateCall4(getDynamicCastFn(CGF), Value,
                                  SrcRTTI, DestRTTI, OffsetHint);
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
      CGF.createBasicBlock("dynamic_cast.bad_cast");

    // A null result for the reference form means failure: throw.
    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}
1544 
1545 static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
1546                                           QualType DestTy) {
1547   const llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1548   if (DestTy->isPointerType())
1549     return llvm::Constant::getNullValue(DestLTy);
1550 
1551   /// C++ [expr.dynamic.cast]p9:
1552   ///   A failed cast to reference type throws std::bad_cast
1553   EmitBadCastCall(CGF);
1554 
1555   CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
1556   return llvm::UndefValue::get(DestLTy);
1557 }
1558 
1559 llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
1560                                               const CXXDynamicCastExpr *DCE) {
1561   QualType DestTy = DCE->getTypeAsWritten();
1562 
1563   if (DCE->isAlwaysNull())
1564     return EmitDynamicCastToNull(*this, DestTy);
1565 
1566   QualType SrcTy = DCE->getSubExpr()->getType();
1567 
1568   // C++ [expr.dynamic.cast]p4:
1569   //   If the value of v is a null pointer value in the pointer case, the result
1570   //   is the null pointer value of type T.
1571   bool ShouldNullCheckSrcValue = SrcTy->isPointerType();
1572 
1573   llvm::BasicBlock *CastNull = 0;
1574   llvm::BasicBlock *CastNotNull = 0;
1575   llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");
1576 
1577   if (ShouldNullCheckSrcValue) {
1578     CastNull = createBasicBlock("dynamic_cast.null");
1579     CastNotNull = createBasicBlock("dynamic_cast.notnull");
1580 
1581     llvm::Value *IsNull = Builder.CreateIsNull(Value);
1582     Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
1583     EmitBlock(CastNotNull);
1584   }
1585 
1586   Value = EmitDynamicCastCall(*this, Value, SrcTy, DestTy, CastEnd);
1587 
1588   if (ShouldNullCheckSrcValue) {
1589     EmitBranch(CastEnd);
1590 
1591     EmitBlock(CastNull);
1592     EmitBranch(CastEnd);
1593   }
1594 
1595   EmitBlock(CastEnd);
1596 
1597   if (ShouldNullCheckSrcValue) {
1598     llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
1599     PHI->addIncoming(Value, CastNotNull);
1600     PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);
1601 
1602     Value = PHI;
1603   }
1604 
1605   return Value;
1606 }
1607