1 //===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code dealing with code generation of C++ expressions
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "clang/Frontend/CodeGenOptions.h"
15 #include "CodeGenFunction.h"
16 #include "CGCXXABI.h"
17 #include "CGObjCRuntime.h"
18 #include "CGDebugInfo.h"
19 #include "llvm/Intrinsics.h"
20 using namespace clang;
21 using namespace CodeGen;
22 
/// EmitCXXMemberCall - Common tail for emitting a call to a non-static C++
/// member function: builds the argument list (this, optional VTT, then the
/// explicit arguments) and emits the call.
///
/// \param MD the instance method being invoked
/// \param Callee the callee: either a direct function address or a function
///        pointer loaded from a vtable
/// \param ReturnValue slot to receive the call result, if the caller set one
/// \param This the object pointer passed as the implicit first argument
/// \param VTT the VTT argument to pass, or null when no VTT parameter is
///        needed
/// \param ArgBeg,ArgEnd the explicit source-level call arguments
RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
                                          llvm::Value *Callee,
                                          ReturnValueSlot ReturnValue,
                                          llvm::Value *This,
                                          llvm::Value *VTT,
                                          CallExpr::const_arg_iterator ArgBeg,
                                          CallExpr::const_arg_iterator ArgEnd) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  CallArgList Args;

  // Push the this ptr.
  Args.push_back(std::make_pair(RValue::get(This),
                                MD->getThisType(getContext())));

  // If there is a VTT parameter, emit it.
  if (VTT) {
    // The VTT is typed as a void**.
    QualType T = getContext().getPointerType(getContext().VoidPtrTy);
    Args.push_back(std::make_pair(RValue::get(VTT), T));
  }

  // And the rest of the call args
  EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);

  QualType ResultType = FPT->getResultType();
  return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args,
                                                 FPT->getExtInfo()),
                  Callee, ReturnValue, Args, MD);
}
55 
56 static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
57   const Expr *E = Base;
58 
59   while (true) {
60     E = E->IgnoreParens();
61     if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
62       if (CE->getCastKind() == CK_DerivedToBase ||
63           CE->getCastKind() == CK_UncheckedDerivedToBase ||
64           CE->getCastKind() == CK_NoOp) {
65         E = CE->getSubExpr();
66         continue;
67       }
68     }
69 
70     break;
71   }
72 
73   QualType DerivedType = E->getType();
74   if (const PointerType *PTy = DerivedType->getAs<PointerType>())
75     DerivedType = PTy->getPointeeType();
76 
77   return cast<CXXRecordDecl>(DerivedType->castAs<RecordType>()->getDecl());
78 }
79 
80 /// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on given
81 /// expr can be devirtualized.
82 static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
83                                                const Expr *Base,
84                                                const CXXMethodDecl *MD) {
85 
86   // When building with -fapple-kext, all calls must go through the vtable since
87   // the kernel linker can do runtime patching of vtables.
88   if (Context.getLangOptions().AppleKext)
89     return false;
90 
91   // If the most derived class is marked final, we know that no subclass can
92   // override this member function and so we can devirtualize it. For example:
93   //
94   // struct A { virtual void f(); }
95   // struct B final : A { };
96   //
97   // void f(B *b) {
98   //   b->f();
99   // }
100   //
101   const CXXRecordDecl *MostDerivedClassDecl = getMostDerivedClassDecl(Base);
102   if (MostDerivedClassDecl->hasAttr<FinalAttr>())
103     return true;
104 
105   // If the member function is marked 'final', we know that it can't be
106   // overridden and can therefore devirtualize it.
107   if (MD->hasAttr<FinalAttr>())
108     return true;
109 
110   // Similarly, if the class itself is marked 'final' it can't be overridden
111   // and we can therefore devirtualize the member function call.
112   if (MD->getParent()->hasAttr<FinalAttr>())
113     return true;
114 
115   if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
116     if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
117       // This is a record decl. We know the type and can devirtualize it.
118       return VD->getType()->isRecordType();
119     }
120 
121     return false;
122   }
123 
124   // We can always devirtualize calls on temporary object expressions.
125   if (isa<CXXConstructExpr>(Base))
126     return true;
127 
128   // And calls on bound temporaries.
129   if (isa<CXXBindTemporaryExpr>(Base))
130     return true;
131 
132   // Check if this is a call expr that returns a record type.
133   if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
134     return CE->getCallReturnType()->isRecordType();
135 
136   // We can't devirtualize the call.
137   return false;
138 }
139 
140 // Note: This function also emit constructor calls to support a MSVC
141 // extensions allowing explicit constructor function call.
142 RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
143                                               ReturnValueSlot ReturnValue) {
144   if (isa<BinaryOperator>(CE->getCallee()->IgnoreParens()))
145     return EmitCXXMemberPointerCallExpr(CE, ReturnValue);
146 
147   const MemberExpr *ME = cast<MemberExpr>(CE->getCallee()->IgnoreParens());
148   const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
149 
150   CGDebugInfo *DI = getDebugInfo();
151   if (DI && CGM.getCodeGenOpts().LimitDebugInfo
152       && !isa<CallExpr>(ME->getBase())) {
153     QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
154     if (const PointerType * PTy = dyn_cast<PointerType>(PQTy)) {
155       DI->getOrCreateRecordType(PTy->getPointeeType(),
156                                 MD->getParent()->getLocation());
157     }
158   }
159 
160   if (MD->isStatic()) {
161     // The method is static, emit it as we would a regular call.
162     llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
163     return EmitCall(getContext().getPointerType(MD->getType()), Callee,
164                     ReturnValue, CE->arg_begin(), CE->arg_end());
165   }
166 
167   // Compute the object pointer.
168   llvm::Value *This;
169   if (ME->isArrow())
170     This = EmitScalarExpr(ME->getBase());
171   else
172     This = EmitLValue(ME->getBase()).getAddress();
173 
174   if (MD->isTrivial()) {
175     if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
176     if (isa<CXXConstructorDecl>(MD) &&
177         cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
178       return RValue::get(0);
179 
180     if (MD->isCopyAssignmentOperator()) {
181       // We don't like to generate the trivial copy assignment operator when
182       // it isn't necessary; just produce the proper effect here.
183       llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
184       EmitAggregateCopy(This, RHS, CE->getType());
185       return RValue::get(This);
186     }
187 
188     if (isa<CXXConstructorDecl>(MD) &&
189         cast<CXXConstructorDecl>(MD)->isCopyConstructor()) {
190       llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
191       EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
192                                      CE->arg_begin(), CE->arg_end());
193       return RValue::get(This);
194     }
195     llvm_unreachable("unknown trivial member function");
196   }
197 
198   // Compute the function type we're calling.
199   const CGFunctionInfo *FInfo = 0;
200   if (isa<CXXDestructorDecl>(MD))
201     FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXDestructorDecl>(MD),
202                                            Dtor_Complete);
203   else if (isa<CXXConstructorDecl>(MD))
204     FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXConstructorDecl>(MD),
205                                             Ctor_Complete);
206   else
207     FInfo = &CGM.getTypes().getFunctionInfo(MD);
208 
209   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
210   const llvm::Type *Ty
211     = CGM.getTypes().GetFunctionType(*FInfo, FPT->isVariadic());
212 
213   // C++ [class.virtual]p12:
214   //   Explicit qualification with the scope operator (5.1) suppresses the
215   //   virtual call mechanism.
216   //
217   // We also don't emit a virtual call if the base expression has a record type
218   // because then we know what the type is.
219   bool UseVirtualCall;
220   UseVirtualCall = MD->isVirtual() && !ME->hasQualifier()
221                    && !canDevirtualizeMemberFunctionCalls(getContext(),
222                                                           ME->getBase(), MD);
223   llvm::Value *Callee;
224   if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
225     if (UseVirtualCall) {
226       Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
227     } else {
228       if (getContext().getLangOptions().AppleKext &&
229           MD->isVirtual() &&
230           ME->hasQualifier())
231         Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
232       else
233         Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
234     }
235   } else if (const CXXConstructorDecl *Ctor =
236                dyn_cast<CXXConstructorDecl>(MD)) {
237     Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
238   } else if (UseVirtualCall) {
239       Callee = BuildVirtualCall(MD, This, Ty);
240   } else {
241     if (getContext().getLangOptions().AppleKext &&
242         MD->isVirtual() &&
243         ME->hasQualifier())
244       Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
245     else
246       Callee = CGM.GetAddrOfFunction(MD, Ty);
247   }
248 
249   return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
250                            CE->arg_begin(), CE->arg_end());
251 }
252 
/// EmitCXXMemberPointerCallExpr - Emit a call through a pointer to member
/// function, i.e. the result of a ".*" or "->*" operator used as a callee.
RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  // The callee is always a BinaryOperator (BO_PtrMemD or BO_PtrMemI) whose
  // LHS is the object expression and whose RHS is the member pointer.
  const BinaryOperator *BO =
      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();

  const MemberPointerType *MPT =
    MemFnExpr->getType()->getAs<MemberPointerType>();

  const FunctionProtoType *FPT =
    MPT->getPointeeType()->getAs<FunctionProtoType>();
  const CXXRecordDecl *RD =
    cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Emit the 'this' pointer.
  llvm::Value *This;

  // For "->*" the base already is a pointer; for ".*" take the address of
  // the base lvalue.
  if (BO->getOpcode() == BO_PtrMemI)
    This = EmitScalarExpr(BaseExpr);
  else
    This = EmitLValue(BaseExpr).getAddress();

  // Ask the ABI to load the callee.  Note that This is modified.
  llvm::Value *Callee =
    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);

  CallArgList Args;

  QualType ThisType =
    getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.push_back(std::make_pair(RValue::get(This), ThisType));

  // And the rest of the call args
  EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
  const FunctionType *BO_FPT = BO->getType()->getAs<FunctionProtoType>();
  return EmitCall(CGM.getTypes().getFunctionInfo(Args, BO_FPT), Callee,
                  ReturnValue, Args);
}
298 
/// EmitCXXOperatorMemberCallExpr - Emit an overloaded operator call whose
/// callee MD is an instance method.  Argument 0 of E is the object; the
/// remaining arguments are the operator's explicit parameters.
RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                               const CXXMethodDecl *MD,
                                               ReturnValueSlot ReturnValue) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");
  LValue LV = EmitLValue(E->getArg(0));
  llvm::Value *This = LV.getAddress();

  if (MD->isCopyAssignmentOperator()) {
    const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(MD->getDeclContext());
    if (ClassDecl->hasTrivialCopyAssignment()) {
      // Trivial copy assignment: emit an aggregate copy instead of a call.
      assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
             "EmitCXXOperatorMemberCallExpr - user declared copy assignment");
      llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
      QualType Ty = E->getType();
      EmitAggregateCopy(This, Src, Ty);
      return RValue::get(This);
    }
  }

  // Compute the LLVM function type of the method.
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  const llvm::Type *Ty =
    CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
                                   FPT->isVariadic());
  // Use a vtable lookup unless the call can be devirtualized.
  llvm::Value *Callee;
  if (MD->isVirtual() &&
      !canDevirtualizeMemberFunctionCalls(getContext(),
                                           E->getArg(0), MD))
    Callee = BuildVirtualCall(MD, This, Ty);
  else
    Callee = CGM.GetAddrOfFunction(MD, Ty);

  // Skip arg 0 (the object); it was already passed as 'This'.
  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
                           E->arg_begin() + 1, E->arg_end());
}
335 
/// EmitCXXConstructExpr - Emit the object initialization described by E
/// (zero-initialization, copy elision, array construction, or a plain
/// constructor call) into the aggregate slot Dest.
void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
                                      AggValueSlot Dest) {
  assert(!Dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest.getAddr(), E->getType());

  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Elide the constructor if we're constructing from a temporary.
  // The temporary check is required because Sema sets this on NRVO
  // returns.
  if (getContext().getLangOptions().ElideConstructors && E->isElidable()) {
    assert(getContext().hasSameUnqualifiedType(E->getType(),
                                               E->getArg(0)->getType()));
    if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
      // Evaluate the source expression directly into the destination.
      EmitAggExpr(E->getArg(0), Dest);
      return;
    }
  }

  const ConstantArrayType *Array
    = getContext().getAsConstantArrayType(E->getType());
  if (Array) {
    // Array construction: run the constructor over every element, using a
    // pointer to the base (innermost) element type.
    QualType BaseElementTy = getContext().getBaseElementType(Array);
    const llvm::Type *BasePtr = ConvertType(BaseElementTy);
    BasePtr = llvm::PointerType::getUnqual(BasePtr);
    llvm::Value *BaseAddrPtr =
      Builder.CreateBitCast(Dest.getAddr(), BasePtr);

    EmitCXXAggrConstructorCall(CD, Array, BaseAddrPtr,
                               E->arg_begin(), E->arg_end());
  }
  else {
    // Select the constructor variant from the construction kind.
    CXXCtorType Type =
      (E->getConstructionKind() == CXXConstructExpr::CK_Complete)
      ? Ctor_Complete : Ctor_Base;
    bool ForVirtualBase =
      E->getConstructionKind() == CXXConstructExpr::CK_VirtualBase;

    // Call the constructor.
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest.getAddr(),
                           E->arg_begin(), E->arg_end());
  }
}
388 
/// EmitSynthesizedCXXCopyCtor - Emit a synthesized copy construction of Src
/// into Dest.  Exp must be a CXXConstructExpr describing the copy, possibly
/// wrapped in an ExprWithCleanups.
void
CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
                                            llvm::Value *Src,
                                            const Expr *Exp) {
  // Look through a cleanups wrapper; its cleanups are handled by the
  // RunCleanupsScope below.
  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
    Exp = E->getSubExpr();
  assert(isa<CXXConstructExpr>(Exp) &&
         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
  const CXXConstructorDecl *CD = E->getConstructor();
  RunCleanupsScope Scope(*this);

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  // FIXME. Do I still need this for a copy ctor synthesis?
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest, E->getType());

  assert(!getContext().getAsConstantArrayType(E->getType())
         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src,
                                 E->arg_begin(), E->arg_end());
}
413 
414 /// Check whether the given operator new[] is the global placement
415 /// operator new[].
416 static bool IsPlacementOperatorNewArray(ASTContext &Ctx,
417                                         const FunctionDecl *Fn) {
418   // Must be in global scope.  Note that allocation functions can't be
419   // declared in namespaces.
420   if (!Fn->getDeclContext()->getRedeclContext()->isFileContext())
421     return false;
422 
423   // Signature must be void *operator new[](size_t, void*).
424   // The size_t is common to all operator new[]s.
425   if (Fn->getNumParams() != 2)
426     return false;
427 
428   CanQualType ParamType = Ctx.getCanonicalType(Fn->getParamDecl(1)->getType());
429   return (ParamType == Ctx.VoidPtrTy);
430 }
431 
432 static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
433                                         const CXXNewExpr *E) {
434   if (!E->isArray())
435     return CharUnits::Zero();
436 
437   // No cookie is required if the new operator being used is
438   // ::operator new[](size_t, void*).
439   const FunctionDecl *OperatorNew = E->getOperatorNew();
440   if (IsPlacementOperatorNewArray(CGF.getContext(), OperatorNew))
441     return CharUnits::Zero();
442 
443   return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
444 }
445 
/// EmitCXXNewAllocSize - Compute the byte size to pass to operator new for
/// the given new-expression, including any array cookie.  On return (for
/// array news) NumElements holds the total element count and
/// SizeWithoutCookie holds the allocation size minus the cookie.  If the
/// size computation overflows, the returned size is all-ones (-1) so the
/// allocator fails rather than returning too-small storage.
static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context,
                                        CodeGenFunction &CGF,
                                        const CXXNewExpr *E,
                                        llvm::Value *&NumElements,
                                        llvm::Value *&SizeWithoutCookie) {
  QualType ElemType = E->getAllocatedType();

  const llvm::IntegerType *SizeTy =
    cast<llvm::IntegerType>(CGF.ConvertType(CGF.getContext().getSizeType()));

  CharUnits TypeSize = CGF.getContext().getTypeSizeInChars(ElemType);

  // Scalar news: the size is simply sizeof(T); no cookie, no count.
  if (!E->isArray()) {
    SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());
    return SizeWithoutCookie;
  }

  // Figure out the cookie size.
  CharUnits CookieSize = CalculateCookiePadding(CGF, E);

  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  NumElements = CGF.EmitScalarExpr(E->getArraySize());
  assert(NumElements->getType() == SizeTy && "element count not a size_t");

  // Peel off constant inner array dimensions, accumulating their product,
  // so ElemType ends up as the innermost element type.
  uint64_t ArraySizeMultiplier = 1;
  while (const ConstantArrayType *CAT
             = CGF.getContext().getAsConstantArrayType(ElemType)) {
    ElemType = CAT->getElementType();
    ArraySizeMultiplier *= CAT->getSize().getZExtValue();
  }

  llvm::Value *Size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *NumElementsC =
        dyn_cast<llvm::ConstantInt>(NumElements)) {
    llvm::APInt NEC = NumElementsC->getValue();
    unsigned SizeWidth = NEC.getBitWidth();

    // Determine if there is an overflow here by doing an extended multiply.
    NEC = NEC.zext(SizeWidth*2);
    llvm::APInt SC(SizeWidth*2, TypeSize.getQuantity());
    SC *= NEC;

    if (!CookieSize.isZero()) {
      // Save the current size without a cookie.  We don't care if an
      // overflow's already happened because SizeWithoutCookie isn't
      // used if the allocator returns null or throws, as it should
      // always do on an overflow.
      llvm::APInt SWC = SC.trunc(SizeWidth);
      SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, SWC);

      // Add the cookie size.
      SC += llvm::APInt(SizeWidth*2, CookieSize.getQuantity());
    }

    // The double-width product fits in size_t iff its top half is zero.
    if (SC.countLeadingZeros() >= SizeWidth) {
      SC = SC.trunc(SizeWidth);
      Size = llvm::ConstantInt::get(SizeTy, SC);
    } else {
      // On overflow, produce a -1 so operator new throws.
      Size = llvm::Constant::getAllOnesValue(SizeTy);
    }

    // Scale NumElements while we're at it.
    uint64_t N = NEC.getZExtValue() * ArraySizeMultiplier;
    NumElements = llvm::ConstantInt::get(SizeTy, N);

  // Otherwise, we don't need to do an overflow-checked multiplication if
  // we're multiplying by one.
  } else if (TypeSize.isOne()) {
    assert(ArraySizeMultiplier == 1);

    Size = NumElements;

    // If we need a cookie, add its size in with an overflow check.
    // This is maybe a little paranoid.
    if (!CookieSize.isZero()) {
      SizeWithoutCookie = Size;

      llvm::Value *CookieSizeV
        = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());

      // size + cookie, via llvm.uadd.with.overflow.
      const llvm::Type *Types[] = { SizeTy };
      llvm::Value *UAddF
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
      llvm::Value *AddRes
        = CGF.Builder.CreateCall2(UAddF, Size, CookieSizeV);

      Size = CGF.Builder.CreateExtractValue(AddRes, 0);
      llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
      // Clamp to -1 on overflow.
      Size = CGF.Builder.CreateSelect(DidOverflow,
                                      llvm::ConstantInt::get(SizeTy, -1),
                                      Size);
    }

  // Otherwise use the int.umul.with.overflow intrinsic.
  } else {
    llvm::Value *OutermostElementSize
      = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());

    llvm::Value *NumOutermostElements = NumElements;

    // Scale NumElements by the array size multiplier.  This might
    // overflow, but only if the multiplication below also overflows,
    // in which case this multiplication isn't used.
    if (ArraySizeMultiplier != 1)
      NumElements = CGF.Builder.CreateMul(NumElements,
                         llvm::ConstantInt::get(SizeTy, ArraySizeMultiplier));

    // The requested size of the outermost array is non-constant.
    // Multiply that by the static size of the elements of that array;
    // on unsigned overflow, set the size to -1 to trigger an
    // exception from the allocation routine.  This is sufficient to
    // prevent buffer overruns from the allocator returning a
    // seemingly valid pointer to insufficient space.  This idea comes
    // originally from MSVC, and GCC has an open bug requesting
    // similar behavior:
    //   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=19351
    //
    // This will not be sufficient for C++0x, which requires a
    // specific exception class (std::bad_array_new_length).
    // That will require ABI support that has not yet been specified.
    const llvm::Type *Types[] = { SizeTy };
    llvm::Value *UMulF
      = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, Types, 1);
    llvm::Value *MulRes = CGF.Builder.CreateCall2(UMulF, NumOutermostElements,
                                                  OutermostElementSize);

    // The overflow bit.
    llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(MulRes, 1);

    // The result of the multiplication.
    Size = CGF.Builder.CreateExtractValue(MulRes, 0);

    // If we have a cookie, we need to add that size in, too.
    if (!CookieSize.isZero()) {
      SizeWithoutCookie = Size;

      llvm::Value *CookieSizeV
        = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
      llvm::Value *UAddF
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
      llvm::Value *AddRes
        = CGF.Builder.CreateCall2(UAddF, SizeWithoutCookie, CookieSizeV);

      Size = CGF.Builder.CreateExtractValue(AddRes, 0);

      // Overflow in either the multiply or the add poisons the size.
      llvm::Value *AddDidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
      DidOverflow = CGF.Builder.CreateAnd(DidOverflow, AddDidOverflow);
    }

    // Clamp to -1 on overflow.
    Size = CGF.Builder.CreateSelect(DidOverflow,
                                    llvm::ConstantInt::get(SizeTy, -1),
                                    Size);
  }

  if (CookieSize.isZero())
    SizeWithoutCookie = Size;
  else
    assert(SizeWithoutCookie && "didn't set SizeWithoutCookie?");

  return Size;
}
613 
614 static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
615                                     llvm::Value *NewPtr) {
616 
617   assert(E->getNumConstructorArgs() == 1 &&
618          "Can only have one argument to initializer of POD type.");
619 
620   const Expr *Init = E->getConstructorArg(0);
621   QualType AllocType = E->getAllocatedType();
622 
623   unsigned Alignment =
624     CGF.getContext().getTypeAlignInChars(AllocType).getQuantity();
625   if (!CGF.hasAggregateLLVMType(AllocType))
626     CGF.EmitStoreOfScalar(CGF.EmitScalarExpr(Init), NewPtr,
627                           AllocType.isVolatileQualified(), Alignment,
628                           AllocType);
629   else if (AllocType->isAnyComplexType())
630     CGF.EmitComplexExprIntoAddr(Init, NewPtr,
631                                 AllocType.isVolatileQualified());
632   else {
633     AggValueSlot Slot
634       = AggValueSlot::forAddr(NewPtr, AllocType.isVolatileQualified(), true);
635     CGF.EmitAggExpr(Init, Slot);
636   }
637 }
638 
/// EmitNewArrayInitializer - Initialize a new'd array of non-class type by
/// emitting a loop that stores the single initializer expression into each
/// of the NumElements elements starting at NewPtr.
void
CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
                                         llvm::Value *NewPtr,
                                         llvm::Value *NumElements) {
  // We have a POD type.
  if (E->getNumConstructorArgs() == 0)
    return;

  const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

  // Create a temporary for the loop index and initialize it with 0.
  llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index");
  llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
  Builder.CreateStore(Zero, IndexPtr);

  // Start the loop with a block that tests the condition.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  llvm::BasicBlock *AfterFor = createBasicBlock("for.end");

  EmitBlock(CondBlock);

  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // Generate: if (loop-index < number-of-elements fall to the loop body,
  // otherwise, go to the block after the for-loop.
  llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
  llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElements, "isless");
  // If the condition is true, execute the body.
  Builder.CreateCondBr(IsLess, ForBody, AfterFor);

  EmitBlock(ForBody);

  llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
  // Inside the loop body, emit the constructor call on the array element.
  Counter = Builder.CreateLoad(IndexPtr);
  llvm::Value *Address = Builder.CreateInBoundsGEP(NewPtr, Counter,
                                                   "arrayidx");
  StoreAnyExprIntoOneUnit(*this, E, Address);

  EmitBlock(ContinueBlock);

  // Emit the increment of the loop counter.
  llvm::Value *NextVal = llvm::ConstantInt::get(SizeTy, 1);
  Counter = Builder.CreateLoad(IndexPtr);
  NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
  Builder.CreateStore(NextVal, IndexPtr);

  // Finally, branch back up to the condition for the next iteration.
  EmitBranch(CondBlock);

  // Emit the fall-through block.
  EmitBlock(AfterFor, true);
}
692 
693 static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
694                            llvm::Value *NewPtr, llvm::Value *Size) {
695   CGF.EmitCastToVoidPtr(NewPtr);
696   CharUnits Alignment = CGF.getContext().getTypeAlignInChars(T);
697   CGF.Builder.CreateMemSet(NewPtr, CGF.Builder.getInt8(0), Size,
698                            Alignment.getQuantity(), false);
699 }
700 
/// EmitNewInitializer - Emit the initialization of the storage produced by a
/// new-expression: constructor calls, zero-initialization (possibly as a
/// single memset), or a direct store of the initializer expression.
static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               llvm::Value *NewPtr,
                               llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie) {
  if (E->isArray()) {
    if (CXXConstructorDecl *Ctor = E->getConstructor()) {
      bool RequiresZeroInitialization = false;
      if (Ctor->getParent()->hasTrivialConstructor()) {
        // If new expression did not specify value-initialization, then there
        // is no initialization.
        if (!E->hasInitializer() || Ctor->getParent()->isEmpty())
          return;

        if (CGF.CGM.getTypes().isZeroInitializable(E->getAllocatedType())) {
          // Optimization: since zero initialization will just set the memory
          // to all zeroes, generate a single memset to do it in one shot.
          EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
                         AllocSizeWithoutCookie);
          return;
        }

        // Not memset-able: have the constructor loop zero each element.
        RequiresZeroInitialization = true;
      }

      CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
                                     E->constructor_arg_begin(),
                                     E->constructor_arg_end(),
                                     RequiresZeroInitialization);
      return;
    } else if (E->getNumConstructorArgs() == 1 &&
               isa<ImplicitValueInitExpr>(E->getConstructorArg(0))) {
      // Optimization: since zero initialization will just set the memory
      // to all zeroes, generate a single memset to do it in one shot.
      EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
                     AllocSizeWithoutCookie);
      return;
    } else {
      // General case: loop over the elements storing the initializer.
      CGF.EmitNewArrayInitializer(E, NewPtr, NumElements);
      return;
    }
  }

  if (CXXConstructorDecl *Ctor = E->getConstructor()) {
    // Per C++ [expr.new]p15, if we have an initializer, then we're performing
    // direct initialization. C++ [dcl.init]p5 requires that we
    // zero-initialize storage if there are no user-declared constructors.
    if (E->hasInitializer() &&
        !Ctor->getParent()->hasUserDeclaredConstructor() &&
        !Ctor->getParent()->isEmpty())
      CGF.EmitNullInitialization(NewPtr, E->getAllocatedType());

    CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
                               NewPtr, E->constructor_arg_begin(),
                               E->constructor_arg_end());

    return;
  }
  // We have a POD type.
  if (E->getNumConstructorArgs() == 0)
    return;

  StoreAnyExprIntoOneUnit(CGF, E, NewPtr);
}
764 
namespace {
  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression.
  ///
  /// The placement arguments are stored in trailing storage allocated
  /// immediately after the cleanup object itself (the EHScopeStack
  /// reserves the extra bytes reported by getExtraSize), which is why
  /// getPlacementArgs() computes 'this+1'.
  class CallDeleteDuringNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    llvm::Value *Ptr;        // pointer returned by 'operator new'
    llvm::Value *AllocSize;  // size value that was passed to 'operator new'

    // The placement arguments live directly after this object in memory.
    RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }

  public:
    /// Number of extra trailing bytes needed to hold the placement args.
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(RValue);
    }

    CallDeleteDuringNew(size_t NumPlacementArgs,
                        const FunctionDecl *OperatorDelete,
                        llvm::Value *Ptr,
                        llvm::Value *AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    /// Store the I'th placement argument into the trailing storage.
    void setPlacementArg(unsigned I, RValue Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      // 'operator delete' takes the pointer plus the placement args; the
      // (void*, size_t) form is only accepted with no placement args.
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.push_back(std::make_pair(RValue::get(Ptr), *AI++));

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2)
        DeleteArgs.push_back(std::make_pair(RValue::get(AllocSize), *AI++));

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I)
        DeleteArgs.push_back(std::make_pair(getPlacementArgs()[I], *AI++));

      // Call 'operator delete'.
      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
    }
  };

  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression when the new expression is
  /// conditional.
  ///
  /// Unlike CallDeleteDuringNew, every value here is held as a
  /// DominatingValue saved_type so it can be restored at the point the
  /// cleanup fires, which the values' original definitions may not
  /// dominate.  The placement args use the same trailing-storage trick
  /// as CallDeleteDuringNew.
  class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    DominatingValue<RValue>::saved_type Ptr;        // saved new'd pointer
    DominatingValue<RValue>::saved_type AllocSize;  // saved allocation size

    // The saved placement arguments live directly after this object.
    DominatingValue<RValue>::saved_type *getPlacementArgs() {
      return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
    }

  public:
    /// Number of extra trailing bytes needed to hold the saved args.
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
    }

    CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
                                   const FunctionDecl *OperatorDelete,
                                   DominatingValue<RValue>::saved_type Ptr,
                              DominatingValue<RValue>::saved_type AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    /// Store the I'th saved placement argument into the trailing storage.
    void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      // Same argument-count invariant as CallDeleteDuringNew.
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*; restore it at this point.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.push_back(std::make_pair(Ptr.restore(CGF), *AI++));

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2) {
        RValue RV = AllocSize.restore(CGF);
        DeleteArgs.push_back(std::make_pair(RV, *AI++));
      }

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
        RValue RV = getPlacementArgs()[I].restore(CGF);
        DeleteArgs.push_back(std::make_pair(RV, *AI++));
      }

      // Call 'operator delete'.
      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
    }
  };
}
881 
882 /// Enter a cleanup to call 'operator delete' if the initializer in a
883 /// new-expression throws.
884 static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
885                                   const CXXNewExpr *E,
886                                   llvm::Value *NewPtr,
887                                   llvm::Value *AllocSize,
888                                   const CallArgList &NewArgs) {
889   // If we're not inside a conditional branch, then the cleanup will
890   // dominate and we can do the easier (and more efficient) thing.
891   if (!CGF.isInConditionalBranch()) {
892     CallDeleteDuringNew *Cleanup = CGF.EHStack
893       .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
894                                                  E->getNumPlacementArgs(),
895                                                  E->getOperatorDelete(),
896                                                  NewPtr, AllocSize);
897     for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
898       Cleanup->setPlacementArg(I, NewArgs[I+1].first);
899 
900     return;
901   }
902 
903   // Otherwise, we need to save all this stuff.
904   DominatingValue<RValue>::saved_type SavedNewPtr =
905     DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
906   DominatingValue<RValue>::saved_type SavedAllocSize =
907     DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
908 
909   CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
910     .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(InactiveEHCleanup,
911                                                  E->getNumPlacementArgs(),
912                                                  E->getOperatorDelete(),
913                                                  SavedNewPtr,
914                                                  SavedAllocSize);
915   for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
916     Cleanup->setPlacementArg(I,
917                      DominatingValue<RValue>::save(CGF, NewArgs[I+1].first));
918 
919   CGF.ActivateCleanupBlock(CGF.EHStack.stable_begin());
920 }
921 
/// Emit a new-expression: allocate storage, run any initializer, and
/// produce the resulting pointer (with null-check and exception-cleanup
/// plumbing where required).
llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  // Strip off any array types to find the element type actually being
  // allocated; e.g. for 'new A[3][7]' this yields 'A'.
  QualType AllocType = E->getAllocatedType();
  if (AllocType->isArrayType())
    while (const ArrayType *AType = getContext().getAsArrayType(AllocType))
      AllocType = AType->getElementType();

  FunctionDecl *NewFD = E->getOperatorNew();
  const FunctionProtoType *NewFTy = NewFD->getType()->getAs<FunctionProtoType>();

  CallArgList NewArgs;

  // The allocation size is the first argument.
  QualType SizeTy = getContext().getSizeType();

  // EmitCXXNewAllocSize also reports the element count and the size
  // without any array cookie; both are needed below.
  llvm::Value *NumElements = 0;
  llvm::Value *AllocSizeWithoutCookie = 0;
  llvm::Value *AllocSize = EmitCXXNewAllocSize(getContext(),
                                               *this, E, NumElements,
                                               AllocSizeWithoutCookie);

  NewArgs.push_back(std::make_pair(RValue::get(AllocSize), SizeTy));

  // Emit the rest of the arguments.
  // FIXME: Ideally, this should just use EmitCallArgs.
  CXXNewExpr::const_arg_iterator NewArg = E->placement_arg_begin();

  // First, use the types from the function type.
  // We start at 1 here because the first argument (the allocation size)
  // has already been emitted.
  for (unsigned i = 1, e = NewFTy->getNumArgs(); i != e; ++i, ++NewArg) {
    QualType ArgType = NewFTy->getArgType(i);

    assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
           getTypePtr() ==
           getContext().getCanonicalType(NewArg->getType()).getTypePtr() &&
           "type mismatch in call argument!");

    NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
                                     ArgType));

  }

  // Either we've emitted all the call args, or we have a call to a
  // variadic function.
  assert((NewArg == E->placement_arg_end() || NewFTy->isVariadic()) &&
         "Extra arguments in non-variadic function!");

  // If we still have any arguments, emit them using the type of the argument.
  for (CXXNewExpr::const_arg_iterator NewArgEnd = E->placement_arg_end();
       NewArg != NewArgEnd; ++NewArg) {
    QualType ArgType = NewArg->getType();
    NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
                                     ArgType));
  }

  // Emit the call to new.
  RValue RV =
    EmitCall(CGM.getTypes().getFunctionInfo(NewArgs, NewFTy),
             CGM.GetAddrOfFunction(NewFD), ReturnValueSlot(), NewArgs, NewFD);

  // If an allocation function is declared with an empty exception specification
  // it returns null to indicate failure to allocate storage. [expr.new]p13.
  // (We don't need to check for null when there's no new initializer and
  // we're allocating a POD type).
  bool NullCheckResult = NewFTy->hasEmptyExceptionSpec() &&
    !(AllocType->isPODType() && !E->hasInitializer());

  llvm::BasicBlock *NullCheckSource = 0;
  llvm::BasicBlock *NewNotNull = 0;
  llvm::BasicBlock *NewEnd = 0;

  llvm::Value *NewPtr = RV.getScalarVal();
  unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();

  // Branch around the initialization if the allocator returned null.
  if (NullCheckResult) {
    NullCheckSource = Builder.GetInsertBlock();
    NewNotNull = createBasicBlock("new.notnull");
    NewEnd = createBasicBlock("new.end");

    llvm::Value *IsNull = Builder.CreateIsNull(NewPtr, "new.isnull");
    Builder.CreateCondBr(IsNull, NewEnd, NewNotNull);
    EmitBlock(NewNotNull);
  }

  // If the size with and without the cookie differ, an array cookie is
  // required; let the ABI write it and skip NewPtr past it.
  assert((AllocSize == AllocSizeWithoutCookie) ==
         CalculateCookiePadding(*this, E).isZero());
  if (AllocSize != AllocSizeWithoutCookie) {
    assert(E->isArray());
    NewPtr = CGM.getCXXABI().InitializeArrayCookie(*this, NewPtr, NumElements,
                                                   E, AllocType);
  }

  // If there's an operator delete, enter a cleanup to call it if an
  // exception is thrown.
  EHScopeStack::stable_iterator CallOperatorDelete;
  if (E->getOperatorDelete()) {
    EnterNewDeleteCleanup(*this, E, NewPtr, AllocSize, NewArgs);
    CallOperatorDelete = EHStack.stable_begin();
  }

  const llvm::Type *ElementPtrTy
    = ConvertTypeForMem(AllocType)->getPointerTo(AS);
  NewPtr = Builder.CreateBitCast(NewPtr, ElementPtrTy);

  if (E->isArray()) {
    EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie);

    // NewPtr is a pointer to the base element type.  If we're
    // allocating an array of arrays, we'll need to cast back to the
    // array pointer type.
    const llvm::Type *ResultTy = ConvertTypeForMem(E->getType());
    if (NewPtr->getType() != ResultTy)
      NewPtr = Builder.CreateBitCast(NewPtr, ResultTy);
  } else {
    EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie);
  }

  // Deactivate the 'operator delete' cleanup if we finished
  // initialization.
  if (CallOperatorDelete.isValid())
    DeactivateCleanupBlock(CallOperatorDelete);

  // Merge the null and not-null paths: the result is either the
  // initialized pointer or null.
  if (NullCheckResult) {
    Builder.CreateBr(NewEnd);
    llvm::BasicBlock *NotNullSource = Builder.GetInsertBlock();
    EmitBlock(NewEnd);

    llvm::PHINode *PHI = Builder.CreatePHI(NewPtr->getType());
    PHI->reserveOperandSpace(2);
    PHI->addIncoming(NewPtr, NotNullSource);
    PHI->addIncoming(llvm::Constant::getNullValue(NewPtr->getType()),
                     NullCheckSource);

    NewPtr = PHI;
  }

  return NewPtr;
}
1060 
1061 void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
1062                                      llvm::Value *Ptr,
1063                                      QualType DeleteTy) {
1064   assert(DeleteFD->getOverloadedOperator() == OO_Delete);
1065 
1066   const FunctionProtoType *DeleteFTy =
1067     DeleteFD->getType()->getAs<FunctionProtoType>();
1068 
1069   CallArgList DeleteArgs;
1070 
1071   // Check if we need to pass the size to the delete operator.
1072   llvm::Value *Size = 0;
1073   QualType SizeTy;
1074   if (DeleteFTy->getNumArgs() == 2) {
1075     SizeTy = DeleteFTy->getArgType(1);
1076     CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
1077     Size = llvm::ConstantInt::get(ConvertType(SizeTy),
1078                                   DeleteTypeSize.getQuantity());
1079   }
1080 
1081   QualType ArgTy = DeleteFTy->getArgType(0);
1082   llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
1083   DeleteArgs.push_back(std::make_pair(RValue::get(DeletePtr), ArgTy));
1084 
1085   if (Size)
1086     DeleteArgs.push_back(std::make_pair(RValue::get(Size), SizeTy));
1087 
1088   // Emit the call to delete.
1089   EmitCall(CGM.getTypes().getFunctionInfo(DeleteArgs, DeleteFTy),
1090            CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
1091            DeleteArgs, DeleteFD);
1092 }
1093 
namespace {
  /// Calls the given 'operator delete' on a single object.
  ///
  /// Pushed as a cleanup so that the storage is still released if the
  /// destructor throws during a delete-expression.
  struct CallObjectDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;                   // pointer to the object being deleted
    const FunctionDecl *OperatorDelete; // the 'operator delete' to call
    QualType ElementType;               // type whose size is passed to delete

    CallObjectDelete(llvm::Value *Ptr,
                     const FunctionDecl *OperatorDelete,
                     QualType ElementType)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
    }
  };
}
1111 
/// Emit the code for deleting a single object: destroy it (if it has a
/// non-trivial destructor) and call 'operator delete' on its storage.
static void EmitObjectDelete(CodeGenFunction &CGF,
                             const FunctionDecl *OperatorDelete,
                             llvm::Value *Ptr,
                             QualType ElementType) {
  // Find the destructor for the type, if applicable.  If the
  // destructor is virtual, we'll just emit the vcall and return.
  const CXXDestructorDecl *Dtor = 0;
  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (!RD->hasTrivialDestructor()) {
      Dtor = RD->getDestructor();

      if (Dtor->isVirtual()) {
        const llvm::Type *Ty =
          CGF.getTypes().GetFunctionType(CGF.getTypes().getFunctionInfo(Dtor,
                                                               Dtor_Complete),
                                         /*isVariadic=*/false);

        // Emit a virtual call to the deleting destructor; it destroys
        // the object and performs the deallocation itself.
        llvm::Value *Callee
          = CGF.BuildVirtualCall(Dtor, Dtor_Deleting, Ptr, Ty);
        CGF.EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
                              0, 0);

        // The dtor took care of deleting the object.
        return;
      }
    }
  }

  // Make sure that we call delete even if the dtor throws.
  // This doesn't have to be a conditional cleanup because we're going
  // to pop it off in a second.
  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                            Ptr, OperatorDelete, ElementType);

  if (Dtor)
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false, Ptr);

  // Popping the cleanup block emits the call to 'operator delete'.
  CGF.PopCleanupBlock();
}
1154 
1155 namespace {
1156   /// Calls the given 'operator delete' on an array of objects.
1157   struct CallArrayDelete : EHScopeStack::Cleanup {
1158     llvm::Value *Ptr;
1159     const FunctionDecl *OperatorDelete;
1160     llvm::Value *NumElements;
1161     QualType ElementType;
1162     CharUnits CookieSize;
1163 
1164     CallArrayDelete(llvm::Value *Ptr,
1165                     const FunctionDecl *OperatorDelete,
1166                     llvm::Value *NumElements,
1167                     QualType ElementType,
1168                     CharUnits CookieSize)
1169       : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
1170         ElementType(ElementType), CookieSize(CookieSize) {}
1171 
1172     void Emit(CodeGenFunction &CGF, bool IsForEH) {
1173       const FunctionProtoType *DeleteFTy =
1174         OperatorDelete->getType()->getAs<FunctionProtoType>();
1175       assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);
1176 
1177       CallArgList Args;
1178 
1179       // Pass the pointer as the first argument.
1180       QualType VoidPtrTy = DeleteFTy->getArgType(0);
1181       llvm::Value *DeletePtr
1182         = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
1183       Args.push_back(std::make_pair(RValue::get(DeletePtr), VoidPtrTy));
1184 
1185       // Pass the original requested size as the second argument.
1186       if (DeleteFTy->getNumArgs() == 2) {
1187         QualType size_t = DeleteFTy->getArgType(1);
1188         const llvm::IntegerType *SizeTy
1189           = cast<llvm::IntegerType>(CGF.ConvertType(size_t));
1190 
1191         CharUnits ElementTypeSize =
1192           CGF.CGM.getContext().getTypeSizeInChars(ElementType);
1193 
1194         // The size of an element, multiplied by the number of elements.
1195         llvm::Value *Size
1196           = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
1197         Size = CGF.Builder.CreateMul(Size, NumElements);
1198 
1199         // Plus the size of the cookie if applicable.
1200         if (!CookieSize.isZero()) {
1201           llvm::Value *CookieSizeV
1202             = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
1203           Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
1204         }
1205 
1206         Args.push_back(std::make_pair(RValue::get(Size), size_t));
1207       }
1208 
1209       // Emit the call to delete.
1210       CGF.EmitCall(CGF.getTypes().getFunctionInfo(Args, DeleteFTy),
1211                    CGF.CGM.GetAddrOfFunction(OperatorDelete),
1212                    ReturnValueSlot(), Args, OperatorDelete);
1213     }
1214   };
1215 }
1216 
1217 /// Emit the code for deleting an array of objects.
1218 static void EmitArrayDelete(CodeGenFunction &CGF,
1219                             const CXXDeleteExpr *E,
1220                             llvm::Value *Ptr,
1221                             QualType ElementType) {
1222   llvm::Value *NumElements = 0;
1223   llvm::Value *AllocatedPtr = 0;
1224   CharUnits CookieSize;
1225   CGF.CGM.getCXXABI().ReadArrayCookie(CGF, Ptr, E, ElementType,
1226                                       NumElements, AllocatedPtr, CookieSize);
1227 
1228   assert(AllocatedPtr && "ReadArrayCookie didn't set AllocatedPtr");
1229 
1230   // Make sure that we call delete even if one of the dtors throws.
1231   const FunctionDecl *OperatorDelete = E->getOperatorDelete();
1232   CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
1233                                            AllocatedPtr, OperatorDelete,
1234                                            NumElements, ElementType,
1235                                            CookieSize);
1236 
1237   if (const CXXRecordDecl *RD = ElementType->getAsCXXRecordDecl()) {
1238     if (!RD->hasTrivialDestructor()) {
1239       assert(NumElements && "ReadArrayCookie didn't find element count"
1240                             " for a class with destructor");
1241       CGF.EmitCXXAggrDestructorCall(RD->getDestructor(), NumElements, Ptr);
1242     }
1243   }
1244 
1245   CGF.PopCleanupBlock();
1246 }
1247 
/// Emit a delete-expression: null-check the operand, then dispatch to
/// the single-object or array form of delete.
void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {

  // Get at the argument before we performed the implicit conversion
  // to void*.
  const Expr *Arg = E->getArgument();
  while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
    if (ICE->getCastKind() != CK_UserDefinedConversion &&
        ICE->getType()->isVoidPointerType())
      Arg = ICE->getSubExpr();
    else
      break;
  }

  llvm::Value *Ptr = EmitScalarExpr(Arg);

  // Null check the pointer: deleting a null pointer does nothing, so
  // branch straight to the end block in that case.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull =
    Builder.CreateICmpEQ(Ptr, llvm::Constant::getNullValue(Ptr->getType()),
                         "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);

  // We might be deleting a pointer to array.  If so, GEP down to the
  // first non-array element.
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
  QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    llvm::SmallVector<llvm::Value*,8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr
             = getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();

      // 2. GEP to the first element of the array.
      GEP.push_back(Zero);
    }

    Ptr = Builder.CreateInBoundsGEP(Ptr, GEP.begin(), GEP.end(), "del.first");
  }

  // By now Ptr must point directly at a single DeleteTy object.
  assert(ConvertTypeForMem(DeleteTy) ==
         cast<llvm::PointerType>(Ptr->getType())->getElementType());

  // Dispatch to the array or single-object form of delete.
  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E, Ptr, DeleteTy);
  } else {
    EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy);
  }

  EmitBlock(DeleteEnd);
}
1308 
/// Emit a typeid expression, producing a pointer to the appropriate
/// std::type_info object (via the RTTI descriptor or, for polymorphic
/// operands, a load from the object's vtable).
llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  QualType Ty = E->getType();
  const llvm::Type *LTy = ConvertType(Ty)->getPointerTo();

  // typeid(type): the result is just the RTTI descriptor for the
  // statically named type.
  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
      CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
    return Builder.CreateBitCast(TypeInfo, LTy);
  }

  Expr *subE = E->getExprOperand();
  Ty = subE->getType();
  CanQualType CanTy = CGM.getContext().getCanonicalType(Ty);
  Ty = CanTy.getUnqualifiedType().getNonReferenceType();
  // For a polymorphic class type, evaluate the operand and load the
  // type_info pointer out of its vtable.
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->isPolymorphic()) {
      // FIXME: if subE is an lvalue do
      LValue Obj = EmitLValue(subE);
      llvm::Value *This = Obj.getAddress();
      // We need to do a zero check for *p, unless it has NonNullAttr.
      // FIXME: PointerType->hasAttr<NonNullAttr>()
      bool CanBeZero = false;
      if (UnaryOperator *UO = dyn_cast<UnaryOperator>(subE->IgnoreParens()))
        if (UO->getOpcode() == UO_Deref)
          CanBeZero = true;
      if (CanBeZero) {
        llvm::BasicBlock *NonZeroBlock = createBasicBlock();
        llvm::BasicBlock *ZeroBlock = createBasicBlock();

        llvm::Value *Zero = llvm::Constant::getNullValue(This->getType());
        Builder.CreateCondBr(Builder.CreateICmpNE(This, Zero),
                             NonZeroBlock, ZeroBlock);
        // typeid on a null dereference calls __cxa_bad_typeid, which
        // does not return.
        EmitBlock(ZeroBlock);
        /// Call __cxa_bad_typeid
        const llvm::Type *ResultType = llvm::Type::getVoidTy(getLLVMContext());
        const llvm::FunctionType *FTy;
        FTy = llvm::FunctionType::get(ResultType, false);
        llvm::Value *F = CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
        Builder.CreateCall(F)->setDoesNotReturn();
        Builder.CreateUnreachable();
        EmitBlock(NonZeroBlock);
      }
      // The type_info pointer is stored in the slot immediately before
      // the vtable's address point, hence the GEP by -1.
      llvm::Value *V = GetVTablePtr(This, LTy->getPointerTo());
      V = Builder.CreateConstInBoundsGEP1_64(V, -1ULL);
      V = Builder.CreateLoad(V);
      return V;
    }
  }
  // Non-polymorphic operand: the type is known statically, so the
  // RTTI descriptor can be used directly.
  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(Ty), LTy);
}
1360 
/// Emit a dynamic_cast of the value V according to the cast expression
/// DCE, returning the adjusted (or null) pointer/reference result.
llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *V,
                                              const CXXDynamicCastExpr *DCE) {
  QualType SrcTy = DCE->getSubExpr()->getType();
  QualType DestTy = DCE->getTypeAsWritten();
  QualType InnerType = DestTy->getPointeeType();

  const llvm::Type *LTy = ConvertType(DCE->getType());

  // Classify the cast: a pointer dynamic_cast may produce null, a cast
  // to void* just locates the most-derived object, and a reference
  // dynamic_cast must raise an error on failure.
  bool CanBeZero = false;
  bool ToVoid = false;
  bool ThrowOnBad = false;
  if (DestTy->isPointerType()) {
    // FIXME: if PointerType->hasAttr<NonNullAttr>(), we don't set this
    CanBeZero = true;
    if (InnerType->isVoidType())
      ToVoid = true;
  } else {
    LTy = LTy->getPointerTo();

    // FIXME: What if exceptions are disabled?
    ThrowOnBad = true;
  }

  // Strip to the unqualified pointee types for the RTTI lookups below.
  if (SrcTy->isPointerType() || SrcTy->isReferenceType())
    SrcTy = SrcTy->getPointeeType();
  SrcTy = SrcTy.getUnqualifiedType();

  if (DestTy->isPointerType() || DestTy->isReferenceType())
    DestTy = DestTy->getPointeeType();
  DestTy = DestTy.getUnqualifiedType();

  // A null source pointer maps directly to a null result; branch
  // around the cast logic when the input can be null.
  llvm::BasicBlock *ContBlock = createBasicBlock();
  llvm::BasicBlock *NullBlock = 0;
  llvm::BasicBlock *NonZeroBlock = 0;
  if (CanBeZero) {
    NonZeroBlock = createBasicBlock();
    NullBlock = createBasicBlock();
    Builder.CreateCondBr(Builder.CreateIsNotNull(V), NonZeroBlock, NullBlock);
    EmitBlock(NonZeroBlock);
  }

  llvm::BasicBlock *BadCastBlock = 0;

  const llvm::Type *PtrDiffTy = ConvertType(getContext().getPointerDiffType());

  // See if this is a dynamic_cast(void*)
  if (ToVoid) {
    // Adjust the pointer by the offset-to-top value, loaded from two
    // slots before the vtable's address point.
    llvm::Value *This = V;
    V = GetVTablePtr(This, PtrDiffTy->getPointerTo());
    V = Builder.CreateConstInBoundsGEP1_64(V, -2ULL);
    V = Builder.CreateLoad(V, "offset to top");
    This = EmitCastToVoidPtr(This);
    V = Builder.CreateInBoundsGEP(This, V);
    V = Builder.CreateBitCast(V, LTy);
  } else {
    /// Call __dynamic_cast
    // Signature: void *__dynamic_cast(void*, type_info*, type_info*,
    //                                 ptrdiff_t hint).
    const llvm::Type *ResultType = Int8PtrTy;
    const llvm::FunctionType *FTy;
    std::vector<const llvm::Type*> ArgTys;
    ArgTys.push_back(Int8PtrTy);
    ArgTys.push_back(Int8PtrTy);
    ArgTys.push_back(Int8PtrTy);
    ArgTys.push_back(PtrDiffTy);
    FTy = llvm::FunctionType::get(ResultType, ArgTys, false);

    // FIXME: Calculate better hint.
    llvm::Value *hint = llvm::ConstantInt::get(PtrDiffTy, -1ULL);

    assert(SrcTy->isRecordType() && "Src type must be record type!");
    assert(DestTy->isRecordType() && "Dest type must be record type!");

    llvm::Value *SrcArg
      = CGM.GetAddrOfRTTIDescriptor(SrcTy.getUnqualifiedType());
    llvm::Value *DestArg
      = CGM.GetAddrOfRTTIDescriptor(DestTy.getUnqualifiedType());

    V = Builder.CreateBitCast(V, Int8PtrTy);
    V = Builder.CreateCall4(CGM.CreateRuntimeFunction(FTy, "__dynamic_cast"),
                            V, SrcArg, DestArg, hint);
    V = Builder.CreateBitCast(V, LTy);

    // For reference casts, a null result from __dynamic_cast means the
    // cast failed; call __cxa_bad_cast, which does not return.
    if (ThrowOnBad) {
      BadCastBlock = createBasicBlock();
      Builder.CreateCondBr(Builder.CreateIsNotNull(V), ContBlock, BadCastBlock);
      EmitBlock(BadCastBlock);
      /// Invoke __cxa_bad_cast
      ResultType = llvm::Type::getVoidTy(getLLVMContext());
      const llvm::FunctionType *FBadTy;
      FBadTy = llvm::FunctionType::get(ResultType, false);
      llvm::Value *F = CGM.CreateRuntimeFunction(FBadTy, "__cxa_bad_cast");
      if (llvm::BasicBlock *InvokeDest = getInvokeDest()) {
        llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
        Builder.CreateInvoke(F, Cont, InvokeDest)->setDoesNotReturn();
        EmitBlock(Cont);
      } else {
        // FIXME: Does this ever make sense?
        Builder.CreateCall(F)->setDoesNotReturn();
      }
      Builder.CreateUnreachable();
    }
  }

  // Merge the null and non-null paths with a phi when needed.
  if (CanBeZero) {
    Builder.CreateBr(ContBlock);
    EmitBlock(NullBlock);
    Builder.CreateBr(ContBlock);
  }
  EmitBlock(ContBlock);
  if (CanBeZero) {
    llvm::PHINode *PHI = Builder.CreatePHI(LTy);
    PHI->reserveOperandSpace(2);
    PHI->addIncoming(V, NonZeroBlock);
    PHI->addIncoming(llvm::Constant::getNullValue(LTy), NullBlock);
    V = PHI;
  }

  return V;
}
1479