1 //===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code dealing with code generation of C++ expressions
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "clang/Frontend/CodeGenOptions.h"
15 #include "CodeGenFunction.h"
16 #include "CGCUDARuntime.h"
17 #include "CGCXXABI.h"
18 #include "CGObjCRuntime.h"
19 #include "CGDebugInfo.h"
20 #include "llvm/Intrinsics.h"
21 #include "llvm/Support/CallSite.h"
22 
23 using namespace clang;
24 using namespace CodeGen;
25 
26 RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
27                                           llvm::Value *Callee,
28                                           ReturnValueSlot ReturnValue,
29                                           llvm::Value *This,
30                                           llvm::Value *VTT,
31                                           CallExpr::const_arg_iterator ArgBeg,
32                                           CallExpr::const_arg_iterator ArgEnd) {
33   assert(MD->isInstance() &&
34          "Trying to emit a member call expr on a static method!");
35 
36   CallArgList Args;
37 
38   // Push the this ptr.
39   Args.add(RValue::get(This), MD->getThisType(getContext()));
40 
41   // If there is a VTT parameter, emit it.
42   if (VTT) {
43     QualType T = getContext().getPointerType(getContext().VoidPtrTy);
44     Args.add(RValue::get(VTT), T);
45   }
46 
47   const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
48   RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());
49 
50   // And the rest of the call args.
51   EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
52 
53   return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
54                   Callee, ReturnValue, Args, MD);
55 }
56 
57 // FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
58 // quite what we want.
59 static const Expr *skipNoOpCastsAndParens(const Expr *E) {
60   while (true) {
61     if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
62       E = PE->getSubExpr();
63       continue;
64     }
65 
66     if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
67       if (CE->getCastKind() == CK_NoOp) {
68         E = CE->getSubExpr();
69         continue;
70       }
71     }
72     if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
73       if (UO->getOpcode() == UO_Extension) {
74         E = UO->getSubExpr();
75         continue;
76       }
77     }
78     return E;
79   }
80 }
81 
82 /// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on given
83 /// expr can be devirtualized.
84 static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
85                                                const Expr *Base,
86                                                const CXXMethodDecl *MD) {
87 
88   // When building with -fapple-kext, all calls must go through the vtable since
89   // the kernel linker can do runtime patching of vtables.
90   if (Context.getLangOpts().AppleKext)
91     return false;
92 
93   // If the most derived class is marked final, we know that no subclass can
94   // override this member function and so we can devirtualize it. For example:
95   //
96   // struct A { virtual void f(); }
97   // struct B final : A { };
98   //
99   // void f(B *b) {
100   //   b->f();
101   // }
102   //
103   const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType();
104   if (MostDerivedClassDecl->hasAttr<FinalAttr>())
105     return true;
106 
107   // If the member function is marked 'final', we know that it can't be
108   // overridden and can therefore devirtualize it.
109   if (MD->hasAttr<FinalAttr>())
110     return true;
111 
112   // Similarly, if the class itself is marked 'final' it can't be overridden
113   // and we can therefore devirtualize the member function call.
114   if (MD->getParent()->hasAttr<FinalAttr>())
115     return true;
116 
117   Base = skipNoOpCastsAndParens(Base);
118   if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
119     if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
120       // This is a record decl. We know the type and can devirtualize it.
121       return VD->getType()->isRecordType();
122     }
123 
124     return false;
125   }
126 
127   // We can devirtualize calls on an object accessed by a class member access
128   // expression, since by C++11 [basic.life]p6 we know that it can't refer to
129   // a derived class object constructed in the same location.
130   if (const MemberExpr *ME = dyn_cast<MemberExpr>(Base))
131     if (const ValueDecl *VD = dyn_cast<ValueDecl>(ME->getMemberDecl()))
132       return VD->getType()->isRecordType();
133 
134   // We can always devirtualize calls on temporary object expressions.
135   if (isa<CXXConstructExpr>(Base))
136     return true;
137 
138   // And calls on bound temporaries.
139   if (isa<CXXBindTemporaryExpr>(Base))
140     return true;
141 
142   // Check if this is a call expr that returns a record type.
143   if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
144     return CE->getCallReturnType()->isRecordType();
145 
146   // We can't devirtualize the call.
147   return false;
148 }
149 
150 static CXXRecordDecl *getCXXRecord(const Expr *E) {
151   QualType T = E->getType();
152   if (const PointerType *PTy = T->getAs<PointerType>())
153     T = PTy->getPointeeType();
154   const RecordType *Ty = T->castAs<RecordType>();
155   return cast<CXXRecordDecl>(Ty->getDecl());
156 }
157 
158 // Note: This function also emit constructor calls to support a MSVC
159 // extensions allowing explicit constructor function call.
160 RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
161                                               ReturnValueSlot ReturnValue) {
162   const Expr *callee = CE->getCallee()->IgnoreParens();
163 
164   if (isa<BinaryOperator>(callee))
165     return EmitCXXMemberPointerCallExpr(CE, ReturnValue);
166 
167   const MemberExpr *ME = cast<MemberExpr>(callee);
168   const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
169 
170   CGDebugInfo *DI = getDebugInfo();
171   if (DI && CGM.getCodeGenOpts().DebugInfo == CodeGenOptions::LimitedDebugInfo
172       && !isa<CallExpr>(ME->getBase())) {
173     QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
174     if (const PointerType * PTy = dyn_cast<PointerType>(PQTy)) {
175       DI->getOrCreateRecordType(PTy->getPointeeType(),
176                                 MD->getParent()->getLocation());
177     }
178   }
179 
180   if (MD->isStatic()) {
181     // The method is static, emit it as we would a regular call.
182     llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
183     return EmitCall(getContext().getPointerType(MD->getType()), Callee,
184                     ReturnValue, CE->arg_begin(), CE->arg_end());
185   }
186 
187   // Compute the object pointer.
188   const Expr *Base = ME->getBase();
189   bool CanUseVirtualCall = MD->isVirtual() && !ME->hasQualifier();
190 
191   const CXXMethodDecl *DevirtualizedMethod = NULL;
192   if (CanUseVirtualCall &&
193       canDevirtualizeMemberFunctionCalls(getContext(), Base, MD)) {
194     const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
195     DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
196     assert(DevirtualizedMethod);
197     const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
198     const Expr *Inner = Base->ignoreParenBaseCasts();
199     if (getCXXRecord(Inner) == DevirtualizedClass)
200       // If the class of the Inner expression is where the dynamic method
201       // is defined, build the this pointer from it.
202       Base = Inner;
203     else if (getCXXRecord(Base) != DevirtualizedClass) {
204       // If the method is defined in a class that is not the best dynamic
205       // one or the one of the full expression, we would have to build
206       // a derived-to-base cast to compute the correct this pointer, but
207       // we don't have support for that yet, so do a virtual call.
208       DevirtualizedMethod = NULL;
209     }
210     // If the return types are not the same, this might be a case where more
211     // code needs to run to compensate for it. For example, the derived
212     // method might return a type that inherits form from the return
213     // type of MD and has a prefix.
214     // For now we just avoid devirtualizing these covariant cases.
215     if (DevirtualizedMethod &&
216         DevirtualizedMethod->getResultType().getCanonicalType() !=
217         MD->getResultType().getCanonicalType())
218       DevirtualizedMethod = NULL;
219   }
220 
221   llvm::Value *This;
222   if (ME->isArrow())
223     This = EmitScalarExpr(Base);
224   else
225     This = EmitLValue(Base).getAddress();
226 
227 
228   if (MD->isTrivial()) {
229     if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
230     if (isa<CXXConstructorDecl>(MD) &&
231         cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
232       return RValue::get(0);
233 
234     if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
235       // We don't like to generate the trivial copy/move assignment operator
236       // when it isn't necessary; just produce the proper effect here.
237       llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
238       EmitAggregateCopy(This, RHS, CE->getType());
239       return RValue::get(This);
240     }
241 
242     if (isa<CXXConstructorDecl>(MD) &&
243         cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
244       // Trivial move and copy ctor are the same.
245       llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
246       EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
247                                      CE->arg_begin(), CE->arg_end());
248       return RValue::get(This);
249     }
250     llvm_unreachable("unknown trivial member function");
251   }
252 
253   // Compute the function type we're calling.
254   const CGFunctionInfo *FInfo = 0;
255   if (isa<CXXDestructorDecl>(MD))
256     FInfo = &CGM.getTypes().arrangeCXXDestructor(cast<CXXDestructorDecl>(MD),
257                                                  Dtor_Complete);
258   else if (isa<CXXConstructorDecl>(MD))
259     FInfo = &CGM.getTypes().arrangeCXXConstructorDeclaration(
260                                                  cast<CXXConstructorDecl>(MD),
261                                                  Ctor_Complete);
262   else
263     FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(MD);
264 
265   llvm::Type *Ty = CGM.getTypes().GetFunctionType(*FInfo);
266 
267   // C++ [class.virtual]p12:
268   //   Explicit qualification with the scope operator (5.1) suppresses the
269   //   virtual call mechanism.
270   //
271   // We also don't emit a virtual call if the base expression has a record type
272   // because then we know what the type is.
273   bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;
274 
275   llvm::Value *Callee;
276   if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
277     if (UseVirtualCall) {
278       Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
279     } else {
280       if (getContext().getLangOpts().AppleKext &&
281           MD->isVirtual() &&
282           ME->hasQualifier())
283         Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
284       else if (!DevirtualizedMethod)
285         Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
286       else {
287         const CXXDestructorDecl *DDtor =
288           cast<CXXDestructorDecl>(DevirtualizedMethod);
289         Callee = CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty);
290       }
291     }
292   } else if (const CXXConstructorDecl *Ctor =
293                dyn_cast<CXXConstructorDecl>(MD)) {
294     Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
295   } else if (UseVirtualCall) {
296       Callee = BuildVirtualCall(MD, This, Ty);
297   } else {
298     if (getContext().getLangOpts().AppleKext &&
299         MD->isVirtual() &&
300         ME->hasQualifier())
301       Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
302     else if (!DevirtualizedMethod)
303       Callee = CGM.GetAddrOfFunction(MD, Ty);
304     else {
305       Callee = CGM.GetAddrOfFunction(DevirtualizedMethod, Ty);
306     }
307   }
308 
309   return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
310                            CE->arg_begin(), CE->arg_end());
311 }
312 
313 RValue
314 CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
315                                               ReturnValueSlot ReturnValue) {
316   const BinaryOperator *BO =
317       cast<BinaryOperator>(E->getCallee()->IgnoreParens());
318   const Expr *BaseExpr = BO->getLHS();
319   const Expr *MemFnExpr = BO->getRHS();
320 
321   const MemberPointerType *MPT =
322     MemFnExpr->getType()->castAs<MemberPointerType>();
323 
324   const FunctionProtoType *FPT =
325     MPT->getPointeeType()->castAs<FunctionProtoType>();
326   const CXXRecordDecl *RD =
327     cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
328 
329   // Get the member function pointer.
330   llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
331 
332   // Emit the 'this' pointer.
333   llvm::Value *This;
334 
335   if (BO->getOpcode() == BO_PtrMemI)
336     This = EmitScalarExpr(BaseExpr);
337   else
338     This = EmitLValue(BaseExpr).getAddress();
339 
340   // Ask the ABI to load the callee.  Note that This is modified.
341   llvm::Value *Callee =
342     CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);
343 
344   CallArgList Args;
345 
346   QualType ThisType =
347     getContext().getPointerType(getContext().getTagDeclType(RD));
348 
349   // Push the this ptr.
350   Args.add(RValue::get(This), ThisType);
351 
352   RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);
353 
354   // And the rest of the call args
355   EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
356   return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required), Callee,
357                   ReturnValue, Args);
358 }
359 
360 RValue
361 CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
362                                                const CXXMethodDecl *MD,
363                                                ReturnValueSlot ReturnValue) {
364   assert(MD->isInstance() &&
365          "Trying to emit a member call expr on a static method!");
366   LValue LV = EmitLValue(E->getArg(0));
367   llvm::Value *This = LV.getAddress();
368 
369   if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
370       MD->isTrivial()) {
371     llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
372     QualType Ty = E->getType();
373     EmitAggregateCopy(This, Src, Ty);
374     return RValue::get(This);
375   }
376 
377   llvm::Value *Callee = EmitCXXOperatorMemberCallee(E, MD, This);
378   return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
379                            E->arg_begin() + 1, E->arg_end());
380 }
381 
382 RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
383                                                ReturnValueSlot ReturnValue) {
384   return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
385 }
386 
387 static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
388                                             llvm::Value *DestPtr,
389                                             const CXXRecordDecl *Base) {
390   if (Base->isEmpty())
391     return;
392 
393   DestPtr = CGF.EmitCastToVoidPtr(DestPtr);
394 
395   const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
396   CharUnits Size = Layout.getNonVirtualSize();
397   CharUnits Align = Layout.getNonVirtualAlign();
398 
399   llvm::Value *SizeVal = CGF.CGM.getSize(Size);
400 
401   // If the type contains a pointer to data member we can't memset it to zero.
402   // Instead, create a null constant and copy it to the destination.
403   // TODO: there are other patterns besides zero that we can usefully memset,
404   // like -1, which happens to be the pattern used by member-pointers.
405   // TODO: isZeroInitializable can be over-conservative in the case where a
406   // virtual base contains a member pointer.
407   if (!CGF.CGM.getTypes().isZeroInitializable(Base)) {
408     llvm::Constant *NullConstant = CGF.CGM.EmitNullConstantForBase(Base);
409 
410     llvm::GlobalVariable *NullVariable =
411       new llvm::GlobalVariable(CGF.CGM.getModule(), NullConstant->getType(),
412                                /*isConstant=*/true,
413                                llvm::GlobalVariable::PrivateLinkage,
414                                NullConstant, Twine());
415     NullVariable->setAlignment(Align.getQuantity());
416     llvm::Value *SrcPtr = CGF.EmitCastToVoidPtr(NullVariable);
417 
418     // Get and call the appropriate llvm.memcpy overload.
419     CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity());
420     return;
421   }
422 
423   // Otherwise, just memset the whole thing to zero.  This is legal
424   // because in LLVM, all default initializers (other than the ones we just
425   // handled above) are guaranteed to have a bit pattern of all zeros.
426   CGF.Builder.CreateMemSet(DestPtr, CGF.Builder.getInt8(0), SizeVal,
427                            Align.getQuantity());
428 }
429 
430 void
431 CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
432                                       AggValueSlot Dest) {
433   assert(!Dest.isIgnored() && "Must have a destination!");
434   const CXXConstructorDecl *CD = E->getConstructor();
435 
436   // If we require zero initialization before (or instead of) calling the
437   // constructor, as can be the case with a non-user-provided default
438   // constructor, emit the zero initialization now, unless destination is
439   // already zeroed.
440   if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
441     switch (E->getConstructionKind()) {
442     case CXXConstructExpr::CK_Delegating:
443     case CXXConstructExpr::CK_Complete:
444       EmitNullInitialization(Dest.getAddr(), E->getType());
445       break;
446     case CXXConstructExpr::CK_VirtualBase:
447     case CXXConstructExpr::CK_NonVirtualBase:
448       EmitNullBaseClassInitialization(*this, Dest.getAddr(), CD->getParent());
449       break;
450     }
451   }
452 
453   // If this is a call to a trivial default constructor, do nothing.
454   if (CD->isTrivial() && CD->isDefaultConstructor())
455     return;
456 
457   // Elide the constructor if we're constructing from a temporary.
458   // The temporary check is required because Sema sets this on NRVO
459   // returns.
460   if (getContext().getLangOpts().ElideConstructors && E->isElidable()) {
461     assert(getContext().hasSameUnqualifiedType(E->getType(),
462                                                E->getArg(0)->getType()));
463     if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
464       EmitAggExpr(E->getArg(0), Dest);
465       return;
466     }
467   }
468 
469   if (const ConstantArrayType *arrayType
470         = getContext().getAsConstantArrayType(E->getType())) {
471     EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddr(),
472                                E->arg_begin(), E->arg_end());
473   } else {
474     CXXCtorType Type = Ctor_Complete;
475     bool ForVirtualBase = false;
476 
477     switch (E->getConstructionKind()) {
478      case CXXConstructExpr::CK_Delegating:
479       // We should be emitting a constructor; GlobalDecl will assert this
480       Type = CurGD.getCtorType();
481       break;
482 
483      case CXXConstructExpr::CK_Complete:
484       Type = Ctor_Complete;
485       break;
486 
487      case CXXConstructExpr::CK_VirtualBase:
488       ForVirtualBase = true;
489       // fall-through
490 
491      case CXXConstructExpr::CK_NonVirtualBase:
492       Type = Ctor_Base;
493     }
494 
495     // Call the constructor.
496     EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest.getAddr(),
497                            E->arg_begin(), E->arg_end());
498   }
499 }
500 
501 void
502 CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
503                                             llvm::Value *Src,
504                                             const Expr *Exp) {
505   if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
506     Exp = E->getSubExpr();
507   assert(isa<CXXConstructExpr>(Exp) &&
508          "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
509   const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
510   const CXXConstructorDecl *CD = E->getConstructor();
511   RunCleanupsScope Scope(*this);
512 
513   // If we require zero initialization before (or instead of) calling the
514   // constructor, as can be the case with a non-user-provided default
515   // constructor, emit the zero initialization now.
516   // FIXME. Do I still need this for a copy ctor synthesis?
517   if (E->requiresZeroInitialization())
518     EmitNullInitialization(Dest, E->getType());
519 
520   assert(!getContext().getAsConstantArrayType(E->getType())
521          && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
522   EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src,
523                                  E->arg_begin(), E->arg_end());
524 }
525 
526 static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
527                                         const CXXNewExpr *E) {
528   if (!E->isArray())
529     return CharUnits::Zero();
530 
531   // No cookie is required if the operator new[] being used is the
532   // reserved placement operator new[].
533   if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
534     return CharUnits::Zero();
535 
536   return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
537 }
538 
539 static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
540                                         const CXXNewExpr *e,
541                                         unsigned minElements,
542                                         llvm::Value *&numElements,
543                                         llvm::Value *&sizeWithoutCookie) {
544   QualType type = e->getAllocatedType();
545 
546   if (!e->isArray()) {
547     CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
548     sizeWithoutCookie
549       = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
550     return sizeWithoutCookie;
551   }
552 
553   // The width of size_t.
554   unsigned sizeWidth = CGF.SizeTy->getBitWidth();
555 
556   // Figure out the cookie size.
557   llvm::APInt cookieSize(sizeWidth,
558                          CalculateCookiePadding(CGF, e).getQuantity());
559 
560   // Emit the array size expression.
561   // We multiply the size of all dimensions for NumElements.
562   // e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
563   numElements = CGF.EmitScalarExpr(e->getArraySize());
564   assert(isa<llvm::IntegerType>(numElements->getType()));
565 
566   // The number of elements can be have an arbitrary integer type;
567   // essentially, we need to multiply it by a constant factor, add a
568   // cookie size, and verify that the result is representable as a
569   // size_t.  That's just a gloss, though, and it's wrong in one
570   // important way: if the count is negative, it's an error even if
571   // the cookie size would bring the total size >= 0.
572   bool isSigned
573     = e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
574   llvm::IntegerType *numElementsType
575     = cast<llvm::IntegerType>(numElements->getType());
576   unsigned numElementsWidth = numElementsType->getBitWidth();
577 
578   // Compute the constant factor.
579   llvm::APInt arraySizeMultiplier(sizeWidth, 1);
580   while (const ConstantArrayType *CAT
581              = CGF.getContext().getAsConstantArrayType(type)) {
582     type = CAT->getElementType();
583     arraySizeMultiplier *= CAT->getSize();
584   }
585 
586   CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
587   llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
588   typeSizeMultiplier *= arraySizeMultiplier;
589 
590   // This will be a size_t.
591   llvm::Value *size;
592 
593   // If someone is doing 'new int[42]' there is no need to do a dynamic check.
594   // Don't bloat the -O0 code.
595   if (llvm::ConstantInt *numElementsC =
596         dyn_cast<llvm::ConstantInt>(numElements)) {
597     const llvm::APInt &count = numElementsC->getValue();
598 
599     bool hasAnyOverflow = false;
600 
601     // If 'count' was a negative number, it's an overflow.
602     if (isSigned && count.isNegative())
603       hasAnyOverflow = true;
604 
605     // We want to do all this arithmetic in size_t.  If numElements is
606     // wider than that, check whether it's already too big, and if so,
607     // overflow.
608     else if (numElementsWidth > sizeWidth &&
609              numElementsWidth - sizeWidth > count.countLeadingZeros())
610       hasAnyOverflow = true;
611 
612     // Okay, compute a count at the right width.
613     llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);
614 
615     // If there is a brace-initializer, we cannot allocate fewer elements than
616     // there are initializers. If we do, that's treated like an overflow.
617     if (adjustedCount.ult(minElements))
618       hasAnyOverflow = true;
619 
620     // Scale numElements by that.  This might overflow, but we don't
621     // care because it only overflows if allocationSize does, too, and
622     // if that overflows then we shouldn't use this.
623     numElements = llvm::ConstantInt::get(CGF.SizeTy,
624                                          adjustedCount * arraySizeMultiplier);
625 
626     // Compute the size before cookie, and track whether it overflowed.
627     bool overflow;
628     llvm::APInt allocationSize
629       = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
630     hasAnyOverflow |= overflow;
631 
632     // Add in the cookie, and check whether it's overflowed.
633     if (cookieSize != 0) {
634       // Save the current size without a cookie.  This shouldn't be
635       // used if there was overflow.
636       sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
637 
638       allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
639       hasAnyOverflow |= overflow;
640     }
641 
642     // On overflow, produce a -1 so operator new will fail.
643     if (hasAnyOverflow) {
644       size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
645     } else {
646       size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
647     }
648 
649   // Otherwise, we might need to use the overflow intrinsics.
650   } else {
651     // There are up to five conditions we need to test for:
652     // 1) if isSigned, we need to check whether numElements is negative;
653     // 2) if numElementsWidth > sizeWidth, we need to check whether
654     //   numElements is larger than something representable in size_t;
655     // 3) if minElements > 0, we need to check whether numElements is smaller
656     //    than that.
657     // 4) we need to compute
658     //      sizeWithoutCookie := numElements * typeSizeMultiplier
659     //    and check whether it overflows; and
660     // 5) if we need a cookie, we need to compute
661     //      size := sizeWithoutCookie + cookieSize
662     //    and check whether it overflows.
663 
664     llvm::Value *hasOverflow = 0;
665 
666     // If numElementsWidth > sizeWidth, then one way or another, we're
667     // going to have to do a comparison for (2), and this happens to
668     // take care of (1), too.
669     if (numElementsWidth > sizeWidth) {
670       llvm::APInt threshold(numElementsWidth, 1);
671       threshold <<= sizeWidth;
672 
673       llvm::Value *thresholdV
674         = llvm::ConstantInt::get(numElementsType, threshold);
675 
676       hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
677       numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);
678 
679     // Otherwise, if we're signed, we want to sext up to size_t.
680     } else if (isSigned) {
681       if (numElementsWidth < sizeWidth)
682         numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);
683 
684       // If there's a non-1 type size multiplier, then we can do the
685       // signedness check at the same time as we do the multiply
686       // because a negative number times anything will cause an
687       // unsigned overflow.  Otherwise, we have to do it here. But at least
688       // in this case, we can subsume the >= minElements check.
689       if (typeSizeMultiplier == 1)
690         hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
691                               llvm::ConstantInt::get(CGF.SizeTy, minElements));
692 
693     // Otherwise, zext up to size_t if necessary.
694     } else if (numElementsWidth < sizeWidth) {
695       numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
696     }
697 
698     assert(numElements->getType() == CGF.SizeTy);
699 
700     if (minElements) {
701       // Don't allow allocation of fewer elements than we have initializers.
702       if (!hasOverflow) {
703         hasOverflow = CGF.Builder.CreateICmpULT(numElements,
704                               llvm::ConstantInt::get(CGF.SizeTy, minElements));
705       } else if (numElementsWidth > sizeWidth) {
706         // The other existing overflow subsumes this check.
707         // We do an unsigned comparison, since any signed value < -1 is
708         // taken care of either above or below.
709         hasOverflow = CGF.Builder.CreateOr(hasOverflow,
710                           CGF.Builder.CreateICmpULT(numElements,
711                               llvm::ConstantInt::get(CGF.SizeTy, minElements)));
712       }
713     }
714 
715     size = numElements;
716 
717     // Multiply by the type size if necessary.  This multiplier
718     // includes all the factors for nested arrays.
719     //
720     // This step also causes numElements to be scaled up by the
721     // nested-array factor if necessary.  Overflow on this computation
722     // can be ignored because the result shouldn't be used if
723     // allocation fails.
724     if (typeSizeMultiplier != 1) {
725       llvm::Value *umul_with_overflow
726         = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);
727 
728       llvm::Value *tsmV =
729         llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
730       llvm::Value *result =
731         CGF.Builder.CreateCall2(umul_with_overflow, size, tsmV);
732 
733       llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
734       if (hasOverflow)
735         hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
736       else
737         hasOverflow = overflowed;
738 
739       size = CGF.Builder.CreateExtractValue(result, 0);
740 
741       // Also scale up numElements by the array size multiplier.
742       if (arraySizeMultiplier != 1) {
743         // If the base element type size is 1, then we can re-use the
744         // multiply we just did.
745         if (typeSize.isOne()) {
746           assert(arraySizeMultiplier == typeSizeMultiplier);
747           numElements = size;
748 
749         // Otherwise we need a separate multiply.
750         } else {
751           llvm::Value *asmV =
752             llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
753           numElements = CGF.Builder.CreateMul(numElements, asmV);
754         }
755       }
756     } else {
757       // numElements doesn't need to be scaled.
758       assert(arraySizeMultiplier == 1);
759     }
760 
761     // Add in the cookie size if necessary.
762     if (cookieSize != 0) {
763       sizeWithoutCookie = size;
764 
765       llvm::Value *uadd_with_overflow
766         = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);
767 
768       llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
769       llvm::Value *result =
770         CGF.Builder.CreateCall2(uadd_with_overflow, size, cookieSizeV);
771 
772       llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
773       if (hasOverflow)
774         hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
775       else
776         hasOverflow = overflowed;
777 
778       size = CGF.Builder.CreateExtractValue(result, 0);
779     }
780 
781     // If we had any possibility of dynamic overflow, make a select to
782     // overwrite 'size' with an all-ones value, which should cause
783     // operator new to throw.
784     if (hasOverflow)
785       size = CGF.Builder.CreateSelect(hasOverflow,
786                                  llvm::Constant::getAllOnesValue(CGF.SizeTy),
787                                       size);
788   }
789 
790   if (cookieSize == 0)
791     sizeWithoutCookie = size;
792   else
793     assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");
794 
795   return size;
796 }
797 
798 static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
799                                     QualType AllocType, llvm::Value *NewPtr) {
800 
801   CharUnits Alignment = CGF.getContext().getTypeAlignInChars(AllocType);
802   if (!CGF.hasAggregateLLVMType(AllocType))
803     CGF.EmitScalarInit(Init, 0, CGF.MakeAddrLValue(NewPtr, AllocType,
804                                                    Alignment),
805                        false);
806   else if (AllocType->isAnyComplexType())
807     CGF.EmitComplexExprIntoAddr(Init, NewPtr,
808                                 AllocType.isVolatileQualified());
809   else {
810     AggValueSlot Slot
811       = AggValueSlot::forAddr(NewPtr, Alignment, AllocType.getQualifiers(),
812                               AggValueSlot::IsDestructed,
813                               AggValueSlot::DoesNotNeedGCBarriers,
814                               AggValueSlot::IsNotAliased);
815     CGF.EmitAggExpr(Init, Slot);
816 
817     CGF.MaybeEmitStdInitializerListCleanup(NewPtr, Init);
818   }
819 }
820 
821 void
822 CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
823                                          QualType elementType,
824                                          llvm::Value *beginPtr,
825                                          llvm::Value *numElements) {
826   if (!E->hasInitializer())
827     return; // We have a POD type.
828 
829   llvm::Value *explicitPtr = beginPtr;
830   // Find the end of the array, hoisted out of the loop.
831   llvm::Value *endPtr =
832     Builder.CreateInBoundsGEP(beginPtr, numElements, "array.end");
833 
834   unsigned initializerElements = 0;
835 
836   const Expr *Init = E->getInitializer();
837   llvm::AllocaInst *endOfInit = 0;
838   QualType::DestructionKind dtorKind = elementType.isDestructedType();
839   EHScopeStack::stable_iterator cleanup;
840   llvm::Instruction *cleanupDominator = 0;
841   // If the initializer is an initializer list, first do the explicit elements.
842   if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
843     initializerElements = ILE->getNumInits();
844 
845     // Enter a partial-destruction cleanup if necessary.
846     if (needsEHCleanup(dtorKind)) {
847       // In principle we could tell the cleanup where we are more
848       // directly, but the control flow can get so varied here that it
849       // would actually be quite complex.  Therefore we go through an
850       // alloca.
851       endOfInit = CreateTempAlloca(beginPtr->getType(), "array.endOfInit");
852       cleanupDominator = Builder.CreateStore(beginPtr, endOfInit);
853       pushIrregularPartialArrayCleanup(beginPtr, endOfInit, elementType,
854                                        getDestroyer(dtorKind));
855       cleanup = EHStack.stable_begin();
856     }
857 
858     for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
859       // Tell the cleanup that it needs to destroy up to this
860       // element.  TODO: some of these stores can be trivially
861       // observed to be unnecessary.
862       if (endOfInit) Builder.CreateStore(explicitPtr, endOfInit);
863       StoreAnyExprIntoOneUnit(*this, ILE->getInit(i), elementType, explicitPtr);
864       explicitPtr =Builder.CreateConstGEP1_32(explicitPtr, 1, "array.exp.next");
865     }
866 
867     // The remaining elements are filled with the array filler expression.
868     Init = ILE->getArrayFiller();
869   }
870 
871   // Create the continuation block.
872   llvm::BasicBlock *contBB = createBasicBlock("new.loop.end");
873 
874   // If the number of elements isn't constant, we have to now check if there is
875   // anything left to initialize.
876   if (llvm::ConstantInt *constNum = dyn_cast<llvm::ConstantInt>(numElements)) {
877     // If all elements have already been initialized, skip the whole loop.
878     if (constNum->getZExtValue() <= initializerElements) {
879       // If there was a cleanup, deactivate it.
880       if (cleanupDominator)
881         DeactivateCleanupBlock(cleanup, cleanupDominator);;
882       return;
883     }
884   } else {
885     llvm::BasicBlock *nonEmptyBB = createBasicBlock("new.loop.nonempty");
886     llvm::Value *isEmpty = Builder.CreateICmpEQ(explicitPtr, endPtr,
887                                                 "array.isempty");
888     Builder.CreateCondBr(isEmpty, contBB, nonEmptyBB);
889     EmitBlock(nonEmptyBB);
890   }
891 
892   // Enter the loop.
893   llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
894   llvm::BasicBlock *loopBB = createBasicBlock("new.loop");
895 
896   EmitBlock(loopBB);
897 
898   // Set up the current-element phi.
899   llvm::PHINode *curPtr =
900     Builder.CreatePHI(explicitPtr->getType(), 2, "array.cur");
901   curPtr->addIncoming(explicitPtr, entryBB);
902 
903   // Store the new cleanup position for irregular cleanups.
904   if (endOfInit) Builder.CreateStore(curPtr, endOfInit);
905 
906   // Enter a partial-destruction cleanup if necessary.
907   if (!cleanupDominator && needsEHCleanup(dtorKind)) {
908     pushRegularPartialArrayCleanup(beginPtr, curPtr, elementType,
909                                    getDestroyer(dtorKind));
910     cleanup = EHStack.stable_begin();
911     cleanupDominator = Builder.CreateUnreachable();
912   }
913 
914   // Emit the initializer into this element.
915   StoreAnyExprIntoOneUnit(*this, Init, E->getAllocatedType(), curPtr);
916 
917   // Leave the cleanup if we entered one.
918   if (cleanupDominator) {
919     DeactivateCleanupBlock(cleanup, cleanupDominator);
920     cleanupDominator->eraseFromParent();
921   }
922 
923   // Advance to the next element.
924   llvm::Value *nextPtr = Builder.CreateConstGEP1_32(curPtr, 1, "array.next");
925 
926   // Check whether we've gotten to the end of the array and, if so,
927   // exit the loop.
928   llvm::Value *isEnd = Builder.CreateICmpEQ(nextPtr, endPtr, "array.atend");
929   Builder.CreateCondBr(isEnd, contBB, loopBB);
930   curPtr->addIncoming(nextPtr, Builder.GetInsertBlock());
931 
932   EmitBlock(contBB);
933 }
934 
935 static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
936                            llvm::Value *NewPtr, llvm::Value *Size) {
937   CGF.EmitCastToVoidPtr(NewPtr);
938   CharUnits Alignment = CGF.getContext().getTypeAlignInChars(T);
939   CGF.Builder.CreateMemSet(NewPtr, CGF.Builder.getInt8(0), Size,
940                            Alignment.getQuantity(), false);
941 }
942 
943 static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
944                                QualType ElementType,
945                                llvm::Value *NewPtr,
946                                llvm::Value *NumElements,
947                                llvm::Value *AllocSizeWithoutCookie) {
948   const Expr *Init = E->getInitializer();
949   if (E->isArray()) {
950     if (const CXXConstructExpr *CCE = dyn_cast_or_null<CXXConstructExpr>(Init)){
951       CXXConstructorDecl *Ctor = CCE->getConstructor();
952       bool RequiresZeroInitialization = false;
953       if (Ctor->isTrivial()) {
954         // If new expression did not specify value-initialization, then there
955         // is no initialization.
956         if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
957           return;
958 
959         if (CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
960           // Optimization: since zero initialization will just set the memory
961           // to all zeroes, generate a single memset to do it in one shot.
962           EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
963           return;
964         }
965 
966         RequiresZeroInitialization = true;
967       }
968 
969       CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
970                                      CCE->arg_begin(),  CCE->arg_end(),
971                                      RequiresZeroInitialization);
972       return;
973     } else if (Init && isa<ImplicitValueInitExpr>(Init) &&
974                CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
975       // Optimization: since zero initialization will just set the memory
976       // to all zeroes, generate a single memset to do it in one shot.
977       EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
978       return;
979     }
980     CGF.EmitNewArrayInitializer(E, ElementType, NewPtr, NumElements);
981     return;
982   }
983 
984   if (!Init)
985     return;
986 
987   StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr);
988 }
989 
990 namespace {
991   /// A cleanup to call the given 'operator delete' function upon
992   /// abnormal exit from a new expression.
993   class CallDeleteDuringNew : public EHScopeStack::Cleanup {
994     size_t NumPlacementArgs;
995     const FunctionDecl *OperatorDelete;
996     llvm::Value *Ptr;
997     llvm::Value *AllocSize;
998 
999     RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }
1000 
1001   public:
1002     static size_t getExtraSize(size_t NumPlacementArgs) {
1003       return NumPlacementArgs * sizeof(RValue);
1004     }
1005 
1006     CallDeleteDuringNew(size_t NumPlacementArgs,
1007                         const FunctionDecl *OperatorDelete,
1008                         llvm::Value *Ptr,
1009                         llvm::Value *AllocSize)
1010       : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
1011         Ptr(Ptr), AllocSize(AllocSize) {}
1012 
1013     void setPlacementArg(unsigned I, RValue Arg) {
1014       assert(I < NumPlacementArgs && "index out of range");
1015       getPlacementArgs()[I] = Arg;
1016     }
1017 
1018     void Emit(CodeGenFunction &CGF, Flags flags) {
1019       const FunctionProtoType *FPT
1020         = OperatorDelete->getType()->getAs<FunctionProtoType>();
1021       assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
1022              (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));
1023 
1024       CallArgList DeleteArgs;
1025 
1026       // The first argument is always a void*.
1027       FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
1028       DeleteArgs.add(RValue::get(Ptr), *AI++);
1029 
1030       // A member 'operator delete' can take an extra 'size_t' argument.
1031       if (FPT->getNumArgs() == NumPlacementArgs + 2)
1032         DeleteArgs.add(RValue::get(AllocSize), *AI++);
1033 
1034       // Pass the rest of the arguments, which must match exactly.
1035       for (unsigned I = 0; I != NumPlacementArgs; ++I)
1036         DeleteArgs.add(getPlacementArgs()[I], *AI++);
1037 
1038       // Call 'operator delete'.
1039       CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(DeleteArgs, FPT),
1040                    CGF.CGM.GetAddrOfFunction(OperatorDelete),
1041                    ReturnValueSlot(), DeleteArgs, OperatorDelete);
1042     }
1043   };
1044 
1045   /// A cleanup to call the given 'operator delete' function upon
1046   /// abnormal exit from a new expression when the new expression is
1047   /// conditional.
1048   class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
1049     size_t NumPlacementArgs;
1050     const FunctionDecl *OperatorDelete;
1051     DominatingValue<RValue>::saved_type Ptr;
1052     DominatingValue<RValue>::saved_type AllocSize;
1053 
1054     DominatingValue<RValue>::saved_type *getPlacementArgs() {
1055       return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
1056     }
1057 
1058   public:
1059     static size_t getExtraSize(size_t NumPlacementArgs) {
1060       return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
1061     }
1062 
1063     CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
1064                                    const FunctionDecl *OperatorDelete,
1065                                    DominatingValue<RValue>::saved_type Ptr,
1066                               DominatingValue<RValue>::saved_type AllocSize)
1067       : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
1068         Ptr(Ptr), AllocSize(AllocSize) {}
1069 
1070     void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
1071       assert(I < NumPlacementArgs && "index out of range");
1072       getPlacementArgs()[I] = Arg;
1073     }
1074 
1075     void Emit(CodeGenFunction &CGF, Flags flags) {
1076       const FunctionProtoType *FPT
1077         = OperatorDelete->getType()->getAs<FunctionProtoType>();
1078       assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
1079              (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));
1080 
1081       CallArgList DeleteArgs;
1082 
1083       // The first argument is always a void*.
1084       FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
1085       DeleteArgs.add(Ptr.restore(CGF), *AI++);
1086 
1087       // A member 'operator delete' can take an extra 'size_t' argument.
1088       if (FPT->getNumArgs() == NumPlacementArgs + 2) {
1089         RValue RV = AllocSize.restore(CGF);
1090         DeleteArgs.add(RV, *AI++);
1091       }
1092 
1093       // Pass the rest of the arguments, which must match exactly.
1094       for (unsigned I = 0; I != NumPlacementArgs; ++I) {
1095         RValue RV = getPlacementArgs()[I].restore(CGF);
1096         DeleteArgs.add(RV, *AI++);
1097       }
1098 
1099       // Call 'operator delete'.
1100       CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(DeleteArgs, FPT),
1101                    CGF.CGM.GetAddrOfFunction(OperatorDelete),
1102                    ReturnValueSlot(), DeleteArgs, OperatorDelete);
1103     }
1104   };
1105 }
1106 
1107 /// Enter a cleanup to call 'operator delete' if the initializer in a
1108 /// new-expression throws.
1109 static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
1110                                   const CXXNewExpr *E,
1111                                   llvm::Value *NewPtr,
1112                                   llvm::Value *AllocSize,
1113                                   const CallArgList &NewArgs) {
1114   // If we're not inside a conditional branch, then the cleanup will
1115   // dominate and we can do the easier (and more efficient) thing.
1116   if (!CGF.isInConditionalBranch()) {
1117     CallDeleteDuringNew *Cleanup = CGF.EHStack
1118       .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
1119                                                  E->getNumPlacementArgs(),
1120                                                  E->getOperatorDelete(),
1121                                                  NewPtr, AllocSize);
1122     for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
1123       Cleanup->setPlacementArg(I, NewArgs[I+1].RV);
1124 
1125     return;
1126   }
1127 
1128   // Otherwise, we need to save all this stuff.
1129   DominatingValue<RValue>::saved_type SavedNewPtr =
1130     DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
1131   DominatingValue<RValue>::saved_type SavedAllocSize =
1132     DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
1133 
1134   CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
1135     .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(EHCleanup,
1136                                                  E->getNumPlacementArgs(),
1137                                                  E->getOperatorDelete(),
1138                                                  SavedNewPtr,
1139                                                  SavedAllocSize);
1140   for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
1141     Cleanup->setPlacementArg(I,
1142                      DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV));
1143 
1144   CGF.initFullExprCleanup();
1145 }
1146 
1147 llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
1148   // The element type being allocated.
1149   QualType allocType = getContext().getBaseElementType(E->getAllocatedType());
1150 
1151   // 1. Build a call to the allocation function.
1152   FunctionDecl *allocator = E->getOperatorNew();
1153   const FunctionProtoType *allocatorType =
1154     allocator->getType()->castAs<FunctionProtoType>();
1155 
1156   CallArgList allocatorArgs;
1157 
1158   // The allocation size is the first argument.
1159   QualType sizeType = getContext().getSizeType();
1160 
1161   // If there is a brace-initializer, cannot allocate fewer elements than inits.
1162   unsigned minElements = 0;
1163   if (E->isArray() && E->hasInitializer()) {
1164     if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer()))
1165       minElements = ILE->getNumInits();
1166   }
1167 
1168   llvm::Value *numElements = 0;
1169   llvm::Value *allocSizeWithoutCookie = 0;
1170   llvm::Value *allocSize =
1171     EmitCXXNewAllocSize(*this, E, minElements, numElements,
1172                         allocSizeWithoutCookie);
1173 
1174   allocatorArgs.add(RValue::get(allocSize), sizeType);
1175 
1176   // Emit the rest of the arguments.
1177   // FIXME: Ideally, this should just use EmitCallArgs.
1178   CXXNewExpr::const_arg_iterator placementArg = E->placement_arg_begin();
1179 
1180   // First, use the types from the function type.
1181   // We start at 1 here because the first argument (the allocation size)
1182   // has already been emitted.
1183   for (unsigned i = 1, e = allocatorType->getNumArgs(); i != e;
1184        ++i, ++placementArg) {
1185     QualType argType = allocatorType->getArgType(i);
1186 
1187     assert(getContext().hasSameUnqualifiedType(argType.getNonReferenceType(),
1188                                                placementArg->getType()) &&
1189            "type mismatch in call argument!");
1190 
1191     EmitCallArg(allocatorArgs, *placementArg, argType);
1192   }
1193 
1194   // Either we've emitted all the call args, or we have a call to a
1195   // variadic function.
1196   assert((placementArg == E->placement_arg_end() ||
1197           allocatorType->isVariadic()) &&
1198          "Extra arguments to non-variadic function!");
1199 
1200   // If we still have any arguments, emit them using the type of the argument.
1201   for (CXXNewExpr::const_arg_iterator placementArgsEnd = E->placement_arg_end();
1202        placementArg != placementArgsEnd; ++placementArg) {
1203     EmitCallArg(allocatorArgs, *placementArg, placementArg->getType());
1204   }
1205 
1206   // Emit the allocation call.  If the allocator is a global placement
1207   // operator, just "inline" it directly.
1208   RValue RV;
1209   if (allocator->isReservedGlobalPlacementOperator()) {
1210     assert(allocatorArgs.size() == 2);
1211     RV = allocatorArgs[1].RV;
1212     // TODO: kill any unnecessary computations done for the size
1213     // argument.
1214   } else {
1215     RV = EmitCall(CGM.getTypes().arrangeFreeFunctionCall(allocatorArgs,
1216                                                          allocatorType),
1217                   CGM.GetAddrOfFunction(allocator), ReturnValueSlot(),
1218                   allocatorArgs, allocator);
1219   }
1220 
1221   // Emit a null check on the allocation result if the allocation
1222   // function is allowed to return null (because it has a non-throwing
1223   // exception spec; for this part, we inline
1224   // CXXNewExpr::shouldNullCheckAllocation()) and we have an
1225   // interesting initializer.
1226   bool nullCheck = allocatorType->isNothrow(getContext()) &&
1227     (!allocType.isPODType(getContext()) || E->hasInitializer());
1228 
1229   llvm::BasicBlock *nullCheckBB = 0;
1230   llvm::BasicBlock *contBB = 0;
1231 
1232   llvm::Value *allocation = RV.getScalarVal();
1233   unsigned AS =
1234     cast<llvm::PointerType>(allocation->getType())->getAddressSpace();
1235 
1236   // The null-check means that the initializer is conditionally
1237   // evaluated.
1238   ConditionalEvaluation conditional(*this);
1239 
1240   if (nullCheck) {
1241     conditional.begin(*this);
1242 
1243     nullCheckBB = Builder.GetInsertBlock();
1244     llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
1245     contBB = createBasicBlock("new.cont");
1246 
1247     llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
1248     Builder.CreateCondBr(isNull, contBB, notNullBB);
1249     EmitBlock(notNullBB);
1250   }
1251 
1252   // If there's an operator delete, enter a cleanup to call it if an
1253   // exception is thrown.
1254   EHScopeStack::stable_iterator operatorDeleteCleanup;
1255   llvm::Instruction *cleanupDominator = 0;
1256   if (E->getOperatorDelete() &&
1257       !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
1258     EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
1259     operatorDeleteCleanup = EHStack.stable_begin();
1260     cleanupDominator = Builder.CreateUnreachable();
1261   }
1262 
1263   assert((allocSize == allocSizeWithoutCookie) ==
1264          CalculateCookiePadding(*this, E).isZero());
1265   if (allocSize != allocSizeWithoutCookie) {
1266     assert(E->isArray());
1267     allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
1268                                                        numElements,
1269                                                        E, allocType);
1270   }
1271 
1272   llvm::Type *elementPtrTy
1273     = ConvertTypeForMem(allocType)->getPointerTo(AS);
1274   llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy);
1275 
1276   EmitNewInitializer(*this, E, allocType, result, numElements,
1277                      allocSizeWithoutCookie);
1278   if (E->isArray()) {
1279     // NewPtr is a pointer to the base element type.  If we're
1280     // allocating an array of arrays, we'll need to cast back to the
1281     // array pointer type.
1282     llvm::Type *resultType = ConvertTypeForMem(E->getType());
1283     if (result->getType() != resultType)
1284       result = Builder.CreateBitCast(result, resultType);
1285   }
1286 
1287   // Deactivate the 'operator delete' cleanup if we finished
1288   // initialization.
1289   if (operatorDeleteCleanup.isValid()) {
1290     DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
1291     cleanupDominator->eraseFromParent();
1292   }
1293 
1294   if (nullCheck) {
1295     conditional.end(*this);
1296 
1297     llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
1298     EmitBlock(contBB);
1299 
1300     llvm::PHINode *PHI = Builder.CreatePHI(result->getType(), 2);
1301     PHI->addIncoming(result, notNullBB);
1302     PHI->addIncoming(llvm::Constant::getNullValue(result->getType()),
1303                      nullCheckBB);
1304 
1305     result = PHI;
1306   }
1307 
1308   return result;
1309 }
1310 
1311 void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
1312                                      llvm::Value *Ptr,
1313                                      QualType DeleteTy) {
1314   assert(DeleteFD->getOverloadedOperator() == OO_Delete);
1315 
1316   const FunctionProtoType *DeleteFTy =
1317     DeleteFD->getType()->getAs<FunctionProtoType>();
1318 
1319   CallArgList DeleteArgs;
1320 
1321   // Check if we need to pass the size to the delete operator.
1322   llvm::Value *Size = 0;
1323   QualType SizeTy;
1324   if (DeleteFTy->getNumArgs() == 2) {
1325     SizeTy = DeleteFTy->getArgType(1);
1326     CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
1327     Size = llvm::ConstantInt::get(ConvertType(SizeTy),
1328                                   DeleteTypeSize.getQuantity());
1329   }
1330 
1331   QualType ArgTy = DeleteFTy->getArgType(0);
1332   llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
1333   DeleteArgs.add(RValue::get(DeletePtr), ArgTy);
1334 
1335   if (Size)
1336     DeleteArgs.add(RValue::get(Size), SizeTy);
1337 
1338   // Emit the call to delete.
1339   EmitCall(CGM.getTypes().arrangeFreeFunctionCall(DeleteArgs, DeleteFTy),
1340            CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
1341            DeleteArgs, DeleteFD);
1342 }
1343 
1344 namespace {
1345   /// Calls the given 'operator delete' on a single object.
1346   struct CallObjectDelete : EHScopeStack::Cleanup {
1347     llvm::Value *Ptr;
1348     const FunctionDecl *OperatorDelete;
1349     QualType ElementType;
1350 
1351     CallObjectDelete(llvm::Value *Ptr,
1352                      const FunctionDecl *OperatorDelete,
1353                      QualType ElementType)
1354       : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
1355 
1356     void Emit(CodeGenFunction &CGF, Flags flags) {
1357       CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
1358     }
1359   };
1360 }
1361 
1362 /// Emit the code for deleting a single object.
1363 static void EmitObjectDelete(CodeGenFunction &CGF,
1364                              const FunctionDecl *OperatorDelete,
1365                              llvm::Value *Ptr,
1366                              QualType ElementType,
1367                              bool UseGlobalDelete) {
1368   // Find the destructor for the type, if applicable.  If the
1369   // destructor is virtual, we'll just emit the vcall and return.
1370   const CXXDestructorDecl *Dtor = 0;
1371   if (const RecordType *RT = ElementType->getAs<RecordType>()) {
1372     CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1373     if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
1374       Dtor = RD->getDestructor();
1375 
1376       if (Dtor->isVirtual()) {
1377         if (UseGlobalDelete) {
1378           // If we're supposed to call the global delete, make sure we do so
1379           // even if the destructor throws.
1380           CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
1381                                                     Ptr, OperatorDelete,
1382                                                     ElementType);
1383         }
1384 
1385         llvm::Type *Ty =
1386           CGF.getTypes().GetFunctionType(
1387                          CGF.getTypes().arrangeCXXDestructor(Dtor, Dtor_Complete));
1388 
1389         llvm::Value *Callee
1390           = CGF.BuildVirtualCall(Dtor,
1391                                  UseGlobalDelete? Dtor_Complete : Dtor_Deleting,
1392                                  Ptr, Ty);
1393         CGF.EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
1394                               0, 0);
1395 
1396         if (UseGlobalDelete) {
1397           CGF.PopCleanupBlock();
1398         }
1399 
1400         return;
1401       }
1402     }
1403   }
1404 
1405   // Make sure that we call delete even if the dtor throws.
1406   // This doesn't have to a conditional cleanup because we're going
1407   // to pop it off in a second.
1408   CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
1409                                             Ptr, OperatorDelete, ElementType);
1410 
1411   if (Dtor)
1412     CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
1413                               /*ForVirtualBase=*/false, Ptr);
1414   else if (CGF.getLangOpts().ObjCAutoRefCount &&
1415            ElementType->isObjCLifetimeType()) {
1416     switch (ElementType.getObjCLifetime()) {
1417     case Qualifiers::OCL_None:
1418     case Qualifiers::OCL_ExplicitNone:
1419     case Qualifiers::OCL_Autoreleasing:
1420       break;
1421 
1422     case Qualifiers::OCL_Strong: {
1423       // Load the pointer value.
1424       llvm::Value *PtrValue = CGF.Builder.CreateLoad(Ptr,
1425                                              ElementType.isVolatileQualified());
1426 
1427       CGF.EmitARCRelease(PtrValue, /*precise*/ true);
1428       break;
1429     }
1430 
1431     case Qualifiers::OCL_Weak:
1432       CGF.EmitARCDestroyWeak(Ptr);
1433       break;
1434     }
1435   }
1436 
1437   CGF.PopCleanupBlock();
1438 }
1439 
1440 namespace {
1441   /// Calls the given 'operator delete' on an array of objects.
1442   struct CallArrayDelete : EHScopeStack::Cleanup {
1443     llvm::Value *Ptr;
1444     const FunctionDecl *OperatorDelete;
1445     llvm::Value *NumElements;
1446     QualType ElementType;
1447     CharUnits CookieSize;
1448 
1449     CallArrayDelete(llvm::Value *Ptr,
1450                     const FunctionDecl *OperatorDelete,
1451                     llvm::Value *NumElements,
1452                     QualType ElementType,
1453                     CharUnits CookieSize)
1454       : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
1455         ElementType(ElementType), CookieSize(CookieSize) {}
1456 
1457     void Emit(CodeGenFunction &CGF, Flags flags) {
1458       const FunctionProtoType *DeleteFTy =
1459         OperatorDelete->getType()->getAs<FunctionProtoType>();
1460       assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);
1461 
1462       CallArgList Args;
1463 
1464       // Pass the pointer as the first argument.
1465       QualType VoidPtrTy = DeleteFTy->getArgType(0);
1466       llvm::Value *DeletePtr
1467         = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
1468       Args.add(RValue::get(DeletePtr), VoidPtrTy);
1469 
1470       // Pass the original requested size as the second argument.
1471       if (DeleteFTy->getNumArgs() == 2) {
1472         QualType size_t = DeleteFTy->getArgType(1);
1473         llvm::IntegerType *SizeTy
1474           = cast<llvm::IntegerType>(CGF.ConvertType(size_t));
1475 
1476         CharUnits ElementTypeSize =
1477           CGF.CGM.getContext().getTypeSizeInChars(ElementType);
1478 
1479         // The size of an element, multiplied by the number of elements.
1480         llvm::Value *Size
1481           = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
1482         Size = CGF.Builder.CreateMul(Size, NumElements);
1483 
1484         // Plus the size of the cookie if applicable.
1485         if (!CookieSize.isZero()) {
1486           llvm::Value *CookieSizeV
1487             = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
1488           Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
1489         }
1490 
1491         Args.add(RValue::get(Size), size_t);
1492       }
1493 
1494       // Emit the call to delete.
1495       CGF.EmitCall(CGF.getTypes().arrangeFreeFunctionCall(Args, DeleteFTy),
1496                    CGF.CGM.GetAddrOfFunction(OperatorDelete),
1497                    ReturnValueSlot(), Args, OperatorDelete);
1498     }
1499   };
1500 }
1501 
1502 /// Emit the code for deleting an array of objects.
1503 static void EmitArrayDelete(CodeGenFunction &CGF,
1504                             const CXXDeleteExpr *E,
1505                             llvm::Value *deletedPtr,
1506                             QualType elementType) {
1507   llvm::Value *numElements = 0;
1508   llvm::Value *allocatedPtr = 0;
1509   CharUnits cookieSize;
1510   CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
1511                                       numElements, allocatedPtr, cookieSize);
1512 
1513   assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");
1514 
1515   // Make sure that we call delete even if one of the dtors throws.
1516   const FunctionDecl *operatorDelete = E->getOperatorDelete();
1517   CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
1518                                            allocatedPtr, operatorDelete,
1519                                            numElements, elementType,
1520                                            cookieSize);
1521 
1522   // Destroy the elements.
1523   if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
1524     assert(numElements && "no element count for a type with a destructor!");
1525 
1526     llvm::Value *arrayEnd =
1527       CGF.Builder.CreateInBoundsGEP(deletedPtr, numElements, "delete.end");
1528 
1529     // Note that it is legal to allocate a zero-length array, and we
1530     // can never fold the check away because the length should always
1531     // come from a cookie.
1532     CGF.emitArrayDestroy(deletedPtr, arrayEnd, elementType,
1533                          CGF.getDestroyer(dtorKind),
1534                          /*checkZeroLength*/ true,
1535                          CGF.needsEHCleanup(dtorKind));
1536   }
1537 
1538   // Pop the cleanup block.
1539   CGF.PopCleanupBlock();
1540 }
1541 
1542 void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
1543   const Expr *Arg = E->getArgument();
1544   llvm::Value *Ptr = EmitScalarExpr(Arg);
1545 
1546   // Null check the pointer.
1547   llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
1548   llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");
1549 
1550   llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");
1551 
1552   Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
1553   EmitBlock(DeleteNotNull);
1554 
1555   // We might be deleting a pointer to array.  If so, GEP down to the
1556   // first non-array element.
1557   // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
1558   QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
1559   if (DeleteTy->isConstantArrayType()) {
1560     llvm::Value *Zero = Builder.getInt32(0);
1561     SmallVector<llvm::Value*,8> GEP;
1562 
1563     GEP.push_back(Zero); // point at the outermost array
1564 
1565     // For each layer of array type we're pointing at:
1566     while (const ConstantArrayType *Arr
1567              = getContext().getAsConstantArrayType(DeleteTy)) {
1568       // 1. Unpeel the array type.
1569       DeleteTy = Arr->getElementType();
1570 
1571       // 2. GEP to the first element of the array.
1572       GEP.push_back(Zero);
1573     }
1574 
1575     Ptr = Builder.CreateInBoundsGEP(Ptr, GEP, "del.first");
1576   }
1577 
1578   assert(ConvertTypeForMem(DeleteTy) ==
1579          cast<llvm::PointerType>(Ptr->getType())->getElementType());
1580 
1581   if (E->isArrayForm()) {
1582     EmitArrayDelete(*this, E, Ptr, DeleteTy);
1583   } else {
1584     EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy,
1585                      E->isGlobalDelete());
1586   }
1587 
1588   EmitBlock(DeleteEnd);
1589 }
1590 
1591 static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
1592   // void __cxa_bad_typeid();
1593   llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1594 
1595   return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1596 }
1597 
1598 static void EmitBadTypeidCall(CodeGenFunction &CGF) {
1599   llvm::Value *Fn = getBadTypeidFn(CGF);
1600   CGF.EmitCallOrInvoke(Fn).setDoesNotReturn();
1601   CGF.Builder.CreateUnreachable();
1602 }
1603 
1604 static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF,
1605                                          const Expr *E,
1606                                          llvm::Type *StdTypeInfoPtrTy) {
1607   // Get the vtable pointer.
1608   llvm::Value *ThisPtr = CGF.EmitLValue(E).getAddress();
1609 
1610   // C++ [expr.typeid]p2:
1611   //   If the glvalue expression is obtained by applying the unary * operator to
1612   //   a pointer and the pointer is a null pointer value, the typeid expression
1613   //   throws the std::bad_typeid exception.
1614   if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E->IgnoreParens())) {
1615     if (UO->getOpcode() == UO_Deref) {
1616       llvm::BasicBlock *BadTypeidBlock =
1617         CGF.createBasicBlock("typeid.bad_typeid");
1618       llvm::BasicBlock *EndBlock =
1619         CGF.createBasicBlock("typeid.end");
1620 
1621       llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr);
1622       CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);
1623 
1624       CGF.EmitBlock(BadTypeidBlock);
1625       EmitBadTypeidCall(CGF);
1626       CGF.EmitBlock(EndBlock);
1627     }
1628   }
1629 
1630   llvm::Value *Value = CGF.GetVTablePtr(ThisPtr,
1631                                         StdTypeInfoPtrTy->getPointerTo());
1632 
1633   // Load the type info.
1634   Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
1635   return CGF.Builder.CreateLoad(Value);
1636 }
1637 
1638 llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
1639   llvm::Type *StdTypeInfoPtrTy =
1640     ConvertType(E->getType())->getPointerTo();
1641 
1642   if (E->isTypeOperand()) {
1643     llvm::Constant *TypeInfo =
1644       CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
1645     return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
1646   }
1647 
1648   // C++ [expr.typeid]p2:
1649   //   When typeid is applied to a glvalue expression whose type is a
1650   //   polymorphic class type, the result refers to a std::type_info object
1651   //   representing the type of the most derived object (that is, the dynamic
1652   //   type) to which the glvalue refers.
1653   if (E->isPotentiallyEvaluated())
1654     return EmitTypeidFromVTable(*this, E->getExprOperand(),
1655                                 StdTypeInfoPtrTy);
1656 
1657   QualType OperandTy = E->getExprOperand()->getType();
1658   return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
1659                                StdTypeInfoPtrTy);
1660 }
1661 
1662 static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
1663   // void *__dynamic_cast(const void *sub,
1664   //                      const abi::__class_type_info *src,
1665   //                      const abi::__class_type_info *dst,
1666   //                      std::ptrdiff_t src2dst_offset);
1667 
1668   llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1669   llvm::Type *PtrDiffTy =
1670     CGF.ConvertType(CGF.getContext().getPointerDiffType());
1671 
1672   llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
1673 
1674   llvm::FunctionType *FTy =
1675     llvm::FunctionType::get(Int8PtrTy, Args, false);
1676 
1677   return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast");
1678 }
1679 
1680 static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
1681   // void __cxa_bad_cast();
1682   llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1683   return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1684 }
1685 
1686 static void EmitBadCastCall(CodeGenFunction &CGF) {
1687   llvm::Value *Fn = getBadCastFn(CGF);
1688   CGF.EmitCallOrInvoke(Fn).setDoesNotReturn();
1689   CGF.Builder.CreateUnreachable();
1690 }
1691 
1692 static llvm::Value *
1693 EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
1694                     QualType SrcTy, QualType DestTy,
1695                     llvm::BasicBlock *CastEnd) {
1696   llvm::Type *PtrDiffLTy =
1697     CGF.ConvertType(CGF.getContext().getPointerDiffType());
1698   llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1699 
1700   if (const PointerType *PTy = DestTy->getAs<PointerType>()) {
1701     if (PTy->getPointeeType()->isVoidType()) {
1702       // C++ [expr.dynamic.cast]p7:
1703       //   If T is "pointer to cv void," then the result is a pointer to the
1704       //   most derived object pointed to by v.
1705 
1706       // Get the vtable pointer.
1707       llvm::Value *VTable = CGF.GetVTablePtr(Value, PtrDiffLTy->getPointerTo());
1708 
1709       // Get the offset-to-top from the vtable.
1710       llvm::Value *OffsetToTop =
1711         CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
1712       OffsetToTop = CGF.Builder.CreateLoad(OffsetToTop, "offset.to.top");
1713 
1714       // Finally, add the offset to the pointer.
1715       Value = CGF.EmitCastToVoidPtr(Value);
1716       Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);
1717 
1718       return CGF.Builder.CreateBitCast(Value, DestLTy);
1719     }
1720   }
1721 
1722   QualType SrcRecordTy;
1723   QualType DestRecordTy;
1724 
1725   if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) {
1726     SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
1727     DestRecordTy = DestPTy->getPointeeType();
1728   } else {
1729     SrcRecordTy = SrcTy;
1730     DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
1731   }
1732 
1733   assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
1734   assert(DestRecordTy->isRecordType() && "dest type must be a record type!");
1735 
1736   llvm::Value *SrcRTTI =
1737     CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
1738   llvm::Value *DestRTTI =
1739     CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());
1740 
1741   // FIXME: Actually compute a hint here.
1742   llvm::Value *OffsetHint = llvm::ConstantInt::get(PtrDiffLTy, -1ULL);
1743 
1744   // Emit the call to __dynamic_cast.
1745   Value = CGF.EmitCastToVoidPtr(Value);
1746   Value = CGF.Builder.CreateCall4(getDynamicCastFn(CGF), Value,
1747                                   SrcRTTI, DestRTTI, OffsetHint);
1748   Value = CGF.Builder.CreateBitCast(Value, DestLTy);
1749 
1750   /// C++ [expr.dynamic.cast]p9:
1751   ///   A failed cast to reference type throws std::bad_cast
1752   if (DestTy->isReferenceType()) {
1753     llvm::BasicBlock *BadCastBlock =
1754       CGF.createBasicBlock("dynamic_cast.bad_cast");
1755 
1756     llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
1757     CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);
1758 
1759     CGF.EmitBlock(BadCastBlock);
1760     EmitBadCastCall(CGF);
1761   }
1762 
1763   return Value;
1764 }
1765 
1766 static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
1767                                           QualType DestTy) {
1768   llvm::Type *DestLTy = CGF.ConvertType(DestTy);
1769   if (DestTy->isPointerType())
1770     return llvm::Constant::getNullValue(DestLTy);
1771 
1772   /// C++ [expr.dynamic.cast]p9:
1773   ///   A failed cast to reference type throws std::bad_cast
1774   EmitBadCastCall(CGF);
1775 
1776   CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
1777   return llvm::UndefValue::get(DestLTy);
1778 }
1779 
1780 llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
1781                                               const CXXDynamicCastExpr *DCE) {
1782   QualType DestTy = DCE->getTypeAsWritten();
1783 
1784   if (DCE->isAlwaysNull())
1785     return EmitDynamicCastToNull(*this, DestTy);
1786 
1787   QualType SrcTy = DCE->getSubExpr()->getType();
1788 
1789   // C++ [expr.dynamic.cast]p4:
1790   //   If the value of v is a null pointer value in the pointer case, the result
1791   //   is the null pointer value of type T.
1792   bool ShouldNullCheckSrcValue = SrcTy->isPointerType();
1793 
1794   llvm::BasicBlock *CastNull = 0;
1795   llvm::BasicBlock *CastNotNull = 0;
1796   llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");
1797 
1798   if (ShouldNullCheckSrcValue) {
1799     CastNull = createBasicBlock("dynamic_cast.null");
1800     CastNotNull = createBasicBlock("dynamic_cast.notnull");
1801 
1802     llvm::Value *IsNull = Builder.CreateIsNull(Value);
1803     Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
1804     EmitBlock(CastNotNull);
1805   }
1806 
1807   Value = EmitDynamicCastCall(*this, Value, SrcTy, DestTy, CastEnd);
1808 
1809   if (ShouldNullCheckSrcValue) {
1810     EmitBranch(CastEnd);
1811 
1812     EmitBlock(CastNull);
1813     EmitBranch(CastEnd);
1814   }
1815 
1816   EmitBlock(CastEnd);
1817 
1818   if (ShouldNullCheckSrcValue) {
1819     llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
1820     PHI->addIncoming(Value, CastNotNull);
1821     PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);
1822 
1823     Value = PHI;
1824   }
1825 
1826   return Value;
1827 }
1828 
1829 void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
1830   RunCleanupsScope Scope(*this);
1831   LValue SlotLV = MakeAddrLValue(Slot.getAddr(), E->getType(),
1832                                  Slot.getAlignment());
1833 
1834   CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
1835   for (LambdaExpr::capture_init_iterator i = E->capture_init_begin(),
1836                                          e = E->capture_init_end();
1837        i != e; ++i, ++CurField) {
1838     // Emit initialization
1839 
1840     LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
1841     ArrayRef<VarDecl *> ArrayIndexes;
1842     if (CurField->getType()->isArrayType())
1843       ArrayIndexes = E->getCaptureInitIndexVars(i);
1844     EmitInitializerForField(*CurField, LV, *i, ArrayIndexes);
1845   }
1846 }
1847