1 //===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code dealing with code generation of C++ expressions
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CodeGenFunction.h"
15 #include "CGCXXABI.h"
16 #include "CGObjCRuntime.h"
17 #include "llvm/Intrinsics.h"
18 using namespace clang;
19 using namespace CodeGen;
20 
21 RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
22                                           llvm::Value *Callee,
23                                           ReturnValueSlot ReturnValue,
24                                           llvm::Value *This,
25                                           llvm::Value *VTT,
26                                           CallExpr::const_arg_iterator ArgBeg,
27                                           CallExpr::const_arg_iterator ArgEnd) {
28   assert(MD->isInstance() &&
29          "Trying to emit a member call expr on a static method!");
30 
31   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
32 
33   CallArgList Args;
34 
35   // Push the this ptr.
36   Args.push_back(std::make_pair(RValue::get(This),
37                                 MD->getThisType(getContext())));
38 
39   // If there is a VTT parameter, emit it.
40   if (VTT) {
41     QualType T = getContext().getPointerType(getContext().VoidPtrTy);
42     Args.push_back(std::make_pair(RValue::get(VTT), T));
43   }
44 
45   // And the rest of the call args
46   EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
47 
48   QualType ResultType = FPT->getResultType();
49   return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args,
50                                                  FPT->getExtInfo()),
51                   Callee, ReturnValue, Args, MD);
52 }
53 
54 /// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on given
55 /// expr can be devirtualized.
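/// For example, in 'struct D { virtual void f(); }; D d; d.f();' the dynamic
/// type of 'd' is known to be D, so the call can be emitted as a direct call
/// to D::f instead of a virtual call.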
56 static bool canDevirtualizeMemberFunctionCalls(const Expr *Base) {
57   if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
58     if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
59       // If the variable has record type, its dynamic type is known; devirtualize.
60       return VD->getType()->isRecordType();
61     }
62 
63     return false;
64   }
65 
66   // We can always devirtualize calls on temporary object expressions.
67   if (isa<CXXConstructExpr>(Base))
68     return true;
69 
70   // And calls on bound temporaries.
71   if (isa<CXXBindTemporaryExpr>(Base))
72     return true;
73 
74   // Check if this is a call expr that returns a record type.
75   if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
76     return CE->getCallReturnType()->isRecordType();
77 
78   // We can't devirtualize the call.
79   return false;
80 }
81 
82 RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
83                                               ReturnValueSlot ReturnValue) {
84   if (isa<BinaryOperator>(CE->getCallee()->IgnoreParens()))
85     return EmitCXXMemberPointerCallExpr(CE, ReturnValue);
86 
87   const MemberExpr *ME = cast<MemberExpr>(CE->getCallee()->IgnoreParens());
88   const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
89 
90   if (MD->isStatic()) {
91     // The method is static, emit it as we would a regular call.
92     llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
93     return EmitCall(getContext().getPointerType(MD->getType()), Callee,
94                     ReturnValue, CE->arg_begin(), CE->arg_end());
95   }
96 
97   // Compute the object pointer.
98   llvm::Value *This;
99   if (ME->isArrow())
100     This = EmitScalarExpr(ME->getBase());
101   else {
102     LValue BaseLV = EmitLValue(ME->getBase());
103     This = BaseLV.getAddress();
104   }
105 
106   if (MD->isTrivial()) {
107     if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
108 
109     assert(MD->isCopyAssignment() && "unknown trivial member function");
110     // We don't like to generate the trivial copy assignment operator when
111     // it isn't necessary; just produce the proper effect here.
112     llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
113     EmitAggregateCopy(This, RHS, CE->getType());
114     return RValue::get(This);
115   }
116 
117   // Compute the function type we're calling.
118   const CGFunctionInfo &FInfo =
119     (isa<CXXDestructorDecl>(MD)
120      ? CGM.getTypes().getFunctionInfo(cast<CXXDestructorDecl>(MD),
121                                       Dtor_Complete)
122      : CGM.getTypes().getFunctionInfo(MD));
123 
124   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
125   const llvm::Type *Ty
126     = CGM.getTypes().GetFunctionType(FInfo, FPT->isVariadic());
127 
128   // C++ [class.virtual]p12:
129   //   Explicit qualification with the scope operator (5.1) suppresses the
130   //   virtual call mechanism.
131   //
132   // We also don't emit a virtual call if the base expression has a record type
133   // because then we know what the type is.
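  //
  // For example, 'x.Base::f()' calls Base::f directly even if f is virtual in
  // the dynamic type of x.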
134   bool UseVirtualCall = MD->isVirtual() && !ME->hasQualifier()
135                      && !canDevirtualizeMemberFunctionCalls(ME->getBase());
136 
137   llvm::Value *Callee;
138   if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
139     if (UseVirtualCall) {
140       Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
141     } else {
142       Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
143     }
144   } else if (UseVirtualCall) {
145     Callee = BuildVirtualCall(MD, This, Ty);
146   } else {
147     Callee = CGM.GetAddrOfFunction(MD, Ty);
148   }
149 
150   return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
151                            CE->arg_begin(), CE->arg_end());
152 }
153 
154 RValue
155 CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
156                                               ReturnValueSlot ReturnValue) {
157   const BinaryOperator *BO =
158       cast<BinaryOperator>(E->getCallee()->IgnoreParens());
159   const Expr *BaseExpr = BO->getLHS();
160   const Expr *MemFnExpr = BO->getRHS();
161 
162   const MemberPointerType *MPT =
163     MemFnExpr->getType()->getAs<MemberPointerType>();
164 
165   const FunctionProtoType *FPT =
166     MPT->getPointeeType()->getAs<FunctionProtoType>();
167   const CXXRecordDecl *RD =
168     cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
169 
170   // Get the member function pointer.
171   llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
172 
173   // Emit the 'this' pointer.
174   llvm::Value *This;
175 
176   if (BO->getOpcode() == BO_PtrMemI)
177     This = EmitScalarExpr(BaseExpr);
178   else
179     This = EmitLValue(BaseExpr).getAddress();
180 
181   // Ask the ABI to load the callee.  Note that This is modified.
182   llvm::Value *Callee =
183     CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);
184 
185   CallArgList Args;
186 
187   QualType ThisType =
188     getContext().getPointerType(getContext().getTagDeclType(RD));
189 
190   // Push the this ptr.
191   Args.push_back(std::make_pair(RValue::get(This), ThisType));
192 
193   // And the rest of the call args
194   EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
195   const FunctionType *BO_FPT = BO->getType()->getAs<FunctionProtoType>();
196   return EmitCall(CGM.getTypes().getFunctionInfo(Args, BO_FPT), Callee,
197                   ReturnValue, Args);
198 }
199 
200 RValue
201 CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
202                                                const CXXMethodDecl *MD,
203                                                ReturnValueSlot ReturnValue) {
204   assert(MD->isInstance() &&
205          "Trying to emit a member call expr on a static method!");
206   if (MD->isCopyAssignment()) {
207     const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(MD->getDeclContext());
208     if (ClassDecl->hasTrivialCopyAssignment()) {
209       assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
210              "EmitCXXOperatorMemberCallExpr - user declared copy assignment");
211       LValue LV = EmitLValue(E->getArg(0));
212       llvm::Value *This;
213       if (LV.isPropertyRef() || LV.isKVCRef()) {
214         llvm::Value *AggLoc  = CreateMemTemp(E->getArg(1)->getType());
215         EmitAggExpr(E->getArg(1), AggLoc, false /*VolatileDest*/);
216         if (LV.isPropertyRef())
217           EmitObjCPropertySet(LV.getPropertyRefExpr(),
218                               RValue::getAggregate(AggLoc,
219                                                    false /*VolatileDest*/));
220         else
221           EmitObjCPropertySet(LV.getKVCRefExpr(),
222                               RValue::getAggregate(AggLoc,
223                                                    false /*VolatileDest*/));
224         return RValue::getAggregate(0, false);
225       } else
227         This = LV.getAddress();
228 
229       llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
230       QualType Ty = E->getType();
231       EmitAggregateCopy(This, Src, Ty);
232       return RValue::get(This);
233     }
234   }
235 
236   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
237   const llvm::Type *Ty =
238     CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
239                                    FPT->isVariadic());
240   LValue LV = EmitLValue(E->getArg(0));
241   llvm::Value *This;
242   if (LV.isPropertyRef() || LV.isKVCRef()) {
243     QualType QT = E->getArg(0)->getType();
244     RValue RV =
245       LV.isPropertyRef() ? EmitLoadOfPropertyRefLValue(LV, QT)
246                          : EmitLoadOfKVCRefLValue(LV, QT);
247     assert (!RV.isScalar() && "EmitCXXOperatorMemberCallExpr");
248     This = RV.getAggregateAddr();
249   } else
251     This = LV.getAddress();
252 
253   llvm::Value *Callee;
254   if (MD->isVirtual() && !canDevirtualizeMemberFunctionCalls(E->getArg(0)))
255     Callee = BuildVirtualCall(MD, This, Ty);
256   else
257     Callee = CGM.GetAddrOfFunction(MD, Ty);
258 
259   return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
260                            E->arg_begin() + 1, E->arg_end());
261 }
262 
263 void
264 CodeGenFunction::EmitCXXConstructExpr(llvm::Value *Dest,
265                                       const CXXConstructExpr *E) {
266   assert(Dest && "Must have a destination!");
267   const CXXConstructorDecl *CD = E->getConstructor();
268 
269   // If we require zero initialization before (or instead of) calling the
270   // constructor, as can be the case with a non-user-provided default
271   // constructor, emit the zero initialization now.
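  //
  // For example, value-initializing 'struct S { int i; };' with 'S()' must
  // produce an object whose member 'i' is zero, even though S's implicit
  // default constructor performs no initialization.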
272   if (E->requiresZeroInitialization())
273     EmitNullInitialization(Dest, E->getType());
274 
275 
276   // If this is a call to a trivial default constructor, do nothing.
277   if (CD->isTrivial() && CD->isDefaultConstructor())
278     return;
279 
280   // Code gen optimization to eliminate copy constructor and return
281   // its first argument instead, if in fact that argument is a temporary
282   // object.
283   if (getContext().getLangOptions().ElideConstructors && E->isElidable()) {
284     if (const Expr *Arg = E->getArg(0)->getTemporaryObject()) {
285       EmitAggExpr(Arg, Dest, false);
286       return;
287     }
288   }
289 
290   const ConstantArrayType *Array
291     = getContext().getAsConstantArrayType(E->getType());
292   if (Array) {
293     QualType BaseElementTy = getContext().getBaseElementType(Array);
294     const llvm::Type *BasePtr = ConvertType(BaseElementTy);
295     BasePtr = llvm::PointerType::getUnqual(BasePtr);
296     llvm::Value *BaseAddrPtr =
297       Builder.CreateBitCast(Dest, BasePtr);
298 
299     EmitCXXAggrConstructorCall(CD, Array, BaseAddrPtr,
300                                E->arg_begin(), E->arg_end());
301   } else {
303     CXXCtorType Type =
304       (E->getConstructionKind() == CXXConstructExpr::CK_Complete)
305       ? Ctor_Complete : Ctor_Base;
306     bool ForVirtualBase =
307       E->getConstructionKind() == CXXConstructExpr::CK_VirtualBase;
308 
309     // Call the constructor.
310     EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest,
311                            E->arg_begin(), E->arg_end());
312   }
313 }
314 
315 /// Check whether the given operator new[] is the global placement
316 /// operator new[].
317 static bool IsPlacementOperatorNewArray(ASTContext &Ctx,
318                                         const FunctionDecl *Fn) {
319   // Must be in global scope.  Note that allocation functions can't be
320   // declared in namespaces.
321   if (!Fn->getDeclContext()->getRedeclContext()->isFileContext())
322     return false;
323 
324   // Signature must be void *operator new[](size_t, void*).
325   // The size_t is common to all operator new[]s.
326   if (Fn->getNumParams() != 2)
327     return false;
328 
329   CanQualType ParamType = Ctx.getCanonicalType(Fn->getParamDecl(1)->getType());
330   return (ParamType == Ctx.VoidPtrTy);
331 }
332 
333 static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
334                                         const CXXNewExpr *E) {
335   if (!E->isArray())
336     return CharUnits::Zero();
337 
338   // No cookie is required if the new operator being used is
339   // ::operator new[](size_t, void*).
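  //
  // Otherwise the ABI may require a cookie so that the matching 'delete[]'
  // can recover the element count; in the Itanium C++ ABI this is typically
  // needed when the element type has a non-trivial destructor, and the count
  // is stored immediately before the array.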
340   const FunctionDecl *OperatorNew = E->getOperatorNew();
341   if (IsPlacementOperatorNewArray(CGF.getContext(), OperatorNew))
342     return CharUnits::Zero();
343 
344   return CGF.CGM.getCXXABI().GetArrayCookieSize(E->getAllocatedType());
345 }
346 
347 static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context,
348                                         CodeGenFunction &CGF,
349                                         const CXXNewExpr *E,
350                                         llvm::Value *&NumElements,
351                                         llvm::Value *&SizeWithoutCookie) {
352   QualType ElemType = E->getAllocatedType();
353 
354   const llvm::IntegerType *SizeTy =
355     cast<llvm::IntegerType>(CGF.ConvertType(CGF.getContext().getSizeType()));
356 
357   CharUnits TypeSize = CGF.getContext().getTypeSizeInChars(ElemType);
358 
359   if (!E->isArray()) {
360     SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());
361     return SizeWithoutCookie;
362   }
363 
364   // Figure out the cookie size.
365   CharUnits CookieSize = CalculateCookiePadding(CGF, E);
366 
367   // Emit the array size expression.
368   // We multiply the size of all dimensions for NumElements.
369   // e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
370   NumElements = CGF.EmitScalarExpr(E->getArraySize());
371   assert(NumElements->getType() == SizeTy && "element count not a size_t");
372 
373   uint64_t ArraySizeMultiplier = 1;
374   while (const ConstantArrayType *CAT
375              = CGF.getContext().getAsConstantArrayType(ElemType)) {
376     ElemType = CAT->getElementType();
377     ArraySizeMultiplier *= CAT->getSize().getZExtValue();
378   }
379 
380   llvm::Value *Size;
381 
382   // If someone is doing 'new int[42]' there is no need to do a dynamic check.
383   // Don't bloat the -O0 code.
384   if (llvm::ConstantInt *NumElementsC =
385         dyn_cast<llvm::ConstantInt>(NumElements)) {
386     llvm::APInt NEC = NumElementsC->getValue();
387     unsigned SizeWidth = NEC.getBitWidth();
388 
389     // Determine if there is an overflow here by doing an extended multiply.
390     NEC.zext(SizeWidth*2);
391     llvm::APInt SC(SizeWidth*2, TypeSize.getQuantity());
392     SC *= NEC;
393 
394     if (!CookieSize.isZero()) {
395       // Save the current size without a cookie.  We don't care if an
396       // overflow's already happened because SizeWithoutCookie isn't
397       // used if the allocator returns null or throws, as it should
398       // always do on an overflow.
399       llvm::APInt SWC = SC;
400       SWC.trunc(SizeWidth);
401       SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, SWC);
402 
403       // Add the cookie size.
404       SC += llvm::APInt(SizeWidth*2, CookieSize.getQuantity());
405     }
406 
407     if (SC.countLeadingZeros() >= SizeWidth) {
408       SC.trunc(SizeWidth);
409       Size = llvm::ConstantInt::get(SizeTy, SC);
410     } else {
411       // On overflow, produce a -1 so operator new throws.
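      // For example, 'new int[1ULL << 62]' on a 64-bit target would overflow
      // size_t; passing -1 (i.e. SIZE_MAX) guarantees the allocation fails.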
412       Size = llvm::Constant::getAllOnesValue(SizeTy);
413     }
414 
415     // Scale NumElements while we're at it.
416     uint64_t N = NEC.getZExtValue() * ArraySizeMultiplier;
417     NumElements = llvm::ConstantInt::get(SizeTy, N);
418 
419   // Otherwise, we don't need to do an overflow-checked multiplication if
420   // we're multiplying by one.
421   } else if (TypeSize.isOne()) {
422     assert(ArraySizeMultiplier == 1);
423 
424     Size = NumElements;
425 
426     // If we need a cookie, add its size in with an overflow check.
427     // This is maybe a little paranoid.
428     if (!CookieSize.isZero()) {
429       SizeWithoutCookie = Size;
430 
431       llvm::Value *CookieSizeV
432         = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
433 
434       const llvm::Type *Types[] = { SizeTy };
435       llvm::Value *UAddF
436         = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
437       llvm::Value *AddRes
438         = CGF.Builder.CreateCall2(UAddF, Size, CookieSizeV);
439 
440       Size = CGF.Builder.CreateExtractValue(AddRes, 0);
441       llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
442       Size = CGF.Builder.CreateSelect(DidOverflow,
443                                       llvm::ConstantInt::get(SizeTy, -1),
444                                       Size);
445     }
446 
447   // Otherwise use the int.umul.with.overflow intrinsic.
448   } else {
449     llvm::Value *OutermostElementSize
450       = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());
451 
452     llvm::Value *NumOutermostElements = NumElements;
453 
454     // Scale NumElements by the array size multiplier.  This might
455     // overflow, but only if the multiplication below also overflows,
456     // in which case this multiplication isn't used.
457     if (ArraySizeMultiplier != 1)
458       NumElements = CGF.Builder.CreateMul(NumElements,
459                          llvm::ConstantInt::get(SizeTy, ArraySizeMultiplier));
460 
461     // The requested size of the outermost array is non-constant.
462     // Multiply that by the static size of the elements of that array;
463     // on unsigned overflow, set the size to -1 to trigger an
464     // exception from the allocation routine.  This is sufficient to
465     // prevent buffer overruns from the allocator returning a
466     // seemingly valid pointer to insufficient space.  This idea comes
467     // originally from MSVC, and GCC has an open bug requesting
468     // similar behavior:
469     //   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=19351
470     //
471     // This will not be sufficient for C++0x, which requires a
472     // specific exception class (std::bad_array_new_length).
473     // That will require ABI support that has not yet been specified.
474     const llvm::Type *Types[] = { SizeTy };
475     llvm::Value *UMulF
476       = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, Types, 1);
477     llvm::Value *MulRes = CGF.Builder.CreateCall2(UMulF, NumOutermostElements,
478                                                   OutermostElementSize);
479 
480     // The overflow bit.
481     llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(MulRes, 1);
482 
483     // The result of the multiplication.
484     Size = CGF.Builder.CreateExtractValue(MulRes, 0);
485 
486     // If we have a cookie, we need to add that size in, too.
487     if (!CookieSize.isZero()) {
488       SizeWithoutCookie = Size;
489 
490       llvm::Value *CookieSizeV
491         = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
492       llvm::Value *UAddF
493         = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
494       llvm::Value *AddRes
495         = CGF.Builder.CreateCall2(UAddF, SizeWithoutCookie, CookieSizeV);
496 
497       Size = CGF.Builder.CreateExtractValue(AddRes, 0);
498 
499       llvm::Value *AddDidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
500       DidOverflow = CGF.Builder.CreateAnd(DidOverflow, AddDidOverflow);
501     }
502 
503     Size = CGF.Builder.CreateSelect(DidOverflow,
504                                     llvm::ConstantInt::get(SizeTy, -1),
505                                     Size);
506   }
507 
508   if (CookieSize.isZero())
509     SizeWithoutCookie = Size;
510   else
511     assert(SizeWithoutCookie && "didn't set SizeWithoutCookie?");
512 
513   return Size;
514 }
515 
516 static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
517                                     llvm::Value *NewPtr) {
518 
519   assert(E->getNumConstructorArgs() == 1 &&
520          "Can only have one argument to initializer of POD type.");
521 
522   const Expr *Init = E->getConstructorArg(0);
523   QualType AllocType = E->getAllocatedType();
524 
525   unsigned Alignment =
526     CGF.getContext().getTypeAlignInChars(AllocType).getQuantity();
527   if (!CGF.hasAggregateLLVMType(AllocType))
528     CGF.EmitStoreOfScalar(CGF.EmitScalarExpr(Init), NewPtr,
529                           AllocType.isVolatileQualified(), Alignment,
530                           AllocType);
531   else if (AllocType->isAnyComplexType())
532     CGF.EmitComplexExprIntoAddr(Init, NewPtr,
533                                 AllocType.isVolatileQualified());
534   else
535     CGF.EmitAggExpr(Init, NewPtr, AllocType.isVolatileQualified());
536 }
537 
538 void
539 CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
540                                          llvm::Value *NewPtr,
541                                          llvm::Value *NumElements) {
542   // We have a POD type.
543   if (E->getNumConstructorArgs() == 0)
544     return;
545 
546   const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
547 
548   // Create a temporary for the loop index and initialize it with 0.
549   llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index");
550   llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
551   Builder.CreateStore(Zero, IndexPtr);
552 
553   // Start the loop with a block that tests the condition.
554   llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
555   llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
556 
557   EmitBlock(CondBlock);
558 
559   llvm::BasicBlock *ForBody = createBasicBlock("for.body");
560 
561   // Generate: if (loop-index < number-of-elements) fall to the loop body,
562   // otherwise, go to the block after the for-loop.
563   llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
564   llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElements, "isless");
565   // If the condition is true, execute the body.
566   Builder.CreateCondBr(IsLess, ForBody, AfterFor);
567 
568   EmitBlock(ForBody);
569 
570   llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
571   // Inside the loop body, emit the constructor call on the array element.
572   Counter = Builder.CreateLoad(IndexPtr);
573   llvm::Value *Address = Builder.CreateInBoundsGEP(NewPtr, Counter,
574                                                    "arrayidx");
575   StoreAnyExprIntoOneUnit(*this, E, Address);
576 
577   EmitBlock(ContinueBlock);
578 
579   // Emit the increment of the loop counter.
580   llvm::Value *NextVal = llvm::ConstantInt::get(SizeTy, 1);
581   Counter = Builder.CreateLoad(IndexPtr);
582   NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
583   Builder.CreateStore(NextVal, IndexPtr);
584 
585   // Finally, branch back up to the condition for the next iteration.
586   EmitBranch(CondBlock);
587 
588   // Emit the fall-through block.
589   EmitBlock(AfterFor, true);
590 }
591 
592 static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
593                            llvm::Value *NewPtr, llvm::Value *Size) {
594   llvm::LLVMContext &VMContext = CGF.CGM.getLLVMContext();
595   const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
596   if (NewPtr->getType() != BP)
597     NewPtr = CGF.Builder.CreateBitCast(NewPtr, BP, "tmp");
598 
599   CGF.Builder.CreateCall5(CGF.CGM.getMemSetFn(BP, CGF.IntPtrTy), NewPtr,
600                 llvm::Constant::getNullValue(llvm::Type::getInt8Ty(VMContext)),
601                           Size,
602                     llvm::ConstantInt::get(CGF.Int32Ty,
603                                            CGF.getContext().getTypeAlign(T)/8),
604                           llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext),
605                                                  0));
606 }
607 
608 static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
609                                llvm::Value *NewPtr,
610                                llvm::Value *NumElements,
611                                llvm::Value *AllocSizeWithoutCookie) {
612   if (E->isArray()) {
613     if (CXXConstructorDecl *Ctor = E->getConstructor()) {
614       bool RequiresZeroInitialization = false;
615       if (Ctor->getParent()->hasTrivialConstructor()) {
616         // If the new-expression did not specify value-initialization, then
617         // there is no initialization.
618         if (!E->hasInitializer() || Ctor->getParent()->isEmpty())
619           return;
620 
621         if (CGF.CGM.getTypes().isZeroInitializable(E->getAllocatedType())) {
622           // Optimization: since zero initialization will just set the memory
623           // to all zeroes, generate a single memset to do it in one shot.
624           EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
625                          AllocSizeWithoutCookie);
626           return;
627         }
628 
629         RequiresZeroInitialization = true;
630       }
631 
632       CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
633                                      E->constructor_arg_begin(),
634                                      E->constructor_arg_end(),
635                                      RequiresZeroInitialization);
636       return;
637     } else if (E->getNumConstructorArgs() == 1 &&
638                isa<ImplicitValueInitExpr>(E->getConstructorArg(0))) {
639       // Optimization: since zero initialization will just set the memory
640       // to all zeroes, generate a single memset to do it in one shot.
641       EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
642                      AllocSizeWithoutCookie);
643       return;
644     } else {
645       CGF.EmitNewArrayInitializer(E, NewPtr, NumElements);
646       return;
647     }
648   }
649 
650   if (CXXConstructorDecl *Ctor = E->getConstructor()) {
651     // Per C++ [expr.new]p15, if we have an initializer, then we're performing
652     // direct initialization. C++ [dcl.init]p5 requires that we
653     // zero-initialize storage if there are no user-declared constructors.
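    //
    // For example, 'new S()' where S has only an implicitly-declared default
    // constructor must yield an object whose members are zeroed.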
654     if (E->hasInitializer() &&
655         !Ctor->getParent()->hasUserDeclaredConstructor() &&
656         !Ctor->getParent()->isEmpty())
657       CGF.EmitNullInitialization(NewPtr, E->getAllocatedType());
658 
659     CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
660                                NewPtr, E->constructor_arg_begin(),
661                                E->constructor_arg_end());
662 
663     return;
664   }
665   // We have a POD type.
666   if (E->getNumConstructorArgs() == 0)
667     return;
668 
669   StoreAnyExprIntoOneUnit(CGF, E, NewPtr);
670 }
671 
672 llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
673   QualType AllocType = E->getAllocatedType();
674   if (AllocType->isArrayType())
675     while (const ArrayType *AType = getContext().getAsArrayType(AllocType))
676       AllocType = AType->getElementType();
677 
678   FunctionDecl *NewFD = E->getOperatorNew();
679   const FunctionProtoType *NewFTy = NewFD->getType()->getAs<FunctionProtoType>();
680 
681   CallArgList NewArgs;
682 
683   // The allocation size is the first argument.
684   QualType SizeTy = getContext().getSizeType();
685 
686   llvm::Value *NumElements = 0;
687   llvm::Value *AllocSizeWithoutCookie = 0;
688   llvm::Value *AllocSize = EmitCXXNewAllocSize(getContext(),
689                                                *this, E, NumElements,
690                                                AllocSizeWithoutCookie);
691 
692   NewArgs.push_back(std::make_pair(RValue::get(AllocSize), SizeTy));
693 
694   // Emit the rest of the arguments.
695   // FIXME: Ideally, this should just use EmitCallArgs.
696   CXXNewExpr::const_arg_iterator NewArg = E->placement_arg_begin();
697 
698   // First, use the types from the function type.
699   // We start at 1 here because the first argument (the allocation size)
700   // has already been emitted.
701   for (unsigned i = 1, e = NewFTy->getNumArgs(); i != e; ++i, ++NewArg) {
702     QualType ArgType = NewFTy->getArgType(i);
703 
704     assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
705            getTypePtr() ==
706            getContext().getCanonicalType(NewArg->getType()).getTypePtr() &&
707            "type mismatch in call argument!");
708 
709     NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
710                                      ArgType));
711 
712   }
713 
714   // Either we've emitted all the call args, or we have a call to a
715   // variadic function.
716   assert((NewArg == E->placement_arg_end() || NewFTy->isVariadic()) &&
717          "Extra arguments in non-variadic function!");
718 
719   // If we still have any arguments, emit them using the type of the argument.
720   for (CXXNewExpr::const_arg_iterator NewArgEnd = E->placement_arg_end();
721        NewArg != NewArgEnd; ++NewArg) {
722     QualType ArgType = NewArg->getType();
723     NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
724                                      ArgType));
725   }
726 
727   // Emit the call to new.
728   RValue RV =
729     EmitCall(CGM.getTypes().getFunctionInfo(NewArgs, NewFTy),
730              CGM.GetAddrOfFunction(NewFD), ReturnValueSlot(), NewArgs, NewFD);
731 
732   // An allocation function declared with an empty exception specification
733   // returns null to indicate failure to allocate storage. [expr.new]p13.
734   // (We don't need to check for null when there's no new initializer and
735   // we're allocating a POD type).
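  //
  // For example, the nothrow form 'operator new(std::size_t, const
  // std::nothrow_t&) throw()' reports failure by returning null rather than
  // throwing std::bad_alloc, so the result must be checked before running the
  // initializer.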
736   bool NullCheckResult = NewFTy->hasEmptyExceptionSpec() &&
737     !(AllocType->isPODType() && !E->hasInitializer());
738 
739   llvm::BasicBlock *NullCheckSource = 0;
740   llvm::BasicBlock *NewNotNull = 0;
741   llvm::BasicBlock *NewEnd = 0;
742 
743   llvm::Value *NewPtr = RV.getScalarVal();
744   unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();
745 
746   if (NullCheckResult) {
747     NullCheckSource = Builder.GetInsertBlock();
748     NewNotNull = createBasicBlock("new.notnull");
749     NewEnd = createBasicBlock("new.end");
750 
751     llvm::Value *IsNull = Builder.CreateIsNull(NewPtr, "new.isnull");
752     Builder.CreateCondBr(IsNull, NewEnd, NewNotNull);
753     EmitBlock(NewNotNull);
754   }
755 
756   assert((AllocSize == AllocSizeWithoutCookie) ==
757          CalculateCookiePadding(*this, E).isZero());
758   if (AllocSize != AllocSizeWithoutCookie) {
759     assert(E->isArray());
760     NewPtr = CGM.getCXXABI().InitializeArrayCookie(*this, NewPtr, NumElements,
761                                                    AllocType);
762   }
763 
764   const llvm::Type *ElementPtrTy
765     = ConvertTypeForMem(AllocType)->getPointerTo(AS);
766   NewPtr = Builder.CreateBitCast(NewPtr, ElementPtrTy);
767   if (E->isArray()) {
768     EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie);
769 
770     // NewPtr is a pointer to the base element type.  If we're
771     // allocating an array of arrays, we'll need to cast back to the
772     // array pointer type.
773     const llvm::Type *ResultTy = ConvertTypeForMem(E->getType());
774     if (NewPtr->getType() != ResultTy)
775       NewPtr = Builder.CreateBitCast(NewPtr, ResultTy);
776   } else {
777     EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie);
778   }
779 
780   if (NullCheckResult) {
781     Builder.CreateBr(NewEnd);
782     llvm::BasicBlock *NotNullSource = Builder.GetInsertBlock();
783     EmitBlock(NewEnd);
784 
785     llvm::PHINode *PHI = Builder.CreatePHI(NewPtr->getType());
786     PHI->reserveOperandSpace(2);
787     PHI->addIncoming(NewPtr, NotNullSource);
788     PHI->addIncoming(llvm::Constant::getNullValue(NewPtr->getType()),
789                      NullCheckSource);
790 
791     NewPtr = PHI;
792   }
793 
794   return NewPtr;
795 }
796 
797 void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
798                                      llvm::Value *Ptr,
799                                      QualType DeleteTy) {
800   assert(DeleteFD->getOverloadedOperator() == OO_Delete);
801 
802   const FunctionProtoType *DeleteFTy =
803     DeleteFD->getType()->getAs<FunctionProtoType>();
804 
805   CallArgList DeleteArgs;
806 
807   // Check if we need to pass the size to the delete operator.
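  // A usual 'operator delete' may be declared to take either one argument
  // (void*) or two (void*, size_t); in the two-argument form the size of the
  // object being deleted is passed as the second argument.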
808   llvm::Value *Size = 0;
809   QualType SizeTy;
810   if (DeleteFTy->getNumArgs() == 2) {
811     SizeTy = DeleteFTy->getArgType(1);
812     CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
813     Size = llvm::ConstantInt::get(ConvertType(SizeTy),
814                                   DeleteTypeSize.getQuantity());
815   }
816 
817   QualType ArgTy = DeleteFTy->getArgType(0);
818   llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
819   DeleteArgs.push_back(std::make_pair(RValue::get(DeletePtr), ArgTy));
820 
821   if (Size)
822     DeleteArgs.push_back(std::make_pair(RValue::get(Size), SizeTy));
823 
824   // Emit the call to delete.
825   EmitCall(CGM.getTypes().getFunctionInfo(DeleteArgs, DeleteFTy),
826            CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
827            DeleteArgs, DeleteFD);
828 }
829 
830 namespace {
831   /// Calls the given 'operator delete' on a single object.
832   struct CallObjectDelete : EHScopeStack::Cleanup {
833     llvm::Value *Ptr;
834     const FunctionDecl *OperatorDelete;
835     QualType ElementType;
836 
837     CallObjectDelete(llvm::Value *Ptr,
838                      const FunctionDecl *OperatorDelete,
839                      QualType ElementType)
840       : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
841 
842     void Emit(CodeGenFunction &CGF, bool IsForEH) {
843       CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
844     }
845   };
846 }
847 
848 /// Emit the code for deleting a single object.
849 static void EmitObjectDelete(CodeGenFunction &CGF,
850                              const FunctionDecl *OperatorDelete,
851                              llvm::Value *Ptr,
852                              QualType ElementType) {
853   // Find the destructor for the type, if applicable.  If the
854   // destructor is virtual, we'll just emit the vcall and return.
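  // (In the Itanium C++ ABI the deleting destructor reached through the
  // vtable both destroys the object and calls the appropriate operator
  // delete, so no separate delete call is needed on that path.)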
855   const CXXDestructorDecl *Dtor = 0;
856   if (const RecordType *RT = ElementType->getAs<RecordType>()) {
857     CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
858     if (!RD->hasTrivialDestructor()) {
859       Dtor = RD->getDestructor();
860 
861       if (Dtor->isVirtual()) {
862         const llvm::Type *Ty =
863           CGF.getTypes().GetFunctionType(CGF.getTypes().getFunctionInfo(Dtor,
864                                                                Dtor_Complete),
865                                          /*isVariadic=*/false);
866 
867         llvm::Value *Callee
868           = CGF.BuildVirtualCall(Dtor, Dtor_Deleting, Ptr, Ty);
869         CGF.EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
870                               0, 0);
871 
872         // The dtor took care of deleting the object.
873         return;
874       }
875     }
876   }
877 
878   // Make sure that we call delete even if the dtor throws.
879   CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
880                                             Ptr, OperatorDelete, ElementType);
881 
882   if (Dtor)
883     CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
884                               /*ForVirtualBase=*/false, Ptr);
885 
886   CGF.PopCleanupBlock();
887 }
888 
889 namespace {
890   /// Calls the given 'operator delete' on an array of objects.
891   struct CallArrayDelete : EHScopeStack::Cleanup {
892     llvm::Value *Ptr;
893     const FunctionDecl *OperatorDelete;
894     llvm::Value *NumElements;
895     QualType ElementType;
896     CharUnits CookieSize;
897 
898     CallArrayDelete(llvm::Value *Ptr,
899                     const FunctionDecl *OperatorDelete,
900                     llvm::Value *NumElements,
901                     QualType ElementType,
902                     CharUnits CookieSize)
903       : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
904         ElementType(ElementType), CookieSize(CookieSize) {}
905 
906     void Emit(CodeGenFunction &CGF, bool IsForEH) {
907       const FunctionProtoType *DeleteFTy =
908         OperatorDelete->getType()->getAs<FunctionProtoType>();
909       assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);
910 
911       CallArgList Args;
912 
913       // Pass the pointer as the first argument.
914       QualType VoidPtrTy = DeleteFTy->getArgType(0);
915       llvm::Value *DeletePtr
916         = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
917       Args.push_back(std::make_pair(RValue::get(DeletePtr), VoidPtrTy));
918 
919       // Pass the original requested size as the second argument.
920       if (DeleteFTy->getNumArgs() == 2) {
921         QualType size_t = DeleteFTy->getArgType(1);
922         const llvm::IntegerType *SizeTy
923           = cast<llvm::IntegerType>(CGF.ConvertType(size_t));
924 
925         CharUnits ElementTypeSize =
926           CGF.CGM.getContext().getTypeSizeInChars(ElementType);
927 
928         // The size of an element, multiplied by the number of elements.
929         llvm::Value *Size
930           = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
931         Size = CGF.Builder.CreateMul(Size, NumElements);
932 
933         // Plus the size of the cookie if applicable.
934         if (!CookieSize.isZero()) {
935           llvm::Value *CookieSizeV
936             = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
937           Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
938         }
939 
940         Args.push_back(std::make_pair(RValue::get(Size), size_t));
941       }
942 
943       // Emit the call to delete.
944       CGF.EmitCall(CGF.getTypes().getFunctionInfo(Args, DeleteFTy),
945                    CGF.CGM.GetAddrOfFunction(OperatorDelete),
946                    ReturnValueSlot(), Args, OperatorDelete);
947     }
948   };
949 }
950 
951 /// Emit the code for deleting an array of objects.
952 static void EmitArrayDelete(CodeGenFunction &CGF,
953                             const FunctionDecl *OperatorDelete,
954                             llvm::Value *Ptr,
955                             QualType ElementType) {
956   llvm::Value *NumElements = 0;
957   llvm::Value *AllocatedPtr = 0;
958   CharUnits CookieSize;
959   CGF.CGM.getCXXABI().ReadArrayCookie(CGF, Ptr, ElementType,
960                                       NumElements, AllocatedPtr, CookieSize);
961 
962   assert(AllocatedPtr && "ReadArrayCookie didn't set AllocatedPtr");
963 
964   // Make sure that we call delete even if one of the dtors throws.
965   CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
966                                            AllocatedPtr, OperatorDelete,
967                                            NumElements, ElementType,
968                                            CookieSize);
969 
970   if (const CXXRecordDecl *RD = ElementType->getAsCXXRecordDecl()) {
971     if (!RD->hasTrivialDestructor()) {
972       assert(NumElements && "ReadArrayCookie didn't find element count"
973                             " for a class with destructor");
974       CGF.EmitCXXAggrDestructorCall(RD->getDestructor(), NumElements, Ptr);
975     }
976   }
977 
978   CGF.PopCleanupBlock();
979 }
980 
981 void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
982 
983   // Get at the argument before we performed the implicit conversion
984   // to void*.
985   const Expr *Arg = E->getArgument();
986   while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
987     if (ICE->getCastKind() != CK_UserDefinedConversion &&
988         ICE->getType()->isVoidPointerType())
989       Arg = ICE->getSubExpr();
990     else
991       break;
992   }
993 
994   llvm::Value *Ptr = EmitScalarExpr(Arg);
995 
996   // Null check the pointer.
997   llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
998   llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");
999 
1000   llvm::Value *IsNull =
1001     Builder.CreateICmpEQ(Ptr, llvm::Constant::getNullValue(Ptr->getType()),
1002                          "isnull");
1003 
1004   Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
1005   EmitBlock(DeleteNotNull);
1006 
1007   // We might be deleting a pointer to array.  If so, GEP down to the
1008   // first non-array element.
1009   // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
1010   QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
1011   if (DeleteTy->isConstantArrayType()) {
1012     llvm::Value *Zero = Builder.getInt32(0);
1013     llvm::SmallVector<llvm::Value*,8> GEP;
1014 
1015     GEP.push_back(Zero); // point at the outermost array
1016 
1017     // For each layer of array type we're pointing at:
1018     while (const ConstantArrayType *Arr
1019              = getContext().getAsConstantArrayType(DeleteTy)) {
1020       // 1. Unpeel the array type.
1021       DeleteTy = Arr->getElementType();
1022 
1023       // 2. GEP to the first element of the array.
1024       GEP.push_back(Zero);
1025     }
1026 
1027     Ptr = Builder.CreateInBoundsGEP(Ptr, GEP.begin(), GEP.end(), "del.first");
1028   }
1029 
1030   assert(ConvertTypeForMem(DeleteTy) ==
1031          cast<llvm::PointerType>(Ptr->getType())->getElementType());
1032 
1033   if (E->isArrayForm()) {
1034     EmitArrayDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy);
1035   } else {
1036     EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy);
1037   }
1038 
1039   EmitBlock(DeleteEnd);
1040 }
1041 
1042 llvm::Value * CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
1043   QualType Ty = E->getType();
1044   const llvm::Type *LTy = ConvertType(Ty)->getPointerTo();
1045 
1046   if (E->isTypeOperand()) {
1047     llvm::Constant *TypeInfo =
1048       CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
1049     return Builder.CreateBitCast(TypeInfo, LTy);
1050   }
1051 
1052   Expr *subE = E->getExprOperand();
1053   Ty = subE->getType();
1054   CanQualType CanTy = CGM.getContext().getCanonicalType(Ty);
1055   Ty = CanTy.getUnqualifiedType().getNonReferenceType();
1056   if (const RecordType *RT = Ty->getAs<RecordType>()) {
1057     const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1058     if (RD->isPolymorphic()) {
1059       // FIXME: if subE is an lvalue do
1060       LValue Obj = EmitLValue(subE);
1061       llvm::Value *This = Obj.getAddress();
1062       LTy = LTy->getPointerTo()->getPointerTo();
1063       llvm::Value *V = Builder.CreateBitCast(This, LTy);
1064       // We need to do a zero check for *p, unless it has NonNullAttr.
1065       // FIXME: PointerType->hasAttr<NonNullAttr>()
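      // C++ [expr.typeid]: if the operand is an lvalue obtained by
      // dereferencing a null pointer, std::bad_typeid is thrown; hence the
      // call to __cxa_bad_typeid below.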
1066       bool CanBeZero = false;
1067       if (UnaryOperator *UO = dyn_cast<UnaryOperator>(subE->IgnoreParens()))
1068         if (UO->getOpcode() == UO_Deref)
1069           CanBeZero = true;
1070       if (CanBeZero) {
1071         llvm::BasicBlock *NonZeroBlock = createBasicBlock();
1072         llvm::BasicBlock *ZeroBlock = createBasicBlock();
1073 
1074         llvm::Value *Zero = llvm::Constant::getNullValue(LTy);
1075         Builder.CreateCondBr(Builder.CreateICmpNE(V, Zero),
1076                              NonZeroBlock, ZeroBlock);
1077         EmitBlock(ZeroBlock);
1078         /// Call __cxa_bad_typeid
1079         const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
1080         const llvm::FunctionType *FTy;
1081         FTy = llvm::FunctionType::get(ResultType, false);
1082         llvm::Value *F = CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1083         Builder.CreateCall(F)->setDoesNotReturn();
1084         Builder.CreateUnreachable();
1085         EmitBlock(NonZeroBlock);
1086       }
1087       V = Builder.CreateLoad(V, "vtable");
1088       V = Builder.CreateConstInBoundsGEP1_64(V, -1ULL);
1089       V = Builder.CreateLoad(V);
1090       return V;
1091     }
1092   }
1093   return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(Ty), LTy);
1094 }
1095 
1096 llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *V,
1097                                               const CXXDynamicCastExpr *DCE) {
1098   QualType SrcTy = DCE->getSubExpr()->getType();
1099   QualType DestTy = DCE->getTypeAsWritten();
1100   QualType InnerType = DestTy->getPointeeType();
1101 
1102   const llvm::Type *LTy = ConvertType(DCE->getType());
1103 
1104   bool CanBeZero = false;
1105   bool ToVoid = false;
1106   bool ThrowOnBad = false;
1107   if (DestTy->isPointerType()) {
1108     // FIXME: if PointerType->hasAttr<NonNullAttr>(), we don't set this
1109     CanBeZero = true;
1110     if (InnerType->isVoidType())
1111       ToVoid = true;
1112   } else {
1113     LTy = LTy->getPointerTo();
1114 
1115     // FIXME: What if exceptions are disabled?
1116     ThrowOnBad = true;
1117   }
1118 
1119   if (SrcTy->isPointerType() || SrcTy->isReferenceType())
1120     SrcTy = SrcTy->getPointeeType();
1121   SrcTy = SrcTy.getUnqualifiedType();
1122 
1123   if (DestTy->isPointerType() || DestTy->isReferenceType())
1124     DestTy = DestTy->getPointeeType();
1125   DestTy = DestTy.getUnqualifiedType();
1126 
1127   llvm::BasicBlock *ContBlock = createBasicBlock();
1128   llvm::BasicBlock *NullBlock = 0;
1129   llvm::BasicBlock *NonZeroBlock = 0;
1130   if (CanBeZero) {
1131     NonZeroBlock = createBasicBlock();
1132     NullBlock = createBasicBlock();
1133     Builder.CreateCondBr(Builder.CreateIsNotNull(V), NonZeroBlock, NullBlock);
1134     EmitBlock(NonZeroBlock);
1135   }
1136 
1137   llvm::BasicBlock *BadCastBlock = 0;
1138 
1139   const llvm::Type *PtrDiffTy = ConvertType(getContext().getPointerDiffType());
1140 
1141   // See if this is a dynamic_cast(void*)
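  // For example, 'dynamic_cast<void*>(p)' yields a pointer to the most
  // derived object; this is done by adjusting 'p' by the offset-to-top value
  // stored in the vtable (at index -2 in the Itanium C++ ABI).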
1142   if (ToVoid) {
1143     llvm::Value *This = V;
1144     V = Builder.CreateBitCast(This, PtrDiffTy->getPointerTo()->getPointerTo());
1145     V = Builder.CreateLoad(V, "vtable");
1146     V = Builder.CreateConstInBoundsGEP1_64(V, -2ULL);
1147     V = Builder.CreateLoad(V, "offset to top");
1148     This = Builder.CreateBitCast(This, llvm::Type::getInt8PtrTy(VMContext));
1149     V = Builder.CreateInBoundsGEP(This, V);
1150     V = Builder.CreateBitCast(V, LTy);
1151   } else {
1152     /// Call __dynamic_cast
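    /// The Itanium C++ ABI runtime entry point is (roughly):
    ///   void *__dynamic_cast(const void *sub, const __class_type_info *src,
    ///                        const __class_type_info *dst,
    ///                        ptrdiff_t src2dst_offset);
    /// which is why three i8* arguments and a ptrdiff_t hint are built below.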
1153     const llvm::Type *ResultType = llvm::Type::getInt8PtrTy(VMContext);
1154     const llvm::FunctionType *FTy;
1155     std::vector<const llvm::Type*> ArgTys;
1156     const llvm::Type *PtrToInt8Ty
1157       = llvm::Type::getInt8Ty(VMContext)->getPointerTo();
1158     ArgTys.push_back(PtrToInt8Ty);
1159     ArgTys.push_back(PtrToInt8Ty);
1160     ArgTys.push_back(PtrToInt8Ty);
1161     ArgTys.push_back(PtrDiffTy);
1162     FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
1163 
1164     // FIXME: Calculate better hint.
1165     llvm::Value *hint = llvm::ConstantInt::get(PtrDiffTy, -1ULL);
1166 
1167     assert(SrcTy->isRecordType() && "Src type must be record type!");
1168     assert(DestTy->isRecordType() && "Dest type must be record type!");
1169 
1170     llvm::Value *SrcArg
1171       = CGM.GetAddrOfRTTIDescriptor(SrcTy.getUnqualifiedType());
1172     llvm::Value *DestArg
1173       = CGM.GetAddrOfRTTIDescriptor(DestTy.getUnqualifiedType());
1174 
1175     V = Builder.CreateBitCast(V, PtrToInt8Ty);
1176     V = Builder.CreateCall4(CGM.CreateRuntimeFunction(FTy, "__dynamic_cast"),
1177                             V, SrcArg, DestArg, hint);
1178     V = Builder.CreateBitCast(V, LTy);
1179 
1180     if (ThrowOnBad) {
1181       BadCastBlock = createBasicBlock();
1182       Builder.CreateCondBr(Builder.CreateIsNotNull(V), ContBlock, BadCastBlock);
1183       EmitBlock(BadCastBlock);
1184       /// Invoke __cxa_bad_cast
1185       ResultType = llvm::Type::getVoidTy(VMContext);
1186       const llvm::FunctionType *FBadTy;
1187       FBadTy = llvm::FunctionType::get(ResultType, false);
1188       llvm::Value *F = CGM.CreateRuntimeFunction(FBadTy, "__cxa_bad_cast");
1189       if (llvm::BasicBlock *InvokeDest = getInvokeDest()) {
1190         llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
1191         Builder.CreateInvoke(F, Cont, InvokeDest)->setDoesNotReturn();
1192         EmitBlock(Cont);
1193       } else {
1194         // FIXME: Does this ever make sense?
1195         Builder.CreateCall(F)->setDoesNotReturn();
1196       }
1197       Builder.CreateUnreachable();
1198     }
1199   }
1200 
1201   if (CanBeZero) {
1202     Builder.CreateBr(ContBlock);
1203     EmitBlock(NullBlock);
1204     Builder.CreateBr(ContBlock);
1205   }
1206   EmitBlock(ContBlock);
1207   if (CanBeZero) {
1208     llvm::PHINode *PHI = Builder.CreatePHI(LTy);
1209     PHI->reserveOperandSpace(2);
1210     PHI->addIncoming(V, NonZeroBlock);
1211     PHI->addIncoming(llvm::Constant::getNullValue(LTy), NullBlock);
1212     V = PHI;
1213   }
1214 
1215   return V;
1216 }
1217