1 //===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This contains code dealing with code generation of C++ expressions 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "clang/Frontend/CodeGenOptions.h" 15 #include "CodeGenFunction.h" 16 #include "CGCUDARuntime.h" 17 #include "CGCXXABI.h" 18 #include "CGObjCRuntime.h" 19 #include "CGDebugInfo.h" 20 #include "llvm/Intrinsics.h" 21 #include "llvm/Support/CallSite.h" 22 23 using namespace clang; 24 using namespace CodeGen; 25 26 RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD, 27 SourceLocation CallLoc, 28 llvm::Value *Callee, 29 ReturnValueSlot ReturnValue, 30 llvm::Value *This, 31 llvm::Value *VTT, 32 CallExpr::const_arg_iterator ArgBeg, 33 CallExpr::const_arg_iterator ArgEnd) { 34 assert(MD->isInstance() && 35 "Trying to emit a member call expr on a static method!"); 36 37 // C++11 [class.mfct.non-static]p2: 38 // If a non-static member function of a class X is called for an object that 39 // is not of type X, or of a type derived from X, the behavior is undefined. 40 EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall 41 : TCK_MemberCall, 42 CallLoc, This, getContext().getRecordType(MD->getParent())); 43 44 CallArgList Args; 45 46 // Push the this ptr. 47 Args.add(RValue::get(This), MD->getThisType(getContext())); 48 49 // If there is a VTT parameter, emit it. 50 if (VTT) { 51 QualType T = getContext().getPointerType(getContext().VoidPtrTy); 52 Args.add(RValue::get(VTT), T); 53 } 54 55 const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>(); 56 RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size()); 57 58 // And the rest of the call args. 59 EmitCallArgs(Args, FPT, ArgBeg, ArgEnd); 60 61 return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required), 62 Callee, ReturnValue, Args, MD); 63 } 64 65 // FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do 66 // quite what we want. 67 static const Expr *skipNoOpCastsAndParens(const Expr *E) { 68 while (true) { 69 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) { 70 E = PE->getSubExpr(); 71 continue; 72 } 73 74 if (const CastExpr *CE = dyn_cast<CastExpr>(E)) { 75 if (CE->getCastKind() == CK_NoOp) { 76 E = CE->getSubExpr(); 77 continue; 78 } 79 } 80 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 81 if (UO->getOpcode() == UO_Extension) { 82 E = UO->getSubExpr(); 83 continue; 84 } 85 } 86 return E; 87 } 88 } 89 90 /// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on given 91 /// expr can be devirtualized. 92 static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context, 93 const Expr *Base, 94 const CXXMethodDecl *MD) { 95 96 // When building with -fapple-kext, all calls must go through the vtable since 97 // the kernel linker can do runtime patching of vtables. 98 if (Context.getLangOpts().AppleKext) 99 return false; 100 101 // If the most derived class is marked final, we know that no subclass can 102 // override this member function and so we can devirtualize it. 
  // For example:
  //
  //   struct A { virtual void f(); }
  //   struct B final : A { };
  //
  //   void f(B *b) {
  //     b->f();
  //   }
  //
  const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType();
  if (MostDerivedClassDecl->hasAttr<FinalAttr>())
    return true;

  // If the member function is marked 'final', we know that it can't be
  // overridden and can therefore devirtualize it.
  if (MD->hasAttr<FinalAttr>())
    return true;

  // Similarly, if the class itself is marked 'final', no class can derive
  // from it, so the member function can't be overridden and we can
  // devirtualize the call.
  if (MD->getParent()->hasAttr<FinalAttr>())
    return true;

  Base = skipNoOpCastsAndParens(Base);
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
      // A variable of record (non-pointer, non-reference) type has a known
      // dynamic type, so we can devirtualize the call.
      return VD->getType()->isRecordType();
    }

    return false;
  }

  // We can devirtualize calls on an object accessed by a class member access
  // expression, since by C++11 [basic.life]p6 we know that it can't refer to
  // a derived class object constructed in the same location.
  if (const MemberExpr *ME = dyn_cast<MemberExpr>(Base))
    if (const ValueDecl *VD = dyn_cast<ValueDecl>(ME->getMemberDecl()))
      return VD->getType()->isRecordType();

  // We can always devirtualize calls on temporary object expressions.
  if (isa<CXXConstructExpr>(Base))
    return true;

  // And calls on bound temporaries.
  if (isa<CXXBindTemporaryExpr>(Base))
    return true;

  // Check if this is a call expr that returns a record type.
  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
    return CE->getCallReturnType()->isRecordType();

  // We can't devirtualize the call.
  return false;
}

static CXXRecordDecl *getCXXRecord(const Expr *E) {
  QualType T = E->getType();
  if (const PointerType *PTy = T->getAs<PointerType>())
    T = PTy->getPointeeType();
  const RecordType *Ty = T->castAs<RecordType>();
  return cast<CXXRecordDecl>(Ty->getDecl());
}

// Note: This function also emits constructor calls to support the MSVC
// extension that allows explicit constructor function calls.
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  const Expr *callee = CE->getCallee()->IgnoreParens();

  if (isa<BinaryOperator>(callee))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(callee);
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  CGDebugInfo *DI = getDebugInfo();
  if (DI &&
      CGM.getCodeGenOpts().getDebugInfo() == CodeGenOptions::LimitedDebugInfo &&
      !isa<CallExpr>(ME->getBase())) {
    QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
    if (const PointerType * PTy = dyn_cast<PointerType>(PQTy)) {
      DI->getOrCreateRecordType(PTy->getPointeeType(),
                                MD->getParent()->getLocation());
    }
  }

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
    return EmitCall(getContext().getPointerType(MD->getType()), Callee,
                    ReturnValue, CE->arg_begin(), CE->arg_end());
  }

  // Compute the object pointer.
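  // For illustration (hypothetical user code): an explicitly qualified call
  // such as
  //
  //   struct B { virtual void f(); };
  //   struct D : B { void f(); };
  //   void g(D *d) { d->B::f(); }   // must dispatch statically to B::f
  //
  // never uses the vtable, which is why a qualified MemberExpr is excluded
  // from the virtual-call path below.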
  const Expr *Base = ME->getBase();
  bool CanUseVirtualCall = MD->isVirtual() && !ME->hasQualifier();

  const CXXMethodDecl *DevirtualizedMethod = NULL;
  if (CanUseVirtualCall &&
      canDevirtualizeMemberFunctionCalls(getContext(), Base, MD)) {
    const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
    DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
    assert(DevirtualizedMethod);
    const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
    const Expr *Inner = Base->ignoreParenBaseCasts();
    if (getCXXRecord(Inner) == DevirtualizedClass)
      // If the class of the Inner expression is where the dynamic method
      // is defined, build the this pointer from it.
      Base = Inner;
    else if (getCXXRecord(Base) != DevirtualizedClass) {
      // If the method is defined in a class that is neither the best dynamic
      // one nor the class of the full expression, we would have to build
      // a derived-to-base cast to compute the correct this pointer, but
      // we don't have support for that yet, so do a virtual call.
      DevirtualizedMethod = NULL;
    }
    // If the return types are not the same, this might be a case where more
    // code needs to run to compensate for it. For example, the derived
    // method might return a type that derives from the return type of MD
    // and merely has it as a layout prefix.
    // For now we just avoid devirtualizing these covariant cases.
    if (DevirtualizedMethod &&
        DevirtualizedMethod->getResultType().getCanonicalType() !=
        MD->getResultType().getCanonicalType())
      DevirtualizedMethod = NULL;
  }

  llvm::Value *This;
  if (ME->isArrow())
    This = EmitScalarExpr(Base);
  else
    This = EmitLValue(Base).getAddress();

  if (MD->isTrivial()) {
    if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
      return RValue::get(0);

    if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
      // We don't like to generate the trivial copy/move assignment operator
      // when it isn't necessary; just produce the proper effect here.
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitAggregateAssign(This, RHS, CE->getType());
      return RValue::get(This);
    }

    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
      // Trivial move and copy ctors are the same.
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
                                     CE->arg_begin(), CE->arg_end());
      return RValue::get(This);
    }
    llvm_unreachable("unknown trivial member function");
  }

  // Compute the function type we're calling.
  const CXXMethodDecl *CalleeDecl = DevirtualizedMethod ?
DevirtualizedMethod : MD; 264 const CGFunctionInfo *FInfo = 0; 265 if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl)) 266 FInfo = &CGM.getTypes().arrangeCXXDestructor(Dtor, 267 Dtor_Complete); 268 else if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(CalleeDecl)) 269 FInfo = &CGM.getTypes().arrangeCXXConstructorDeclaration(Ctor, 270 Ctor_Complete); 271 else 272 FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl); 273 274 llvm::Type *Ty = CGM.getTypes().GetFunctionType(*FInfo); 275 276 // C++ [class.virtual]p12: 277 // Explicit qualification with the scope operator (5.1) suppresses the 278 // virtual call mechanism. 279 // 280 // We also don't emit a virtual call if the base expression has a record type 281 // because then we know what the type is. 282 bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod; 283 284 llvm::Value *Callee; 285 if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) { 286 if (UseVirtualCall) { 287 Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty); 288 } else { 289 if (getLangOpts().AppleKext && 290 MD->isVirtual() && 291 ME->hasQualifier()) 292 Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty); 293 else if (!DevirtualizedMethod) 294 Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty); 295 else { 296 const CXXDestructorDecl *DDtor = 297 cast<CXXDestructorDecl>(DevirtualizedMethod); 298 Callee = CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty); 299 } 300 } 301 } else if (const CXXConstructorDecl *Ctor = 302 dyn_cast<CXXConstructorDecl>(MD)) { 303 Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty); 304 } else if (UseVirtualCall) { 305 Callee = BuildVirtualCall(MD, This, Ty); 306 } else { 307 if (getLangOpts().AppleKext && 308 MD->isVirtual() && 309 ME->hasQualifier()) 310 Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty); 311 else if (!DevirtualizedMethod) 312 Callee = CGM.GetAddrOfFunction(MD, Ty); 313 else { 314 Callee = CGM.GetAddrOfFunction(DevirtualizedMethod, Ty); 315 } 316 } 317 318 return EmitCXXMemberCall(MD, CE->getExprLoc(), Callee, ReturnValue, This, 319 /*VTT=*/0, CE->arg_begin(), CE->arg_end()); 320 } 321 322 RValue 323 CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E, 324 ReturnValueSlot ReturnValue) { 325 const BinaryOperator *BO = 326 cast<BinaryOperator>(E->getCallee()->IgnoreParens()); 327 const Expr *BaseExpr = BO->getLHS(); 328 const Expr *MemFnExpr = BO->getRHS(); 329 330 const MemberPointerType *MPT = 331 MemFnExpr->getType()->castAs<MemberPointerType>(); 332 333 const FunctionProtoType *FPT = 334 MPT->getPointeeType()->castAs<FunctionProtoType>(); 335 const CXXRecordDecl *RD = 336 cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl()); 337 338 // Get the member function pointer. 339 llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr); 340 341 // Emit the 'this' pointer. 342 llvm::Value *This; 343 344 if (BO->getOpcode() == BO_PtrMemI) 345 This = EmitScalarExpr(BaseExpr); 346 else 347 This = EmitLValue(BaseExpr).getAddress(); 348 349 EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This, 350 QualType(MPT->getClass(), 0)); 351 352 // Ask the ABI to load the callee. Note that This is modified. 353 llvm::Value *Callee = 354 CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT); 355 356 CallArgList Args; 357 358 QualType ThisType = 359 getContext().getPointerType(getContext().getTagDeclType(RD)); 360 361 // Push the this ptr. 
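  // (Note that EmitLoadOfMemberFunctionPointer above may already have adjusted
  // This, e.g. to apply the pointer-to-member's base offset or to resolve a
  // virtual member, so the value pushed here is the adjusted pointer.)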
362 Args.add(RValue::get(This), ThisType); 363 364 RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1); 365 366 // And the rest of the call args 367 EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end()); 368 return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required), Callee, 369 ReturnValue, Args); 370 } 371 372 RValue 373 CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, 374 const CXXMethodDecl *MD, 375 ReturnValueSlot ReturnValue) { 376 assert(MD->isInstance() && 377 "Trying to emit a member call expr on a static method!"); 378 LValue LV = EmitLValue(E->getArg(0)); 379 llvm::Value *This = LV.getAddress(); 380 381 if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) && 382 MD->isTrivial()) { 383 llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress(); 384 QualType Ty = E->getType(); 385 EmitAggregateAssign(This, Src, Ty); 386 return RValue::get(This); 387 } 388 389 llvm::Value *Callee = EmitCXXOperatorMemberCallee(E, MD, This); 390 return EmitCXXMemberCall(MD, E->getExprLoc(), Callee, ReturnValue, This, 391 /*VTT=*/0, E->arg_begin() + 1, E->arg_end()); 392 } 393 394 RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E, 395 ReturnValueSlot ReturnValue) { 396 return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue); 397 } 398 399 static void EmitNullBaseClassInitialization(CodeGenFunction &CGF, 400 llvm::Value *DestPtr, 401 const CXXRecordDecl *Base) { 402 if (Base->isEmpty()) 403 return; 404 405 DestPtr = CGF.EmitCastToVoidPtr(DestPtr); 406 407 const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base); 408 CharUnits Size = Layout.getNonVirtualSize(); 409 CharUnits Align = Layout.getNonVirtualAlign(); 410 411 llvm::Value *SizeVal = CGF.CGM.getSize(Size); 412 413 // If the type contains a pointer to data member we can't memset it to zero. 414 // Instead, create a null constant and copy it to the destination. 415 // TODO: there are other patterns besides zero that we can usefully memset, 416 // like -1, which happens to be the pattern used by member-pointers. 417 // TODO: isZeroInitializable can be over-conservative in the case where a 418 // virtual base contains a member pointer. 419 if (!CGF.CGM.getTypes().isZeroInitializable(Base)) { 420 llvm::Constant *NullConstant = CGF.CGM.EmitNullConstantForBase(Base); 421 422 llvm::GlobalVariable *NullVariable = 423 new llvm::GlobalVariable(CGF.CGM.getModule(), NullConstant->getType(), 424 /*isConstant=*/true, 425 llvm::GlobalVariable::PrivateLinkage, 426 NullConstant, Twine()); 427 NullVariable->setAlignment(Align.getQuantity()); 428 llvm::Value *SrcPtr = CGF.EmitCastToVoidPtr(NullVariable); 429 430 // Get and call the appropriate llvm.memcpy overload. 431 CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity()); 432 return; 433 } 434 435 // Otherwise, just memset the whole thing to zero. This is legal 436 // because in LLVM, all default initializers (other than the ones we just 437 // handled above) are guaranteed to have a bit pattern of all zeros. 
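  // (The exception handled above is pointers to data members, whose null value
  // is all-ones rather than all-zeros under the common Itanium C++ ABI.)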
438 CGF.Builder.CreateMemSet(DestPtr, CGF.Builder.getInt8(0), SizeVal, 439 Align.getQuantity()); 440 } 441 442 void 443 CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E, 444 AggValueSlot Dest) { 445 assert(!Dest.isIgnored() && "Must have a destination!"); 446 const CXXConstructorDecl *CD = E->getConstructor(); 447 448 // If we require zero initialization before (or instead of) calling the 449 // constructor, as can be the case with a non-user-provided default 450 // constructor, emit the zero initialization now, unless destination is 451 // already zeroed. 452 if (E->requiresZeroInitialization() && !Dest.isZeroed()) { 453 switch (E->getConstructionKind()) { 454 case CXXConstructExpr::CK_Delegating: 455 case CXXConstructExpr::CK_Complete: 456 EmitNullInitialization(Dest.getAddr(), E->getType()); 457 break; 458 case CXXConstructExpr::CK_VirtualBase: 459 case CXXConstructExpr::CK_NonVirtualBase: 460 EmitNullBaseClassInitialization(*this, Dest.getAddr(), CD->getParent()); 461 break; 462 } 463 } 464 465 // If this is a call to a trivial default constructor, do nothing. 466 if (CD->isTrivial() && CD->isDefaultConstructor()) 467 return; 468 469 // Elide the constructor if we're constructing from a temporary. 470 // The temporary check is required because Sema sets this on NRVO 471 // returns. 472 if (getLangOpts().ElideConstructors && E->isElidable()) { 473 assert(getContext().hasSameUnqualifiedType(E->getType(), 474 E->getArg(0)->getType())); 475 if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) { 476 EmitAggExpr(E->getArg(0), Dest); 477 return; 478 } 479 } 480 481 if (const ConstantArrayType *arrayType 482 = getContext().getAsConstantArrayType(E->getType())) { 483 EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddr(), 484 E->arg_begin(), E->arg_end()); 485 } else { 486 CXXCtorType Type = Ctor_Complete; 487 bool ForVirtualBase = false; 488 489 switch (E->getConstructionKind()) { 490 case CXXConstructExpr::CK_Delegating: 491 // We should be emitting a constructor; GlobalDecl will assert this 492 Type = CurGD.getCtorType(); 493 break; 494 495 case CXXConstructExpr::CK_Complete: 496 Type = Ctor_Complete; 497 break; 498 499 case CXXConstructExpr::CK_VirtualBase: 500 ForVirtualBase = true; 501 // fall-through 502 503 case CXXConstructExpr::CK_NonVirtualBase: 504 Type = Ctor_Base; 505 } 506 507 // Call the constructor. 508 EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest.getAddr(), 509 E->arg_begin(), E->arg_end()); 510 } 511 } 512 513 void 514 CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest, 515 llvm::Value *Src, 516 const Expr *Exp) { 517 if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp)) 518 Exp = E->getSubExpr(); 519 assert(isa<CXXConstructExpr>(Exp) && 520 "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr"); 521 const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp); 522 const CXXConstructorDecl *CD = E->getConstructor(); 523 RunCleanupsScope Scope(*this); 524 525 // If we require zero initialization before (or instead of) calling the 526 // constructor, as can be the case with a non-user-provided default 527 // constructor, emit the zero initialization now. 528 // FIXME. Do I still need this for a copy ctor synthesis? 
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest, E->getType());

  assert(!getContext().getAsConstantArrayType(E->getType())
         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src,
                                 E->arg_begin(), E->arg_end());
}

static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
                                        const CXXNewExpr *E) {
  if (!E->isArray())
    return CharUnits::Zero();

  // No cookie is required if the operator new[] being used is the
  // reserved placement operator new[].
  if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
    return CharUnits::Zero();

  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
}

static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
                                        const CXXNewExpr *e,
                                        unsigned minElements,
                                        llvm::Value *&numElements,
                                        llvm::Value *&sizeWithoutCookie) {
  QualType type = e->getAllocatedType();

  if (!e->isArray()) {
    CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
    sizeWithoutCookie
      = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
    return sizeWithoutCookie;
  }

  // The width of size_t.
  unsigned sizeWidth = CGF.SizeTy->getBitWidth();

  // Figure out the cookie size.
  llvm::APInt cookieSize(sizeWidth,
                         CalculateCookiePadding(CGF, e).getQuantity());

  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  numElements = CGF.EmitScalarExpr(e->getArraySize());
  assert(isa<llvm::IntegerType>(numElements->getType()));

  // The number of elements can have an arbitrary integer type;
  // essentially, we need to multiply it by a constant factor, add a
  // cookie size, and verify that the result is representable as a
  // size_t. That's just a gloss, though, and it's wrong in one
  // important way: if the count is negative, it's an error even if
  // the cookie size would bring the total size >= 0.
  bool isSigned
    = e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
  llvm::IntegerType *numElementsType
    = cast<llvm::IntegerType>(numElements->getType());
  unsigned numElementsWidth = numElementsType->getBitWidth();

  // Compute the constant factor.
  llvm::APInt arraySizeMultiplier(sizeWidth, 1);
  while (const ConstantArrayType *CAT
           = CGF.getContext().getAsConstantArrayType(type)) {
    type = CAT->getElementType();
    arraySizeMultiplier *= CAT->getSize();
  }

  CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
  llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
  typeSizeMultiplier *= arraySizeMultiplier;

  // This will be a size_t.
  llvm::Value *size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *numElementsC =
        dyn_cast<llvm::ConstantInt>(numElements)) {
    const llvm::APInt &count = numElementsC->getValue();

    bool hasAnyOverflow = false;

    // If 'count' was a negative number, it's an overflow.
    if (isSigned && count.isNegative())
      hasAnyOverflow = true;

    // We want to do all this arithmetic in size_t. If numElements is
    // wider than that, check whether it's already too big, and if so,
    // overflow.
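    // (For example, with a 32-bit size_t and a 64-bit count of 2^40,
    // numElementsWidth - sizeWidth == 32 exceeds count.countLeadingZeros() == 23,
    // so the allocation is flagged as overflowing.)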
620 else if (numElementsWidth > sizeWidth && 621 numElementsWidth - sizeWidth > count.countLeadingZeros()) 622 hasAnyOverflow = true; 623 624 // Okay, compute a count at the right width. 625 llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth); 626 627 // If there is a brace-initializer, we cannot allocate fewer elements than 628 // there are initializers. If we do, that's treated like an overflow. 629 if (adjustedCount.ult(minElements)) 630 hasAnyOverflow = true; 631 632 // Scale numElements by that. This might overflow, but we don't 633 // care because it only overflows if allocationSize does, too, and 634 // if that overflows then we shouldn't use this. 635 numElements = llvm::ConstantInt::get(CGF.SizeTy, 636 adjustedCount * arraySizeMultiplier); 637 638 // Compute the size before cookie, and track whether it overflowed. 639 bool overflow; 640 llvm::APInt allocationSize 641 = adjustedCount.umul_ov(typeSizeMultiplier, overflow); 642 hasAnyOverflow |= overflow; 643 644 // Add in the cookie, and check whether it's overflowed. 645 if (cookieSize != 0) { 646 // Save the current size without a cookie. This shouldn't be 647 // used if there was overflow. 648 sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize); 649 650 allocationSize = allocationSize.uadd_ov(cookieSize, overflow); 651 hasAnyOverflow |= overflow; 652 } 653 654 // On overflow, produce a -1 so operator new will fail. 655 if (hasAnyOverflow) { 656 size = llvm::Constant::getAllOnesValue(CGF.SizeTy); 657 } else { 658 size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize); 659 } 660 661 // Otherwise, we might need to use the overflow intrinsics. 662 } else { 663 // There are up to five conditions we need to test for: 664 // 1) if isSigned, we need to check whether numElements is negative; 665 // 2) if numElementsWidth > sizeWidth, we need to check whether 666 // numElements is larger than something representable in size_t; 667 // 3) if minElements > 0, we need to check whether numElements is smaller 668 // than that. 669 // 4) we need to compute 670 // sizeWithoutCookie := numElements * typeSizeMultiplier 671 // and check whether it overflows; and 672 // 5) if we need a cookie, we need to compute 673 // size := sizeWithoutCookie + cookieSize 674 // and check whether it overflows. 675 676 llvm::Value *hasOverflow = 0; 677 678 // If numElementsWidth > sizeWidth, then one way or another, we're 679 // going to have to do a comparison for (2), and this happens to 680 // take care of (1), too. 681 if (numElementsWidth > sizeWidth) { 682 llvm::APInt threshold(numElementsWidth, 1); 683 threshold <<= sizeWidth; 684 685 llvm::Value *thresholdV 686 = llvm::ConstantInt::get(numElementsType, threshold); 687 688 hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV); 689 numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy); 690 691 // Otherwise, if we're signed, we want to sext up to size_t. 692 } else if (isSigned) { 693 if (numElementsWidth < sizeWidth) 694 numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy); 695 696 // If there's a non-1 type size multiplier, then we can do the 697 // signedness check at the same time as we do the multiply 698 // because a negative number times anything will cause an 699 // unsigned overflow. Otherwise, we have to do it here. But at least 700 // in this case, we can subsume the >= minElements check. 
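    // (Concretely: for 'new int[n]' with n == -1, the sign-extended count
    // becomes SIZE_MAX, and SIZE_MAX * sizeof(int) trips the unsigned-multiply
    // overflow check emitted further below, so no separate sign check is
    // needed in that case.)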
701 if (typeSizeMultiplier == 1) 702 hasOverflow = CGF.Builder.CreateICmpSLT(numElements, 703 llvm::ConstantInt::get(CGF.SizeTy, minElements)); 704 705 // Otherwise, zext up to size_t if necessary. 706 } else if (numElementsWidth < sizeWidth) { 707 numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy); 708 } 709 710 assert(numElements->getType() == CGF.SizeTy); 711 712 if (minElements) { 713 // Don't allow allocation of fewer elements than we have initializers. 714 if (!hasOverflow) { 715 hasOverflow = CGF.Builder.CreateICmpULT(numElements, 716 llvm::ConstantInt::get(CGF.SizeTy, minElements)); 717 } else if (numElementsWidth > sizeWidth) { 718 // The other existing overflow subsumes this check. 719 // We do an unsigned comparison, since any signed value < -1 is 720 // taken care of either above or below. 721 hasOverflow = CGF.Builder.CreateOr(hasOverflow, 722 CGF.Builder.CreateICmpULT(numElements, 723 llvm::ConstantInt::get(CGF.SizeTy, minElements))); 724 } 725 } 726 727 size = numElements; 728 729 // Multiply by the type size if necessary. This multiplier 730 // includes all the factors for nested arrays. 731 // 732 // This step also causes numElements to be scaled up by the 733 // nested-array factor if necessary. Overflow on this computation 734 // can be ignored because the result shouldn't be used if 735 // allocation fails. 736 if (typeSizeMultiplier != 1) { 737 llvm::Value *umul_with_overflow 738 = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy); 739 740 llvm::Value *tsmV = 741 llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier); 742 llvm::Value *result = 743 CGF.Builder.CreateCall2(umul_with_overflow, size, tsmV); 744 745 llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1); 746 if (hasOverflow) 747 hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed); 748 else 749 hasOverflow = overflowed; 750 751 size = CGF.Builder.CreateExtractValue(result, 0); 752 753 // Also scale up numElements by the array size multiplier. 754 if (arraySizeMultiplier != 1) { 755 // If the base element type size is 1, then we can re-use the 756 // multiply we just did. 757 if (typeSize.isOne()) { 758 assert(arraySizeMultiplier == typeSizeMultiplier); 759 numElements = size; 760 761 // Otherwise we need a separate multiply. 762 } else { 763 llvm::Value *asmV = 764 llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier); 765 numElements = CGF.Builder.CreateMul(numElements, asmV); 766 } 767 } 768 } else { 769 // numElements doesn't need to be scaled. 770 assert(arraySizeMultiplier == 1); 771 } 772 773 // Add in the cookie size if necessary. 774 if (cookieSize != 0) { 775 sizeWithoutCookie = size; 776 777 llvm::Value *uadd_with_overflow 778 = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy); 779 780 llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize); 781 llvm::Value *result = 782 CGF.Builder.CreateCall2(uadd_with_overflow, size, cookieSizeV); 783 784 llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1); 785 if (hasOverflow) 786 hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed); 787 else 788 hasOverflow = overflowed; 789 790 size = CGF.Builder.CreateExtractValue(result, 0); 791 } 792 793 // If we had any possibility of dynamic overflow, make a select to 794 // overwrite 'size' with an all-ones value, which should cause 795 // operator new to throw. 
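    // (An all-ones size_t request cannot realistically be satisfied, so a
    // throwing operator new will raise std::bad_alloc and a nothrow one will
    // return null, which the null check emitted for nothrow allocators below
    // then handles.)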
796 if (hasOverflow) 797 size = CGF.Builder.CreateSelect(hasOverflow, 798 llvm::Constant::getAllOnesValue(CGF.SizeTy), 799 size); 800 } 801 802 if (cookieSize == 0) 803 sizeWithoutCookie = size; 804 else 805 assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?"); 806 807 return size; 808 } 809 810 static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init, 811 QualType AllocType, llvm::Value *NewPtr) { 812 813 CharUnits Alignment = CGF.getContext().getTypeAlignInChars(AllocType); 814 if (!CGF.hasAggregateLLVMType(AllocType)) 815 CGF.EmitScalarInit(Init, 0, CGF.MakeAddrLValue(NewPtr, AllocType, 816 Alignment), 817 false); 818 else if (AllocType->isAnyComplexType()) 819 CGF.EmitComplexExprIntoAddr(Init, NewPtr, 820 AllocType.isVolatileQualified()); 821 else { 822 AggValueSlot Slot 823 = AggValueSlot::forAddr(NewPtr, Alignment, AllocType.getQualifiers(), 824 AggValueSlot::IsDestructed, 825 AggValueSlot::DoesNotNeedGCBarriers, 826 AggValueSlot::IsNotAliased); 827 CGF.EmitAggExpr(Init, Slot); 828 829 CGF.MaybeEmitStdInitializerListCleanup(NewPtr, Init); 830 } 831 } 832 833 void 834 CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E, 835 QualType elementType, 836 llvm::Value *beginPtr, 837 llvm::Value *numElements) { 838 if (!E->hasInitializer()) 839 return; // We have a POD type. 840 841 llvm::Value *explicitPtr = beginPtr; 842 // Find the end of the array, hoisted out of the loop. 843 llvm::Value *endPtr = 844 Builder.CreateInBoundsGEP(beginPtr, numElements, "array.end"); 845 846 unsigned initializerElements = 0; 847 848 const Expr *Init = E->getInitializer(); 849 llvm::AllocaInst *endOfInit = 0; 850 QualType::DestructionKind dtorKind = elementType.isDestructedType(); 851 EHScopeStack::stable_iterator cleanup; 852 llvm::Instruction *cleanupDominator = 0; 853 // If the initializer is an initializer list, first do the explicit elements. 854 if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) { 855 initializerElements = ILE->getNumInits(); 856 857 // Enter a partial-destruction cleanup if necessary. 858 if (needsEHCleanup(dtorKind)) { 859 // In principle we could tell the cleanup where we are more 860 // directly, but the control flow can get so varied here that it 861 // would actually be quite complex. Therefore we go through an 862 // alloca. 863 endOfInit = CreateTempAlloca(beginPtr->getType(), "array.endOfInit"); 864 cleanupDominator = Builder.CreateStore(beginPtr, endOfInit); 865 pushIrregularPartialArrayCleanup(beginPtr, endOfInit, elementType, 866 getDestroyer(dtorKind)); 867 cleanup = EHStack.stable_begin(); 868 } 869 870 for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) { 871 // Tell the cleanup that it needs to destroy up to this 872 // element. TODO: some of these stores can be trivially 873 // observed to be unnecessary. 874 if (endOfInit) Builder.CreateStore(explicitPtr, endOfInit); 875 StoreAnyExprIntoOneUnit(*this, ILE->getInit(i), elementType, explicitPtr); 876 explicitPtr =Builder.CreateConstGEP1_32(explicitPtr, 1, "array.exp.next"); 877 } 878 879 // The remaining elements are filled with the array filler expression. 880 Init = ILE->getArrayFiller(); 881 } 882 883 // Create the continuation block. 884 llvm::BasicBlock *contBB = createBasicBlock("new.loop.end"); 885 886 // If the number of elements isn't constant, we have to now check if there is 887 // anything left to initialize. 
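  // (When the count is a constant, that check is resolved at compile time and
  // the loop is skipped entirely if every element was explicitly initialized.)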
888 if (llvm::ConstantInt *constNum = dyn_cast<llvm::ConstantInt>(numElements)) { 889 // If all elements have already been initialized, skip the whole loop. 890 if (constNum->getZExtValue() <= initializerElements) { 891 // If there was a cleanup, deactivate it. 892 if (cleanupDominator) 893 DeactivateCleanupBlock(cleanup, cleanupDominator); 894 return; 895 } 896 } else { 897 llvm::BasicBlock *nonEmptyBB = createBasicBlock("new.loop.nonempty"); 898 llvm::Value *isEmpty = Builder.CreateICmpEQ(explicitPtr, endPtr, 899 "array.isempty"); 900 Builder.CreateCondBr(isEmpty, contBB, nonEmptyBB); 901 EmitBlock(nonEmptyBB); 902 } 903 904 // Enter the loop. 905 llvm::BasicBlock *entryBB = Builder.GetInsertBlock(); 906 llvm::BasicBlock *loopBB = createBasicBlock("new.loop"); 907 908 EmitBlock(loopBB); 909 910 // Set up the current-element phi. 911 llvm::PHINode *curPtr = 912 Builder.CreatePHI(explicitPtr->getType(), 2, "array.cur"); 913 curPtr->addIncoming(explicitPtr, entryBB); 914 915 // Store the new cleanup position for irregular cleanups. 916 if (endOfInit) Builder.CreateStore(curPtr, endOfInit); 917 918 // Enter a partial-destruction cleanup if necessary. 919 if (!cleanupDominator && needsEHCleanup(dtorKind)) { 920 pushRegularPartialArrayCleanup(beginPtr, curPtr, elementType, 921 getDestroyer(dtorKind)); 922 cleanup = EHStack.stable_begin(); 923 cleanupDominator = Builder.CreateUnreachable(); 924 } 925 926 // Emit the initializer into this element. 927 StoreAnyExprIntoOneUnit(*this, Init, E->getAllocatedType(), curPtr); 928 929 // Leave the cleanup if we entered one. 930 if (cleanupDominator) { 931 DeactivateCleanupBlock(cleanup, cleanupDominator); 932 cleanupDominator->eraseFromParent(); 933 } 934 935 // Advance to the next element. 936 llvm::Value *nextPtr = Builder.CreateConstGEP1_32(curPtr, 1, "array.next"); 937 938 // Check whether we've gotten to the end of the array and, if so, 939 // exit the loop. 940 llvm::Value *isEnd = Builder.CreateICmpEQ(nextPtr, endPtr, "array.atend"); 941 Builder.CreateCondBr(isEnd, contBB, loopBB); 942 curPtr->addIncoming(nextPtr, Builder.GetInsertBlock()); 943 944 EmitBlock(contBB); 945 } 946 947 static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T, 948 llvm::Value *NewPtr, llvm::Value *Size) { 949 CGF.EmitCastToVoidPtr(NewPtr); 950 CharUnits Alignment = CGF.getContext().getTypeAlignInChars(T); 951 CGF.Builder.CreateMemSet(NewPtr, CGF.Builder.getInt8(0), Size, 952 Alignment.getQuantity(), false); 953 } 954 955 static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E, 956 QualType ElementType, 957 llvm::Value *NewPtr, 958 llvm::Value *NumElements, 959 llvm::Value *AllocSizeWithoutCookie) { 960 const Expr *Init = E->getInitializer(); 961 if (E->isArray()) { 962 if (const CXXConstructExpr *CCE = dyn_cast_or_null<CXXConstructExpr>(Init)){ 963 CXXConstructorDecl *Ctor = CCE->getConstructor(); 964 if (Ctor->isTrivial()) { 965 // If new expression did not specify value-initialization, then there 966 // is no initialization. 967 if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty()) 968 return; 969 970 if (CGF.CGM.getTypes().isZeroInitializable(ElementType)) { 971 // Optimization: since zero initialization will just set the memory 972 // to all zeroes, generate a single memset to do it in one shot. 
973 EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie); 974 return; 975 } 976 } 977 978 CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr, 979 CCE->arg_begin(), CCE->arg_end(), 980 CCE->requiresZeroInitialization()); 981 return; 982 } else if (Init && isa<ImplicitValueInitExpr>(Init) && 983 CGF.CGM.getTypes().isZeroInitializable(ElementType)) { 984 // Optimization: since zero initialization will just set the memory 985 // to all zeroes, generate a single memset to do it in one shot. 986 EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie); 987 return; 988 } 989 CGF.EmitNewArrayInitializer(E, ElementType, NewPtr, NumElements); 990 return; 991 } 992 993 if (!Init) 994 return; 995 996 StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr); 997 } 998 999 namespace { 1000 /// A cleanup to call the given 'operator delete' function upon 1001 /// abnormal exit from a new expression. 1002 class CallDeleteDuringNew : public EHScopeStack::Cleanup { 1003 size_t NumPlacementArgs; 1004 const FunctionDecl *OperatorDelete; 1005 llvm::Value *Ptr; 1006 llvm::Value *AllocSize; 1007 1008 RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); } 1009 1010 public: 1011 static size_t getExtraSize(size_t NumPlacementArgs) { 1012 return NumPlacementArgs * sizeof(RValue); 1013 } 1014 1015 CallDeleteDuringNew(size_t NumPlacementArgs, 1016 const FunctionDecl *OperatorDelete, 1017 llvm::Value *Ptr, 1018 llvm::Value *AllocSize) 1019 : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete), 1020 Ptr(Ptr), AllocSize(AllocSize) {} 1021 1022 void setPlacementArg(unsigned I, RValue Arg) { 1023 assert(I < NumPlacementArgs && "index out of range"); 1024 getPlacementArgs()[I] = Arg; 1025 } 1026 1027 void Emit(CodeGenFunction &CGF, Flags flags) { 1028 const FunctionProtoType *FPT 1029 = OperatorDelete->getType()->getAs<FunctionProtoType>(); 1030 assert(FPT->getNumArgs() == NumPlacementArgs + 1 || 1031 (FPT->getNumArgs() == 2 && NumPlacementArgs == 0)); 1032 1033 CallArgList DeleteArgs; 1034 1035 // The first argument is always a void*. 1036 FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin(); 1037 DeleteArgs.add(RValue::get(Ptr), *AI++); 1038 1039 // A member 'operator delete' can take an extra 'size_t' argument. 1040 if (FPT->getNumArgs() == NumPlacementArgs + 2) 1041 DeleteArgs.add(RValue::get(AllocSize), *AI++); 1042 1043 // Pass the rest of the arguments, which must match exactly. 1044 for (unsigned I = 0; I != NumPlacementArgs; ++I) 1045 DeleteArgs.add(getPlacementArgs()[I], *AI++); 1046 1047 // Call 'operator delete'. 1048 CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(DeleteArgs, FPT), 1049 CGF.CGM.GetAddrOfFunction(OperatorDelete), 1050 ReturnValueSlot(), DeleteArgs, OperatorDelete); 1051 } 1052 }; 1053 1054 /// A cleanup to call the given 'operator delete' function upon 1055 /// abnormal exit from a new expression when the new expression is 1056 /// conditional. 
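  /// For illustration (hypothetical user code): in an expression such as
  ///
  ///   T *p = cond ? new (buf) T(arg) : 0;
  ///
  /// the new-expression is emitted on only one branch, so the allocated
  /// pointer, the allocation size, and any placement arguments must be saved
  /// as dominating values before the cleanup below can refer to them.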
1057 class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup { 1058 size_t NumPlacementArgs; 1059 const FunctionDecl *OperatorDelete; 1060 DominatingValue<RValue>::saved_type Ptr; 1061 DominatingValue<RValue>::saved_type AllocSize; 1062 1063 DominatingValue<RValue>::saved_type *getPlacementArgs() { 1064 return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1); 1065 } 1066 1067 public: 1068 static size_t getExtraSize(size_t NumPlacementArgs) { 1069 return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type); 1070 } 1071 1072 CallDeleteDuringConditionalNew(size_t NumPlacementArgs, 1073 const FunctionDecl *OperatorDelete, 1074 DominatingValue<RValue>::saved_type Ptr, 1075 DominatingValue<RValue>::saved_type AllocSize) 1076 : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete), 1077 Ptr(Ptr), AllocSize(AllocSize) {} 1078 1079 void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) { 1080 assert(I < NumPlacementArgs && "index out of range"); 1081 getPlacementArgs()[I] = Arg; 1082 } 1083 1084 void Emit(CodeGenFunction &CGF, Flags flags) { 1085 const FunctionProtoType *FPT 1086 = OperatorDelete->getType()->getAs<FunctionProtoType>(); 1087 assert(FPT->getNumArgs() == NumPlacementArgs + 1 || 1088 (FPT->getNumArgs() == 2 && NumPlacementArgs == 0)); 1089 1090 CallArgList DeleteArgs; 1091 1092 // The first argument is always a void*. 1093 FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin(); 1094 DeleteArgs.add(Ptr.restore(CGF), *AI++); 1095 1096 // A member 'operator delete' can take an extra 'size_t' argument. 1097 if (FPT->getNumArgs() == NumPlacementArgs + 2) { 1098 RValue RV = AllocSize.restore(CGF); 1099 DeleteArgs.add(RV, *AI++); 1100 } 1101 1102 // Pass the rest of the arguments, which must match exactly. 1103 for (unsigned I = 0; I != NumPlacementArgs; ++I) { 1104 RValue RV = getPlacementArgs()[I].restore(CGF); 1105 DeleteArgs.add(RV, *AI++); 1106 } 1107 1108 // Call 'operator delete'. 1109 CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(DeleteArgs, FPT), 1110 CGF.CGM.GetAddrOfFunction(OperatorDelete), 1111 ReturnValueSlot(), DeleteArgs, OperatorDelete); 1112 } 1113 }; 1114 } 1115 1116 /// Enter a cleanup to call 'operator delete' if the initializer in a 1117 /// new-expression throws. 1118 static void EnterNewDeleteCleanup(CodeGenFunction &CGF, 1119 const CXXNewExpr *E, 1120 llvm::Value *NewPtr, 1121 llvm::Value *AllocSize, 1122 const CallArgList &NewArgs) { 1123 // If we're not inside a conditional branch, then the cleanup will 1124 // dominate and we can do the easier (and more efficient) thing. 1125 if (!CGF.isInConditionalBranch()) { 1126 CallDeleteDuringNew *Cleanup = CGF.EHStack 1127 .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup, 1128 E->getNumPlacementArgs(), 1129 E->getOperatorDelete(), 1130 NewPtr, AllocSize); 1131 for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) 1132 Cleanup->setPlacementArg(I, NewArgs[I+1].RV); 1133 1134 return; 1135 } 1136 1137 // Otherwise, we need to save all this stuff. 
1138 DominatingValue<RValue>::saved_type SavedNewPtr = 1139 DominatingValue<RValue>::save(CGF, RValue::get(NewPtr)); 1140 DominatingValue<RValue>::saved_type SavedAllocSize = 1141 DominatingValue<RValue>::save(CGF, RValue::get(AllocSize)); 1142 1143 CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack 1144 .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(EHCleanup, 1145 E->getNumPlacementArgs(), 1146 E->getOperatorDelete(), 1147 SavedNewPtr, 1148 SavedAllocSize); 1149 for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) 1150 Cleanup->setPlacementArg(I, 1151 DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV)); 1152 1153 CGF.initFullExprCleanup(); 1154 } 1155 1156 llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) { 1157 // The element type being allocated. 1158 QualType allocType = getContext().getBaseElementType(E->getAllocatedType()); 1159 1160 // 1. Build a call to the allocation function. 1161 FunctionDecl *allocator = E->getOperatorNew(); 1162 const FunctionProtoType *allocatorType = 1163 allocator->getType()->castAs<FunctionProtoType>(); 1164 1165 CallArgList allocatorArgs; 1166 1167 // The allocation size is the first argument. 1168 QualType sizeType = getContext().getSizeType(); 1169 1170 // If there is a brace-initializer, cannot allocate fewer elements than inits. 1171 unsigned minElements = 0; 1172 if (E->isArray() && E->hasInitializer()) { 1173 if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer())) 1174 minElements = ILE->getNumInits(); 1175 } 1176 1177 llvm::Value *numElements = 0; 1178 llvm::Value *allocSizeWithoutCookie = 0; 1179 llvm::Value *allocSize = 1180 EmitCXXNewAllocSize(*this, E, minElements, numElements, 1181 allocSizeWithoutCookie); 1182 1183 allocatorArgs.add(RValue::get(allocSize), sizeType); 1184 1185 // Emit the rest of the arguments. 1186 // FIXME: Ideally, this should just use EmitCallArgs. 1187 CXXNewExpr::const_arg_iterator placementArg = E->placement_arg_begin(); 1188 1189 // First, use the types from the function type. 1190 // We start at 1 here because the first argument (the allocation size) 1191 // has already been emitted. 1192 for (unsigned i = 1, e = allocatorType->getNumArgs(); i != e; 1193 ++i, ++placementArg) { 1194 QualType argType = allocatorType->getArgType(i); 1195 1196 assert(getContext().hasSameUnqualifiedType(argType.getNonReferenceType(), 1197 placementArg->getType()) && 1198 "type mismatch in call argument!"); 1199 1200 EmitCallArg(allocatorArgs, *placementArg, argType); 1201 } 1202 1203 // Either we've emitted all the call args, or we have a call to a 1204 // variadic function. 1205 assert((placementArg == E->placement_arg_end() || 1206 allocatorType->isVariadic()) && 1207 "Extra arguments to non-variadic function!"); 1208 1209 // If we still have any arguments, emit them using the type of the argument. 1210 for (CXXNewExpr::const_arg_iterator placementArgsEnd = E->placement_arg_end(); 1211 placementArg != placementArgsEnd; ++placementArg) { 1212 EmitCallArg(allocatorArgs, *placementArg, placementArg->getType()); 1213 } 1214 1215 // Emit the allocation call. If the allocator is a global placement 1216 // operator, just "inline" it directly. 1217 RValue RV; 1218 if (allocator->isReservedGlobalPlacementOperator()) { 1219 assert(allocatorArgs.size() == 2); 1220 RV = allocatorArgs[1].RV; 1221 // TODO: kill any unnecessary computations done for the size 1222 // argument. 
1223 } else { 1224 RV = EmitCall(CGM.getTypes().arrangeFreeFunctionCall(allocatorArgs, 1225 allocatorType), 1226 CGM.GetAddrOfFunction(allocator), ReturnValueSlot(), 1227 allocatorArgs, allocator); 1228 } 1229 1230 // Emit a null check on the allocation result if the allocation 1231 // function is allowed to return null (because it has a non-throwing 1232 // exception spec; for this part, we inline 1233 // CXXNewExpr::shouldNullCheckAllocation()) and we have an 1234 // interesting initializer. 1235 bool nullCheck = allocatorType->isNothrow(getContext()) && 1236 (!allocType.isPODType(getContext()) || E->hasInitializer()); 1237 1238 llvm::BasicBlock *nullCheckBB = 0; 1239 llvm::BasicBlock *contBB = 0; 1240 1241 llvm::Value *allocation = RV.getScalarVal(); 1242 unsigned AS = allocation->getType()->getPointerAddressSpace(); 1243 1244 // The null-check means that the initializer is conditionally 1245 // evaluated. 1246 ConditionalEvaluation conditional(*this); 1247 1248 if (nullCheck) { 1249 conditional.begin(*this); 1250 1251 nullCheckBB = Builder.GetInsertBlock(); 1252 llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull"); 1253 contBB = createBasicBlock("new.cont"); 1254 1255 llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull"); 1256 Builder.CreateCondBr(isNull, contBB, notNullBB); 1257 EmitBlock(notNullBB); 1258 } 1259 1260 // If there's an operator delete, enter a cleanup to call it if an 1261 // exception is thrown. 1262 EHScopeStack::stable_iterator operatorDeleteCleanup; 1263 llvm::Instruction *cleanupDominator = 0; 1264 if (E->getOperatorDelete() && 1265 !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) { 1266 EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs); 1267 operatorDeleteCleanup = EHStack.stable_begin(); 1268 cleanupDominator = Builder.CreateUnreachable(); 1269 } 1270 1271 assert((allocSize == allocSizeWithoutCookie) == 1272 CalculateCookiePadding(*this, E).isZero()); 1273 if (allocSize != allocSizeWithoutCookie) { 1274 assert(E->isArray()); 1275 allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation, 1276 numElements, 1277 E, allocType); 1278 } 1279 1280 llvm::Type *elementPtrTy 1281 = ConvertTypeForMem(allocType)->getPointerTo(AS); 1282 llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy); 1283 1284 EmitNewInitializer(*this, E, allocType, result, numElements, 1285 allocSizeWithoutCookie); 1286 if (E->isArray()) { 1287 // NewPtr is a pointer to the base element type. If we're 1288 // allocating an array of arrays, we'll need to cast back to the 1289 // array pointer type. 1290 llvm::Type *resultType = ConvertTypeForMem(E->getType()); 1291 if (result->getType() != resultType) 1292 result = Builder.CreateBitCast(result, resultType); 1293 } 1294 1295 // Deactivate the 'operator delete' cleanup if we finished 1296 // initialization. 
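  // (The cleanup exists only so that the storage is freed if the initializer
  // throws; once initialization has completed normally, operator delete must
  // not be called.)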
1297 if (operatorDeleteCleanup.isValid()) { 1298 DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator); 1299 cleanupDominator->eraseFromParent(); 1300 } 1301 1302 if (nullCheck) { 1303 conditional.end(*this); 1304 1305 llvm::BasicBlock *notNullBB = Builder.GetInsertBlock(); 1306 EmitBlock(contBB); 1307 1308 llvm::PHINode *PHI = Builder.CreatePHI(result->getType(), 2); 1309 PHI->addIncoming(result, notNullBB); 1310 PHI->addIncoming(llvm::Constant::getNullValue(result->getType()), 1311 nullCheckBB); 1312 1313 result = PHI; 1314 } 1315 1316 return result; 1317 } 1318 1319 void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD, 1320 llvm::Value *Ptr, 1321 QualType DeleteTy) { 1322 assert(DeleteFD->getOverloadedOperator() == OO_Delete); 1323 1324 const FunctionProtoType *DeleteFTy = 1325 DeleteFD->getType()->getAs<FunctionProtoType>(); 1326 1327 CallArgList DeleteArgs; 1328 1329 // Check if we need to pass the size to the delete operator. 1330 llvm::Value *Size = 0; 1331 QualType SizeTy; 1332 if (DeleteFTy->getNumArgs() == 2) { 1333 SizeTy = DeleteFTy->getArgType(1); 1334 CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy); 1335 Size = llvm::ConstantInt::get(ConvertType(SizeTy), 1336 DeleteTypeSize.getQuantity()); 1337 } 1338 1339 QualType ArgTy = DeleteFTy->getArgType(0); 1340 llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy)); 1341 DeleteArgs.add(RValue::get(DeletePtr), ArgTy); 1342 1343 if (Size) 1344 DeleteArgs.add(RValue::get(Size), SizeTy); 1345 1346 // Emit the call to delete. 1347 EmitCall(CGM.getTypes().arrangeFreeFunctionCall(DeleteArgs, DeleteFTy), 1348 CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(), 1349 DeleteArgs, DeleteFD); 1350 } 1351 1352 namespace { 1353 /// Calls the given 'operator delete' on a single object. 1354 struct CallObjectDelete : EHScopeStack::Cleanup { 1355 llvm::Value *Ptr; 1356 const FunctionDecl *OperatorDelete; 1357 QualType ElementType; 1358 1359 CallObjectDelete(llvm::Value *Ptr, 1360 const FunctionDecl *OperatorDelete, 1361 QualType ElementType) 1362 : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {} 1363 1364 void Emit(CodeGenFunction &CGF, Flags flags) { 1365 CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType); 1366 } 1367 }; 1368 } 1369 1370 /// Emit the code for deleting a single object. 1371 static void EmitObjectDelete(CodeGenFunction &CGF, 1372 const FunctionDecl *OperatorDelete, 1373 llvm::Value *Ptr, 1374 QualType ElementType, 1375 bool UseGlobalDelete) { 1376 // Find the destructor for the type, if applicable. If the 1377 // destructor is virtual, we'll just emit the vcall and return. 1378 const CXXDestructorDecl *Dtor = 0; 1379 if (const RecordType *RT = ElementType->getAs<RecordType>()) { 1380 CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl()); 1381 if (RD->hasDefinition() && !RD->hasTrivialDestructor()) { 1382 Dtor = RD->getDestructor(); 1383 1384 if (Dtor->isVirtual()) { 1385 if (UseGlobalDelete) { 1386 // If we're supposed to call the global delete, make sure we do so 1387 // even if the destructor throws. 1388 1389 // Derive the complete-object pointer, which is what we need 1390 // to pass to the deallocation function. 
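          // (With multiple or virtual inheritance, Ptr may address a
          // base-class subobject; the deallocation function needs the address
          // of the complete object, which the ABI typically recovers from the
          // vtable's offset-to-top entry.)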
          llvm::Value *completePtr =
            CGF.CGM.getCXXABI().adjustToCompleteObject(CGF, Ptr, ElementType);

          CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                                    completePtr, OperatorDelete,
                                                    ElementType);
        }

        llvm::Type *Ty =
          CGF.getTypes().GetFunctionType(
            CGF.getTypes().arrangeCXXDestructor(Dtor, Dtor_Complete));

        llvm::Value *Callee
          = CGF.BuildVirtualCall(Dtor,
                                 UseGlobalDelete? Dtor_Complete : Dtor_Deleting,
                                 Ptr, Ty);
        // FIXME: Provide a source location here.
        CGF.EmitCXXMemberCall(Dtor, SourceLocation(), Callee, ReturnValueSlot(),
                              Ptr, /*VTT=*/0, 0, 0);

        if (UseGlobalDelete) {
          CGF.PopCleanupBlock();
        }

        return;
      }
    }
  }

  // Make sure that we call delete even if the dtor throws.
  // This doesn't have to be a conditional cleanup because we're going
  // to pop it off in a second.
  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                            Ptr, OperatorDelete, ElementType);

  if (Dtor)
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false, Ptr);
  else if (CGF.getLangOpts().ObjCAutoRefCount &&
           ElementType->isObjCLifetimeType()) {
    switch (ElementType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong: {
      // Load the pointer value.
      llvm::Value *PtrValue = CGF.Builder.CreateLoad(Ptr,
                                             ElementType.isVolatileQualified());

      CGF.EmitARCRelease(PtrValue, /*precise*/ true);
      break;
    }

    case Qualifiers::OCL_Weak:
      CGF.EmitARCDestroyWeak(Ptr);
      break;
    }
  }

  CGF.PopCleanupBlock();
}

namespace {
  /// Calls the given 'operator delete' on an array of objects.
  struct CallArrayDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    llvm::Value *NumElements;
    QualType ElementType;
    CharUnits CookieSize;

    CallArrayDelete(llvm::Value *Ptr,
                    const FunctionDecl *OperatorDelete,
                    llvm::Value *NumElements,
                    QualType ElementType,
                    CharUnits CookieSize)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
        ElementType(ElementType), CookieSize(CookieSize) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const FunctionProtoType *DeleteFTy =
        OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);

      CallArgList Args;

      // Pass the pointer as the first argument.
      QualType VoidPtrTy = DeleteFTy->getArgType(0);
      llvm::Value *DeletePtr
        = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
      Args.add(RValue::get(DeletePtr), VoidPtrTy);

      // Pass the original requested size as the second argument.
      if (DeleteFTy->getNumArgs() == 2) {
        QualType size_t = DeleteFTy->getArgType(1);
        llvm::IntegerType *SizeTy
          = cast<llvm::IntegerType>(CGF.ConvertType(size_t));

        CharUnits ElementTypeSize =
          CGF.CGM.getContext().getTypeSizeInChars(ElementType);

        // The size of an element, multiplied by the number of elements.
        llvm::Value *Size
          = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
        Size = CGF.Builder.CreateMul(Size, NumElements);

        // Plus the size of the cookie if applicable.
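        // (Under the common Itanium C++ ABI the cookie is a size_t holding the
        // element count, stored immediately before the first array element.)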
1500 if (!CookieSize.isZero()) { 1501 llvm::Value *CookieSizeV 1502 = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity()); 1503 Size = CGF.Builder.CreateAdd(Size, CookieSizeV); 1504 } 1505 1506 Args.add(RValue::get(Size), size_t); 1507 } 1508 1509 // Emit the call to delete. 1510 CGF.EmitCall(CGF.getTypes().arrangeFreeFunctionCall(Args, DeleteFTy), 1511 CGF.CGM.GetAddrOfFunction(OperatorDelete), 1512 ReturnValueSlot(), Args, OperatorDelete); 1513 } 1514 }; 1515 } 1516 1517 /// Emit the code for deleting an array of objects. 1518 static void EmitArrayDelete(CodeGenFunction &CGF, 1519 const CXXDeleteExpr *E, 1520 llvm::Value *deletedPtr, 1521 QualType elementType) { 1522 llvm::Value *numElements = 0; 1523 llvm::Value *allocatedPtr = 0; 1524 CharUnits cookieSize; 1525 CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType, 1526 numElements, allocatedPtr, cookieSize); 1527 1528 assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer"); 1529 1530 // Make sure that we call delete even if one of the dtors throws. 1531 const FunctionDecl *operatorDelete = E->getOperatorDelete(); 1532 CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup, 1533 allocatedPtr, operatorDelete, 1534 numElements, elementType, 1535 cookieSize); 1536 1537 // Destroy the elements. 1538 if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) { 1539 assert(numElements && "no element count for a type with a destructor!"); 1540 1541 llvm::Value *arrayEnd = 1542 CGF.Builder.CreateInBoundsGEP(deletedPtr, numElements, "delete.end"); 1543 1544 // Note that it is legal to allocate a zero-length array, and we 1545 // can never fold the check away because the length should always 1546 // come from a cookie. 1547 CGF.emitArrayDestroy(deletedPtr, arrayEnd, elementType, 1548 CGF.getDestroyer(dtorKind), 1549 /*checkZeroLength*/ true, 1550 CGF.needsEHCleanup(dtorKind)); 1551 } 1552 1553 // Pop the cleanup block. 1554 CGF.PopCleanupBlock(); 1555 } 1556 1557 void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) { 1558 const Expr *Arg = E->getArgument(); 1559 llvm::Value *Ptr = EmitScalarExpr(Arg); 1560 1561 // Null check the pointer. 1562 llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull"); 1563 llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end"); 1564 1565 llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull"); 1566 1567 Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull); 1568 EmitBlock(DeleteNotNull); 1569 1570 // We might be deleting a pointer to array. If so, GEP down to the 1571 // first non-array element. 1572 // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*) 1573 QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType(); 1574 if (DeleteTy->isConstantArrayType()) { 1575 llvm::Value *Zero = Builder.getInt32(0); 1576 SmallVector<llvm::Value*,8> GEP; 1577 1578 GEP.push_back(Zero); // point at the outermost array 1579 1580 // For each layer of array type we're pointing at: 1581 while (const ConstantArrayType *Arr 1582 = getContext().getAsConstantArrayType(DeleteTy)) { 1583 // 1. Unpeel the array type. 1584 DeleteTy = Arr->getElementType(); 1585 1586 // 2. GEP to the first element of the array. 
      GEP.push_back(Zero);
    }

    Ptr = Builder.CreateInBoundsGEP(Ptr, GEP, "del.first");
  }

  assert(ConvertTypeForMem(DeleteTy) ==
         cast<llvm::PointerType>(Ptr->getType())->getElementType());

  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E, Ptr, DeleteTy);
  } else {
    EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy,
                     E->isGlobalDelete());
  }

  EmitBlock(DeleteEnd);
}

static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
  // void __cxa_bad_typeid();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
}

static void EmitBadTypeidCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadTypeidFn(CGF);
  CGF.EmitCallOrInvoke(Fn).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF,
                                         const Expr *E,
                                         llvm::Type *StdTypeInfoPtrTy) {
  // Get the address of the object; its vtable pointer is loaded below.
  llvm::Value *ThisPtr = CGF.EmitLValue(E).getAddress();

  // C++ [expr.typeid]p2:
  // If the glvalue expression is obtained by applying the unary * operator to
  // a pointer and the pointer is a null pointer value, the typeid expression
  // throws the std::bad_typeid exception.
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E->IgnoreParens())) {
    if (UO->getOpcode() == UO_Deref) {
      llvm::BasicBlock *BadTypeidBlock =
        CGF.createBasicBlock("typeid.bad_typeid");
      llvm::BasicBlock *EndBlock =
        CGF.createBasicBlock("typeid.end");

      llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr);
      CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);

      CGF.EmitBlock(BadTypeidBlock);
      EmitBadTypeidCall(CGF);
      CGF.EmitBlock(EndBlock);
    }
  }

  llvm::Value *Value = CGF.GetVTablePtr(ThisPtr,
                                        StdTypeInfoPtrTy->getPointerTo());

  // Load the type info.
  Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
  return CGF.Builder.CreateLoad(Value);
}

llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  llvm::Type *StdTypeInfoPtrTy =
    ConvertType(E->getType())->getPointerTo();

  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
      CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
    return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
  }

  // C++ [expr.typeid]p2:
  // When typeid is applied to a glvalue expression whose type is a
  // polymorphic class type, the result refers to a std::type_info object
  // representing the type of the most derived object (that is, the dynamic
  // type) to which the glvalue refers.
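  //
  // Illustrative example (not from this file):
  //
  //   struct Base { virtual ~Base(); };
  //   struct Derived : Base { };
  //
  //   Base *b = new Derived;
  //   typeid(*b);   // refers to the type_info for Derived, read through
  //                 // b's vtable by EmitTypeidFromVTable
  //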
  if (E->isPotentiallyEvaluated())
    return EmitTypeidFromVTable(*this, E->getExprOperand(),
                                StdTypeInfoPtrTy);

  QualType OperandTy = E->getExprOperand()->getType();
  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
                               StdTypeInfoPtrTy);
}

static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
  // void *__dynamic_cast(const void *sub,
  //                      const abi::__class_type_info *src,
  //                      const abi::__class_type_info *dst,
  //                      std::ptrdiff_t src2dst_offset);

  llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
  llvm::Type *PtrDiffTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());

  llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };

  llvm::FunctionType *FTy =
    llvm::FunctionType::get(Int8PtrTy, Args, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast");
}

static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
  // void __cxa_bad_cast();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
}

static void EmitBadCastCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadCastFn(CGF);
  CGF.EmitCallOrInvoke(Fn).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

static llvm::Value *
EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
                    QualType SrcTy, QualType DestTy,
                    llvm::BasicBlock *CastEnd) {
  llvm::Type *PtrDiffLTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  if (const PointerType *PTy = DestTy->getAs<PointerType>()) {
    if (PTy->getPointeeType()->isVoidType()) {
      // C++ [expr.dynamic.cast]p7:
      // If T is "pointer to cv void," then the result is a pointer to the
      // most derived object pointed to by v.

      // Get the vtable pointer.
      llvm::Value *VTable = CGF.GetVTablePtr(Value, PtrDiffLTy->getPointerTo());

      // Get the offset-to-top from the vtable.
      llvm::Value *OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
      OffsetToTop = CGF.Builder.CreateLoad(OffsetToTop, "offset.to.top");

      // Finally, add the offset to the pointer.
      Value = CGF.EmitCastToVoidPtr(Value);
      Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);

      return CGF.Builder.CreateBitCast(Value, DestLTy);
    }
  }

  QualType SrcRecordTy;
  QualType DestRecordTy;

  if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) {
    SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
    DestRecordTy = DestPTy->getPointeeType();
  } else {
    SrcRecordTy = SrcTy;
    DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
  }

  assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
  assert(DestRecordTy->isRecordType() && "dest type must be a record type!");

  llvm::Value *SrcRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // FIXME: Actually compute a hint here.
  llvm::Value *OffsetHint = llvm::ConstantInt::get(PtrDiffLTy, -1ULL);

  // Emit the call to __dynamic_cast.
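  // The arguments are the object pointer, the source and destination
  // type_info objects, and the offset hint above; the runtime returns the
  // adjusted pointer on success and null on failure.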
  Value = CGF.EmitCastToVoidPtr(Value);
  Value = CGF.Builder.CreateCall4(getDynamicCastFn(CGF), Value,
                                  SrcRTTI, DestRTTI, OffsetHint);
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);

  // C++ [expr.dynamic.cast]p9:
  // A failed cast to reference type throws std::bad_cast
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
      CGF.createBasicBlock("dynamic_cast.bad_cast");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}

static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
                                          QualType DestTy) {
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  if (DestTy->isPointerType())
    return llvm::Constant::getNullValue(DestLTy);

  // C++ [expr.dynamic.cast]p9:
  // A failed cast to reference type throws std::bad_cast
  EmitBadCastCall(CGF);

  CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
  return llvm::UndefValue::get(DestLTy);
}

llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
                                              const CXXDynamicCastExpr *DCE) {
  QualType DestTy = DCE->getTypeAsWritten();

  if (DCE->isAlwaysNull())
    return EmitDynamicCastToNull(*this, DestTy);

  QualType SrcTy = DCE->getSubExpr()->getType();

  // C++ [expr.dynamic.cast]p4:
  // If the value of v is a null pointer value in the pointer case, the result
  // is the null pointer value of type T.
  bool ShouldNullCheckSrcValue = SrcTy->isPointerType();

  llvm::BasicBlock *CastNull = 0;
  llvm::BasicBlock *CastNotNull = 0;
  llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");

  if (ShouldNullCheckSrcValue) {
    CastNull = createBasicBlock("dynamic_cast.null");
    CastNotNull = createBasicBlock("dynamic_cast.notnull");

    llvm::Value *IsNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  Value = EmitDynamicCastCall(*this, Value, SrcTy, DestTy, CastEnd);

  if (ShouldNullCheckSrcValue) {
    EmitBranch(CastEnd);

    EmitBlock(CastNull);
    EmitBranch(CastEnd);
  }

  EmitBlock(CastEnd);

  if (ShouldNullCheckSrcValue) {
    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);

    Value = PHI;
  }

  return Value;
}

void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
  RunCleanupsScope Scope(*this);
  LValue SlotLV = MakeAddrLValue(Slot.getAddr(), E->getType(),
                                 Slot.getAlignment());

  CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
  for (LambdaExpr::capture_init_iterator i = E->capture_init_begin(),
                                         e = E->capture_init_end();
       i != e; ++i, ++CurField) {
    // Emit the initializer for this capture into its field in the closure.
    LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
    ArrayRef<VarDecl *> ArrayIndexes;
    if (CurField->getType()->isArrayType())
      ArrayIndexes = E->getCaptureInitIndexVars(i);
    EmitInitializerForField(*CurField, LV, *i, ArrayIndexes);
  }
}
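// Illustrative example (not part of this file) of what EmitLambdaExpr above
// handles: for
//
//   int x = 0;
//   auto add = [x](int y) { return x + y; };
//
// the closure class has one field for the by-copy capture of 'x', and
// EmitLambdaExpr initializes that field in the slot provided for the
// lambda object.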