//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions
//
//===----------------------------------------------------------------------===//

#include "clang/Frontend/CodeGenOptions.h"
#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGDebugInfo.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/CallSite.h"

using namespace clang;
using namespace CodeGen;

RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
                                          llvm::Value *Callee,
                                          ReturnValueSlot ReturnValue,
                                          llvm::Value *This,
                                          llvm::Value *VTT,
                                          CallExpr::const_arg_iterator ArgBeg,
                                          CallExpr::const_arg_iterator ArgEnd) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  CallArgList Args;

  // Push the this ptr.
  Args.add(RValue::get(This), MD->getThisType(getContext()));

  // If there is a VTT parameter, emit it.
  if (VTT) {
    QualType T = getContext().getPointerType(getContext().VoidPtrTy);
    Args.add(RValue::get(VTT), T);
  }

  // And the rest of the call args
  EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);

  QualType ResultType = FPT->getResultType();
  return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args,
                                                 FPT->getExtInfo()),
                  Callee, ReturnValue, Args, MD);
}

static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
  const Expr *E = Base;

  while (true) {
    E = E->IgnoreParens();
    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
      if (CE->getCastKind() == CK_DerivedToBase ||
          CE->getCastKind() == CK_UncheckedDerivedToBase ||
          CE->getCastKind() == CK_NoOp) {
        E = CE->getSubExpr();
        continue;
      }
    }

    break;
  }

  QualType DerivedType = E->getType();
  if (const PointerType *PTy = DerivedType->getAs<PointerType>())
    DerivedType = PTy->getPointeeType();

  return cast<CXXRecordDecl>(DerivedType->castAs<RecordType>()->getDecl());
}

// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
// quite what we want.
static const Expr *skipNoOpCastsAndParens(const Expr *E) {
  while (true) {
    if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
      E = PE->getSubExpr();
      continue;
    }

    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
      if (CE->getCastKind() == CK_NoOp) {
        E = CE->getSubExpr();
        continue;
      }
    }
    if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
      if (UO->getOpcode() == UO_Extension) {
        E = UO->getSubExpr();
        continue;
      }
    }
    return E;
  }
}

/// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on given
/// expr can be devirtualized.
static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
                                               const Expr *Base,
                                               const CXXMethodDecl *MD) {

  // When building with -fapple-kext, all calls must go through the vtable since
  // the kernel linker can do runtime patching of vtables.
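  // (Note that this check deliberately precedes the 'final'-based checks
  // below: under -fapple-kext even an otherwise-devirtualizable call must
  // stay virtual, since the vtable entry may be patched at load time.)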
  if (Context.getLangOptions().AppleKext)
    return false;

  // If the most derived class is marked final, we know that no subclass can
  // override this member function and so we can devirtualize it. For example:
  //
  // struct A { virtual void f(); }
  // struct B final : A { };
  //
  // void f(B *b) {
  //   b->f();
  // }
  //
  const CXXRecordDecl *MostDerivedClassDecl = getMostDerivedClassDecl(Base);
  if (MostDerivedClassDecl->hasAttr<FinalAttr>())
    return true;

  // If the member function is marked 'final', we know that it can't be
  // overridden and can therefore devirtualize it.
  if (MD->hasAttr<FinalAttr>())
    return true;

  // Similarly, if the class itself is marked 'final' it can't be overridden
  // and we can therefore devirtualize the member function call.
  if (MD->getParent()->hasAttr<FinalAttr>())
    return true;

  Base = skipNoOpCastsAndParens(Base);
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
      // This is a record decl. We know the type and can devirtualize it.
      return VD->getType()->isRecordType();
    }

    return false;
  }

  // We can always devirtualize calls on temporary object expressions.
  if (isa<CXXConstructExpr>(Base))
    return true;

  // And calls on bound temporaries.
  if (isa<CXXBindTemporaryExpr>(Base))
    return true;

  // Check if this is a call expr that returns a record type.
  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
    return CE->getCallReturnType()->isRecordType();

  // We can't devirtualize the call.
  return false;
}

// Note: This function also emits constructor calls to support an MSVC
// extension allowing explicit constructor function calls.
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  const Expr *callee = CE->getCallee()->IgnoreParens();

  if (isa<BinaryOperator>(callee))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(callee);
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  CGDebugInfo *DI = getDebugInfo();
  if (DI && CGM.getCodeGenOpts().LimitDebugInfo
      && !isa<CallExpr>(ME->getBase())) {
    QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
    if (const PointerType * PTy = dyn_cast<PointerType>(PQTy)) {
      DI->getOrCreateRecordType(PTy->getPointeeType(),
                                MD->getParent()->getLocation());
    }
  }

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
    return EmitCall(getContext().getPointerType(MD->getType()), Callee,
                    ReturnValue, CE->arg_begin(), CE->arg_end());
  }

  // Compute the object pointer.
  llvm::Value *This;
  if (ME->isArrow())
    This = EmitScalarExpr(ME->getBase());
  else
    This = EmitLValue(ME->getBase()).getAddress();

  if (MD->isTrivial()) {
    if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
      return RValue::get(0);

    if (MD->isCopyAssignmentOperator()) {
      // We don't like to generate the trivial copy assignment operator when
      // it isn't necessary; just produce the proper effect here.
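      // For example, for 'a.operator=(b)' with a trivial copy assignment
      // operator we emit a plain aggregate copy of 'b' into 'a' instead of
      // an actual call.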
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitAggregateCopy(This, RHS, CE->getType());
      return RValue::get(This);
    }

    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isCopyConstructor()) {
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
                                     CE->arg_begin(), CE->arg_end());
      return RValue::get(This);
    }
    llvm_unreachable("unknown trivial member function");
  }

  // Compute the function type we're calling.
  const CGFunctionInfo *FInfo = 0;
  if (isa<CXXDestructorDecl>(MD))
    FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXDestructorDecl>(MD),
                                            Dtor_Complete);
  else if (isa<CXXConstructorDecl>(MD))
    FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXConstructorDecl>(MD),
                                            Ctor_Complete);
  else
    FInfo = &CGM.getTypes().getFunctionInfo(MD);

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  const llvm::Type *Ty
    = CGM.getTypes().GetFunctionType(*FInfo, FPT->isVariadic());

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
  bool UseVirtualCall;
  UseVirtualCall = MD->isVirtual() && !ME->hasQualifier()
                   && !canDevirtualizeMemberFunctionCalls(getContext(),
                                                          ME->getBase(), MD);
  llvm::Value *Callee;
  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
    if (UseVirtualCall) {
      Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
    } else {
      if (getContext().getLangOptions().AppleKext &&
          MD->isVirtual() &&
          ME->hasQualifier())
        Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
      else
        Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
    }
  } else if (const CXXConstructorDecl *Ctor =
               dyn_cast<CXXConstructorDecl>(MD)) {
    Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
  } else if (UseVirtualCall) {
    Callee = BuildVirtualCall(MD, This, Ty);
  } else {
    if (getContext().getLangOptions().AppleKext &&
        MD->isVirtual() &&
        ME->hasQualifier())
      Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
    else
      Callee = CGM.GetAddrOfFunction(MD, Ty);
  }

  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
                           CE->arg_begin(), CE->arg_end());
}

RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  const BinaryOperator *BO =
      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();

  const MemberPointerType *MPT =
      MemFnExpr->getType()->castAs<MemberPointerType>();

  const FunctionProtoType *FPT =
      MPT->getPointeeType()->castAs<FunctionProtoType>();
  const CXXRecordDecl *RD =
      cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Emit the 'this' pointer.
  llvm::Value *This;

  if (BO->getOpcode() == BO_PtrMemI)
    This = EmitScalarExpr(BaseExpr);
  else
    This = EmitLValue(BaseExpr).getAddress();

  // Ask the ABI to load the callee.  Note that This is modified.
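  // (For a pointer to a virtual member function this involves a vtable
  // lookup, and This may be adjusted to point at the correct subobject.)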
  llvm::Value *Callee =
    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);

  CallArgList Args;

  QualType ThisType =
    getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.add(RValue::get(This), ThisType);

  // And the rest of the call args
  EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
  return EmitCall(CGM.getTypes().getFunctionInfo(Args, FPT), Callee,
                  ReturnValue, Args);
}

RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                               const CXXMethodDecl *MD,
                                               ReturnValueSlot ReturnValue) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");
  LValue LV = EmitLValue(E->getArg(0));
  llvm::Value *This = LV.getAddress();

  if (MD->isCopyAssignmentOperator()) {
    const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(MD->getDeclContext());
    if (ClassDecl->hasTrivialCopyAssignment()) {
      assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
             "EmitCXXOperatorMemberCallExpr - user declared copy assignment");
      llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
      QualType Ty = E->getType();
      EmitAggregateCopy(This, Src, Ty);
      return RValue::get(This);
    }
  }

  llvm::Value *Callee = EmitCXXOperatorMemberCallee(E, MD, This);
  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
                           E->arg_begin() + 1, E->arg_end());
}

void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
                                      AggValueSlot Dest) {
  assert(!Dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now, unless destination is
  // already zeroed.
  if (E->requiresZeroInitialization() && !Dest.isZeroed())
    EmitNullInitialization(Dest.getAddr(), E->getType());

  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Elide the constructor if we're constructing from a temporary.
  // The temporary check is required because Sema sets this on NRVO
  // returns.
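  // For example, in 'X x = X(f());' the temporary can be constructed
  // directly into 'x', so the copy constructor call disappears entirely.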
  if (getContext().getLangOptions().ElideConstructors && E->isElidable()) {
    assert(getContext().hasSameUnqualifiedType(E->getType(),
                                               E->getArg(0)->getType()));
    if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
      EmitAggExpr(E->getArg(0), Dest);
      return;
    }
  }

  const ConstantArrayType *Array
    = getContext().getAsConstantArrayType(E->getType());
  if (Array) {
    QualType BaseElementTy = getContext().getBaseElementType(Array);
    const llvm::Type *BasePtr = ConvertType(BaseElementTy);
    BasePtr = llvm::PointerType::getUnqual(BasePtr);
    llvm::Value *BaseAddrPtr =
      Builder.CreateBitCast(Dest.getAddr(), BasePtr);

    EmitCXXAggrConstructorCall(CD, Array, BaseAddrPtr,
                               E->arg_begin(), E->arg_end());
  }
  else {
    CXXCtorType Type = Ctor_Complete;
    bool ForVirtualBase = false;

    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
      // We should be emitting a constructor; GlobalDecl will assert this
      Type = CurGD.getCtorType();
      break;

    case CXXConstructExpr::CK_Complete:
      Type = Ctor_Complete;
      break;

    case CXXConstructExpr::CK_VirtualBase:
      ForVirtualBase = true;
      // fall-through

    case CXXConstructExpr::CK_NonVirtualBase:
      Type = Ctor_Base;
    }

    // Call the constructor.
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest.getAddr(),
                           E->arg_begin(), E->arg_end());
  }
}

void
CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
                                            llvm::Value *Src,
                                            const Expr *Exp) {
  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
    Exp = E->getSubExpr();
  assert(isa<CXXConstructExpr>(Exp) &&
         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
  const CXXConstructorDecl *CD = E->getConstructor();
  RunCleanupsScope Scope(*this);

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  // FIXME. Do I still need this for a copy ctor synthesis?
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest, E->getType());

  assert(!getContext().getAsConstantArrayType(E->getType())
         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src,
                                 E->arg_begin(), E->arg_end());
}

static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
                                        const CXXNewExpr *E) {
  if (!E->isArray())
    return CharUnits::Zero();

  // No cookie is required if the operator new[] being used is the
  // reserved placement operator new[].
  if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
    return CharUnits::Zero();

  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
}

static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
                                        const CXXNewExpr *e,
                                        llvm::Value *&numElements,
                                        llvm::Value *&sizeWithoutCookie) {
  QualType type = e->getAllocatedType();

  if (!e->isArray()) {
    CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
    sizeWithoutCookie
      = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
    return sizeWithoutCookie;
  }

  // The width of size_t.
  unsigned sizeWidth = CGF.SizeTy->getBitWidth();

  // Figure out the cookie size.
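  // (The cookie is where the ABI records the element count so that
  // 'delete[]' can run the right number of destructors; it is zero when no
  // cookie is needed.)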
  llvm::APInt cookieSize(sizeWidth,
                         CalculateCookiePadding(CGF, e).getQuantity());

  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  numElements = CGF.EmitScalarExpr(e->getArraySize());
  assert(isa<llvm::IntegerType>(numElements->getType()));

  // The number of elements can have an arbitrary integer type;
  // essentially, we need to multiply it by a constant factor, add a
  // cookie size, and verify that the result is representable as a
  // size_t.  That's just a gloss, though, and it's wrong in one
  // important way: if the count is negative, it's an error even if
  // the cookie size would bring the total size >= 0.
  bool isSigned
    = e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
  const llvm::IntegerType *numElementsType
    = cast<llvm::IntegerType>(numElements->getType());
  unsigned numElementsWidth = numElementsType->getBitWidth();

  // Compute the constant factor.
  llvm::APInt arraySizeMultiplier(sizeWidth, 1);
  while (const ConstantArrayType *CAT
           = CGF.getContext().getAsConstantArrayType(type)) {
    type = CAT->getElementType();
    arraySizeMultiplier *= CAT->getSize();
  }

  CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
  llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
  typeSizeMultiplier *= arraySizeMultiplier;

  // This will be a size_t.
  llvm::Value *size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *numElementsC =
        dyn_cast<llvm::ConstantInt>(numElements)) {
    const llvm::APInt &count = numElementsC->getValue();

    bool hasAnyOverflow = false;

    // If 'count' was a negative number, it's an overflow.
    if (isSigned && count.isNegative())
      hasAnyOverflow = true;

    // We want to do all this arithmetic in size_t.  If numElements is
    // wider than that, check whether it's already too big, and if so,
    // overflow.
    else if (numElementsWidth > sizeWidth &&
             numElementsWidth - sizeWidth > count.countLeadingZeros())
      hasAnyOverflow = true;

    // Okay, compute a count at the right width.
    llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);

    // Scale numElements by that.  This might overflow, but we don't
    // care because it only overflows if allocationSize does, too, and
    // if that overflows then we shouldn't use this.
    numElements = llvm::ConstantInt::get(CGF.SizeTy,
                                         adjustedCount * arraySizeMultiplier);

    // Compute the size before cookie, and track whether it overflowed.
    bool overflow;
    llvm::APInt allocationSize
      = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
    hasAnyOverflow |= overflow;

    // Add in the cookie, and check whether it's overflowed.
    if (cookieSize != 0) {
      // Save the current size without a cookie.  This shouldn't be
      // used if there was overflow.
      sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);

      allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
      hasAnyOverflow |= overflow;
    }

    // On overflow, produce a -1 so operator new will fail.
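    // ((size_t)-1 is SIZE_MAX; no allocator can satisfy a request of that
    // size, so operator new will reliably report failure.)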
    if (hasAnyOverflow) {
      size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
    } else {
      size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
    }

  // Otherwise, we might need to use the overflow intrinsics.
  } else {
    // There are up to four conditions we need to test for:
    // 1) if isSigned, we need to check whether numElements is negative;
    // 2) if numElementsWidth > sizeWidth, we need to check whether
    //    numElements is larger than something representable in size_t;
    // 3) we need to compute
    //      sizeWithoutCookie := numElements * typeSizeMultiplier
    //    and check whether it overflows; and
    // 4) if we need a cookie, we need to compute
    //      size := sizeWithoutCookie + cookieSize
    //    and check whether it overflows.

    llvm::Value *hasOverflow = 0;

    // If numElementsWidth > sizeWidth, then one way or another, we're
    // going to have to do a comparison for (2), and this happens to
    // take care of (1), too.
    if (numElementsWidth > sizeWidth) {
      llvm::APInt threshold(numElementsWidth, 1);
      threshold <<= sizeWidth;

      llvm::Value *thresholdV
        = llvm::ConstantInt::get(numElementsType, threshold);

      hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
      numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);

    // Otherwise, if we're signed, we want to sext up to size_t.
    } else if (isSigned) {
      if (numElementsWidth < sizeWidth)
        numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);

      // If there's a non-1 type size multiplier, then we can do the
      // signedness check at the same time as we do the multiply
      // because a negative number times anything will cause an
      // unsigned overflow.  Otherwise, we have to do it here.
      if (typeSizeMultiplier == 1)
        hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
                                      llvm::ConstantInt::get(CGF.SizeTy, 0));

    // Otherwise, zext up to size_t if necessary.
    } else if (numElementsWidth < sizeWidth) {
      numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
    }

    assert(numElements->getType() == CGF.SizeTy);

    size = numElements;

    // Multiply by the type size if necessary.  This multiplier
    // includes all the factors for nested arrays.
    //
    // This step also causes numElements to be scaled up by the
    // nested-array factor if necessary.  Overflow on this computation
    // can be ignored because the result shouldn't be used if
    // allocation fails.
    if (typeSizeMultiplier != 1) {
      const llvm::Type *intrinsicTypes[] = { CGF.SizeTy };
      llvm::Value *umul_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow,
                               intrinsicTypes, 1);

      llvm::Value *tsmV =
        llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
      llvm::Value *result =
        CGF.Builder.CreateCall2(umul_with_overflow, size, tsmV);

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);

      // Also scale up numElements by the array size multiplier.
      if (arraySizeMultiplier != 1) {
        // If the base element type size is 1, then we can re-use the
        // multiply we just did.
        if (typeSize.isOne()) {
          assert(arraySizeMultiplier == typeSizeMultiplier);
          numElements = size;

        // Otherwise we need a separate multiply.
        } else {
          llvm::Value *asmV =
            llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
          numElements = CGF.Builder.CreateMul(numElements, asmV);
        }
      }
    } else {
      // numElements doesn't need to be scaled.
      assert(arraySizeMultiplier == 1);
    }

    // Add in the cookie size if necessary.
    if (cookieSize != 0) {
      sizeWithoutCookie = size;

      const llvm::Type *intrinsicTypes[] = { CGF.SizeTy };
      llvm::Value *uadd_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow,
                               intrinsicTypes, 1);

      llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
      llvm::Value *result =
        CGF.Builder.CreateCall2(uadd_with_overflow, size, cookieSizeV);

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);
    }

    // If we had any possibility of dynamic overflow, make a select to
    // overwrite 'size' with an all-ones value, which should cause
    // operator new to throw.
    if (hasOverflow)
      size = CGF.Builder.CreateSelect(hasOverflow,
                                 llvm::Constant::getAllOnesValue(CGF.SizeTy),
                                      size);
  }

  if (cookieSize == 0)
    sizeWithoutCookie = size;
  else
    assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");

  return size;
}

static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
                                    llvm::Value *NewPtr) {

  assert(E->getNumConstructorArgs() == 1 &&
         "Can only have one argument to initializer of POD type.");

  const Expr *Init = E->getConstructorArg(0);
  QualType AllocType = E->getAllocatedType();

  unsigned Alignment =
    CGF.getContext().getTypeAlignInChars(AllocType).getQuantity();
  if (!CGF.hasAggregateLLVMType(AllocType))
    CGF.EmitStoreOfScalar(CGF.EmitScalarExpr(Init), NewPtr,
                          AllocType.isVolatileQualified(), Alignment,
                          AllocType);
  else if (AllocType->isAnyComplexType())
    CGF.EmitComplexExprIntoAddr(Init, NewPtr,
                                AllocType.isVolatileQualified());
  else {
    AggValueSlot Slot
      = AggValueSlot::forAddr(NewPtr, AllocType.isVolatileQualified(), true);
    CGF.EmitAggExpr(Init, Slot);
  }
}

void
CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
                                         llvm::Value *NewPtr,
                                         llvm::Value *NumElements) {
  // We have a POD type.
  if (E->getNumConstructorArgs() == 0)
    return;

  const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

  // Create a temporary for the loop index and initialize it with 0.
  llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index");
  llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
  Builder.CreateStore(Zero, IndexPtr);

  // Start the loop with a block that tests the condition.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  llvm::BasicBlock *AfterFor = createBasicBlock("for.end");

  EmitBlock(CondBlock);

  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // Generate: if (loop-index < number-of-elements) fall to the loop body,
  // otherwise, go to the block after the for-loop.
  llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
  llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElements, "isless");
  // If the condition is true, execute the body.
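  // The emitted control flow is, roughly:
  //   for.cond: i = load index; br (i < n), for.body, for.end
  //   for.body: initialize element i; br for.inc
  //   for.inc:  store i + 1; br for.cond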
  Builder.CreateCondBr(IsLess, ForBody, AfterFor);

  EmitBlock(ForBody);

  llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
  // Inside the loop body, emit the constructor call on the array element.
  Counter = Builder.CreateLoad(IndexPtr);
  llvm::Value *Address = Builder.CreateInBoundsGEP(NewPtr, Counter,
                                                   "arrayidx");
  StoreAnyExprIntoOneUnit(*this, E, Address);

  EmitBlock(ContinueBlock);

  // Emit the increment of the loop counter.
  llvm::Value *NextVal = llvm::ConstantInt::get(SizeTy, 1);
  Counter = Builder.CreateLoad(IndexPtr);
  NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
  Builder.CreateStore(NextVal, IndexPtr);

  // Finally, branch back up to the condition for the next iteration.
  EmitBranch(CondBlock);

  // Emit the fall-through block.
  EmitBlock(AfterFor, true);
}

static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
                           llvm::Value *NewPtr, llvm::Value *Size) {
  CGF.EmitCastToVoidPtr(NewPtr);
  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(T);
  CGF.Builder.CreateMemSet(NewPtr, CGF.Builder.getInt8(0), Size,
                           Alignment.getQuantity(), false);
}

static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               llvm::Value *NewPtr,
                               llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie) {
  if (E->isArray()) {
    if (CXXConstructorDecl *Ctor = E->getConstructor()) {
      bool RequiresZeroInitialization = false;
      if (Ctor->getParent()->hasTrivialDefaultConstructor()) {
        // If new expression did not specify value-initialization, then there
        // is no initialization.
        if (!E->hasInitializer() || Ctor->getParent()->isEmpty())
          return;

        if (CGF.CGM.getTypes().isZeroInitializable(E->getAllocatedType())) {
          // Optimization: since zero initialization will just set the memory
          // to all zeroes, generate a single memset to do it in one shot.
          EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
                         AllocSizeWithoutCookie);
          return;
        }

        RequiresZeroInitialization = true;
      }

      CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
                                     E->constructor_arg_begin(),
                                     E->constructor_arg_end(),
                                     RequiresZeroInitialization);
      return;
    } else if (E->getNumConstructorArgs() == 1 &&
               isa<ImplicitValueInitExpr>(E->getConstructorArg(0))) {
      // Optimization: since zero initialization will just set the memory
      // to all zeroes, generate a single memset to do it in one shot.
      EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
                     AllocSizeWithoutCookie);
      return;
    } else {
      CGF.EmitNewArrayInitializer(E, NewPtr, NumElements);
      return;
    }
  }

  if (CXXConstructorDecl *Ctor = E->getConstructor()) {
    // Per C++ [expr.new]p15, if we have an initializer, then we're performing
    // direct initialization. C++ [dcl.init]p5 requires that we
    // zero-initialize storage if there are no user-declared constructors.
    if (E->hasInitializer() &&
        !Ctor->getParent()->hasUserDeclaredConstructor() &&
        !Ctor->getParent()->isEmpty())
      CGF.EmitNullInitialization(NewPtr, E->getAllocatedType());

    CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
                               NewPtr, E->constructor_arg_begin(),
                               E->constructor_arg_end());

    return;
  }
  // We have a POD type.
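  // For example, 'new int' has no initializer and emits no stores here,
  // while 'new int(5)' falls through to store the single value below.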
  if (E->getNumConstructorArgs() == 0)
    return;

  StoreAnyExprIntoOneUnit(CGF, E, NewPtr);
}

namespace {
  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression.
  class CallDeleteDuringNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    llvm::Value *Ptr;
    llvm::Value *AllocSize;

    RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(RValue);
    }

    CallDeleteDuringNew(size_t NumPlacementArgs,
                        const FunctionDecl *OperatorDelete,
                        llvm::Value *Ptr,
                        llvm::Value *AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    void setPlacementArg(unsigned I, RValue Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.add(RValue::get(Ptr), *AI++);

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2)
        DeleteArgs.add(RValue::get(AllocSize), *AI++);

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I)
        DeleteArgs.add(getPlacementArgs()[I], *AI++);

      // Call 'operator delete'.
      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
    }
  };

  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression when the new expression is
  /// conditional.
  class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    DominatingValue<RValue>::saved_type Ptr;
    DominatingValue<RValue>::saved_type AllocSize;

    DominatingValue<RValue>::saved_type *getPlacementArgs() {
      return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
    }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
    }

    CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
                                   const FunctionDecl *OperatorDelete,
                                   DominatingValue<RValue>::saved_type Ptr,
                                   DominatingValue<RValue>::saved_type AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.add(Ptr.restore(CGF), *AI++);

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2) {
        RValue RV = AllocSize.restore(CGF);
        DeleteArgs.add(RV, *AI++);
      }

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
        RValue RV = getPlacementArgs()[I].restore(CGF);
        DeleteArgs.add(RV, *AI++);
      }

      // Call 'operator delete'.
      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
    }
  };
}

/// Enter a cleanup to call 'operator delete' if the initializer in a
/// new-expression throws.
static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
                                  const CXXNewExpr *E,
                                  llvm::Value *NewPtr,
                                  llvm::Value *AllocSize,
                                  const CallArgList &NewArgs) {
  // If we're not inside a conditional branch, then the cleanup will
  // dominate and we can do the easier (and more efficient) thing.
  if (!CGF.isInConditionalBranch()) {
    CallDeleteDuringNew *Cleanup = CGF.EHStack
      .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
                                                 E->getNumPlacementArgs(),
                                                 E->getOperatorDelete(),
                                                 NewPtr, AllocSize);
    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
      Cleanup->setPlacementArg(I, NewArgs[I+1].RV);

    return;
  }

  // Otherwise, we need to save all this stuff.
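  // DominatingValue<RValue>::save spills each value so that the cleanup,
  // which may be emitted at a point this block does not dominate, can
  // restore it safely.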
  DominatingValue<RValue>::saved_type SavedNewPtr =
    DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
  DominatingValue<RValue>::saved_type SavedAllocSize =
    DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));

  CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
    .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(InactiveEHCleanup,
                                                 E->getNumPlacementArgs(),
                                                 E->getOperatorDelete(),
                                                 SavedNewPtr,
                                                 SavedAllocSize);
  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
    Cleanup->setPlacementArg(I,
                        DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV));

  CGF.ActivateCleanupBlock(CGF.EHStack.stable_begin());
}

llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  // The element type being allocated.
  QualType allocType = getContext().getBaseElementType(E->getAllocatedType());

  // 1. Build a call to the allocation function.
  FunctionDecl *allocator = E->getOperatorNew();
  const FunctionProtoType *allocatorType =
    allocator->getType()->castAs<FunctionProtoType>();

  CallArgList allocatorArgs;

  // The allocation size is the first argument.
  QualType sizeType = getContext().getSizeType();

  llvm::Value *numElements = 0;
  llvm::Value *allocSizeWithoutCookie = 0;
  llvm::Value *allocSize =
    EmitCXXNewAllocSize(*this, E, numElements, allocSizeWithoutCookie);

  allocatorArgs.add(RValue::get(allocSize), sizeType);

  // Emit the rest of the arguments.
  // FIXME: Ideally, this should just use EmitCallArgs.
  CXXNewExpr::const_arg_iterator placementArg = E->placement_arg_begin();

  // First, use the types from the function type.
  // We start at 1 here because the first argument (the allocation size)
  // has already been emitted.
  for (unsigned i = 1, e = allocatorType->getNumArgs(); i != e;
       ++i, ++placementArg) {
    QualType argType = allocatorType->getArgType(i);

    assert(getContext().hasSameUnqualifiedType(argType.getNonReferenceType(),
                                               placementArg->getType()) &&
           "type mismatch in call argument!");

    EmitCallArg(allocatorArgs, *placementArg, argType);
  }

  // Either we've emitted all the call args, or we have a call to a
  // variadic function.
  assert((placementArg == E->placement_arg_end() ||
          allocatorType->isVariadic()) &&
         "Extra arguments to non-variadic function!");

  // If we still have any arguments, emit them using the type of the argument.
  for (CXXNewExpr::const_arg_iterator placementArgsEnd = E->placement_arg_end();
       placementArg != placementArgsEnd; ++placementArg) {
    EmitCallArg(allocatorArgs, *placementArg, placementArg->getType());
  }

  // Emit the allocation call.  If the allocator is a global placement
  // operator, just "inline" it directly.
  RValue RV;
  if (allocator->isReservedGlobalPlacementOperator()) {
    assert(allocatorArgs.size() == 2);
    RV = allocatorArgs[1].RV;
    // TODO: kill any unnecessary computations done for the size
    // argument.
  } else {
    RV = EmitCall(CGM.getTypes().getFunctionInfo(allocatorArgs, allocatorType),
                  CGM.GetAddrOfFunction(allocator), ReturnValueSlot(),
                  allocatorArgs, allocator);
  }

  // Emit a null check on the allocation result if the allocation
  // function is allowed to return null (because it has a non-throwing
  // exception spec; for this part, we inline
  // CXXNewExpr::shouldNullCheckAllocation()) and we have an
  // interesting initializer.
  bool nullCheck = allocatorType->isNothrow(getContext()) &&
    !(allocType->isPODType() && !E->hasInitializer());

  llvm::BasicBlock *nullCheckBB = 0;
  llvm::BasicBlock *contBB = 0;

  llvm::Value *allocation = RV.getScalarVal();
  unsigned AS =
    cast<llvm::PointerType>(allocation->getType())->getAddressSpace();

  // The null-check means that the initializer is conditionally
  // evaluated.
  ConditionalEvaluation conditional(*this);

  if (nullCheck) {
    conditional.begin(*this);

    nullCheckBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
    contBB = createBasicBlock("new.cont");

    llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
    Builder.CreateCondBr(isNull, contBB, notNullBB);
    EmitBlock(notNullBB);
  }

  assert((allocSize == allocSizeWithoutCookie) ==
         CalculateCookiePadding(*this, E).isZero());
  if (allocSize != allocSizeWithoutCookie) {
    assert(E->isArray());
    allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
                                                       numElements,
                                                       E, allocType);
  }

  // If there's an operator delete, enter a cleanup to call it if an
  // exception is thrown.
  EHScopeStack::stable_iterator operatorDeleteCleanup;
  if (E->getOperatorDelete() &&
      !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
    EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
    operatorDeleteCleanup = EHStack.stable_begin();
  }

  const llvm::Type *elementPtrTy
    = ConvertTypeForMem(allocType)->getPointerTo(AS);
  llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy);

  if (E->isArray()) {
    EmitNewInitializer(*this, E, result, numElements, allocSizeWithoutCookie);

    // NewPtr is a pointer to the base element type.  If we're
    // allocating an array of arrays, we'll need to cast back to the
    // array pointer type.
    const llvm::Type *resultType = ConvertTypeForMem(E->getType());
    if (result->getType() != resultType)
      result = Builder.CreateBitCast(result, resultType);
  } else {
    EmitNewInitializer(*this, E, result, numElements, allocSizeWithoutCookie);
  }

  // Deactivate the 'operator delete' cleanup if we finished
  // initialization.
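  // (From this point on, an exception no longer frees the allocation; the
  // object counts as fully constructed.)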
  if (operatorDeleteCleanup.isValid())
    DeactivateCleanupBlock(operatorDeleteCleanup);

  if (nullCheck) {
    conditional.end(*this);

    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    EmitBlock(contBB);

    llvm::PHINode *PHI = Builder.CreatePHI(result->getType(), 2);
    PHI->addIncoming(result, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(result->getType()),
                     nullCheckBB);

    result = PHI;
  }

  return result;
}

void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
                                     llvm::Value *Ptr,
                                     QualType DeleteTy) {
  assert(DeleteFD->getOverloadedOperator() == OO_Delete);

  const FunctionProtoType *DeleteFTy =
    DeleteFD->getType()->getAs<FunctionProtoType>();

  CallArgList DeleteArgs;

  // Check if we need to pass the size to the delete operator.
  llvm::Value *Size = 0;
  QualType SizeTy;
  if (DeleteFTy->getNumArgs() == 2) {
    SizeTy = DeleteFTy->getArgType(1);
    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
    Size = llvm::ConstantInt::get(ConvertType(SizeTy),
                                  DeleteTypeSize.getQuantity());
  }

  QualType ArgTy = DeleteFTy->getArgType(0);
  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
  DeleteArgs.add(RValue::get(DeletePtr), ArgTy);

  if (Size)
    DeleteArgs.add(RValue::get(Size), SizeTy);

  // Emit the call to delete.
  EmitCall(CGM.getTypes().getFunctionInfo(DeleteArgs, DeleteFTy),
           CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
           DeleteArgs, DeleteFD);
}

namespace {
  /// Calls the given 'operator delete' on a single object.
  struct CallObjectDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    QualType ElementType;

    CallObjectDelete(llvm::Value *Ptr,
                     const FunctionDecl *OperatorDelete,
                     QualType ElementType)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
    }
  };
}

/// Emit the code for deleting a single object.
static void EmitObjectDelete(CodeGenFunction &CGF,
                             const FunctionDecl *OperatorDelete,
                             llvm::Value *Ptr,
                             QualType ElementType) {
  // Find the destructor for the type, if applicable.  If the
  // destructor is virtual, we'll just emit the vcall and return.
  const CXXDestructorDecl *Dtor = 0;
  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (!RD->hasTrivialDestructor()) {
      Dtor = RD->getDestructor();

      if (Dtor->isVirtual()) {
        const llvm::Type *Ty =
          CGF.getTypes().GetFunctionType(CGF.getTypes().getFunctionInfo(Dtor,
                                                               Dtor_Complete),
                                         /*isVariadic=*/false);

        llvm::Value *Callee
          = CGF.BuildVirtualCall(Dtor, Dtor_Deleting, Ptr, Ty);
        CGF.EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
                              0, 0);

        // The dtor took care of deleting the object.
        return;
      }
    }
  }

  // Make sure that we call delete even if the dtor throws.
  // This doesn't have to be a conditional cleanup because we're going
  // to pop it off in a second.
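  // The effective sequence is: push the cleanup, run the destructor (if
  // any), then pop the cleanup, which calls 'operator delete' on both the
  // normal and the exceptional path.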
  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                            Ptr, OperatorDelete, ElementType);

  if (Dtor)
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false, Ptr);

  CGF.PopCleanupBlock();
}

namespace {
  /// Calls the given 'operator delete' on an array of objects.
  struct CallArrayDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    llvm::Value *NumElements;
    QualType ElementType;
    CharUnits CookieSize;

    CallArrayDelete(llvm::Value *Ptr,
                    const FunctionDecl *OperatorDelete,
                    llvm::Value *NumElements,
                    QualType ElementType,
                    CharUnits CookieSize)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
        ElementType(ElementType), CookieSize(CookieSize) {}

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      const FunctionProtoType *DeleteFTy =
        OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);

      CallArgList Args;

      // Pass the pointer as the first argument.
      QualType VoidPtrTy = DeleteFTy->getArgType(0);
      llvm::Value *DeletePtr
        = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
      Args.add(RValue::get(DeletePtr), VoidPtrTy);

      // Pass the original requested size as the second argument.
      if (DeleteFTy->getNumArgs() == 2) {
        QualType size_t = DeleteFTy->getArgType(1);
        const llvm::IntegerType *SizeTy
          = cast<llvm::IntegerType>(CGF.ConvertType(size_t));

        CharUnits ElementTypeSize =
          CGF.CGM.getContext().getTypeSizeInChars(ElementType);

        // The size of an element, multiplied by the number of elements.
        llvm::Value *Size
          = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
        Size = CGF.Builder.CreateMul(Size, NumElements);

        // Plus the size of the cookie if applicable.
        if (!CookieSize.isZero()) {
          llvm::Value *CookieSizeV
            = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
          Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
        }

        Args.add(RValue::get(Size), size_t);
      }

      // Emit the call to delete.
      CGF.EmitCall(CGF.getTypes().getFunctionInfo(Args, DeleteFTy),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), Args, OperatorDelete);
    }
  };
}

/// Emit the code for deleting an array of objects.
static void EmitArrayDelete(CodeGenFunction &CGF,
                            const CXXDeleteExpr *E,
                            llvm::Value *Ptr,
                            QualType ElementType) {
  llvm::Value *NumElements = 0;
  llvm::Value *AllocatedPtr = 0;
  CharUnits CookieSize;
  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, Ptr, E, ElementType,
                                      NumElements, AllocatedPtr, CookieSize);

  assert(AllocatedPtr && "ReadArrayCookie didn't set AllocatedPtr");

  // Make sure that we call delete even if one of the dtors throws.
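  // Note that the cleanup receives AllocatedPtr (the start of the
  // allocation, before the cookie), not the adjusted element pointer.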
  const FunctionDecl *OperatorDelete = E->getOperatorDelete();
  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
                                           AllocatedPtr, OperatorDelete,
                                           NumElements, ElementType,
                                           CookieSize);

  if (const CXXRecordDecl *RD = ElementType->getAsCXXRecordDecl()) {
    if (!RD->hasTrivialDestructor()) {
      assert(NumElements && "ReadArrayCookie didn't find element count"
                            " for a class with destructor");
      CGF.EmitCXXAggrDestructorCall(RD->getDestructor(), NumElements, Ptr);
    }
  }

  CGF.PopCleanupBlock();
}

void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {

  // Get at the argument before we performed the implicit conversion
  // to void*.
  const Expr *Arg = E->getArgument();
  while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
    if (ICE->getCastKind() != CK_UserDefinedConversion &&
        ICE->getType()->isVoidPointerType())
      Arg = ICE->getSubExpr();
    else
      break;
  }

  llvm::Value *Ptr = EmitScalarExpr(Arg);

  // Null check the pointer.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);

  // We might be deleting a pointer to array.  If so, GEP down to the
  // first non-array element.
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
  QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    llvm::SmallVector<llvm::Value*,8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr
             = getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();

      // 2. GEP to the first element of the array.
      GEP.push_back(Zero);
    }

    Ptr = Builder.CreateInBoundsGEP(Ptr, GEP.begin(), GEP.end(), "del.first");
  }

  assert(ConvertTypeForMem(DeleteTy) ==
         cast<llvm::PointerType>(Ptr->getType())->getElementType());

  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E, Ptr, DeleteTy);
  } else {
    EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy);
  }

  EmitBlock(DeleteEnd);
}

static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
  // void __cxa_bad_typeid();

  const llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
  const llvm::FunctionType *FTy =
    llvm::FunctionType::get(VoidTy, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
}

static void EmitBadTypeidCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadTypeidFn(CGF);
  CGF.EmitCallOrInvoke(Fn, 0, 0).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF,
                                         const Expr *E,
                                         const llvm::Type *StdTypeInfoPtrTy) {
  // Get the vtable pointer.
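  // (In the Itanium ABI the std::type_info pointer sits at vtable slot -1,
  // which is what the GEP below loads.)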
  llvm::Value *ThisPtr = CGF.EmitLValue(E).getAddress();

  // C++ [expr.typeid]p2:
  //   If the glvalue expression is obtained by applying the unary * operator to
  //   a pointer and the pointer is a null pointer value, the typeid expression
  //   throws the std::bad_typeid exception.
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E->IgnoreParens())) {
    if (UO->getOpcode() == UO_Deref) {
      llvm::BasicBlock *BadTypeidBlock =
        CGF.createBasicBlock("typeid.bad_typeid");
      llvm::BasicBlock *EndBlock =
        CGF.createBasicBlock("typeid.end");

      llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr);
      CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);

      CGF.EmitBlock(BadTypeidBlock);
      EmitBadTypeidCall(CGF);
      CGF.EmitBlock(EndBlock);
    }
  }

  llvm::Value *Value = CGF.GetVTablePtr(ThisPtr,
                                        StdTypeInfoPtrTy->getPointerTo());

  // Load the type info.
  Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
  return CGF.Builder.CreateLoad(Value);
}

llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  const llvm::Type *StdTypeInfoPtrTy =
    ConvertType(E->getType())->getPointerTo();

  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
      CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
    return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
  }

  // C++ [expr.typeid]p2:
  //   When typeid is applied to a glvalue expression whose type is a
  //   polymorphic class type, the result refers to a std::type_info object
  //   representing the type of the most derived object (that is, the dynamic
  //   type) to which the glvalue refers.
  if (E->getExprOperand()->isGLValue()) {
    if (const RecordType *RT =
          E->getExprOperand()->getType()->getAs<RecordType>()) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
      if (RD->isPolymorphic())
        return EmitTypeidFromVTable(*this, E->getExprOperand(),
                                    StdTypeInfoPtrTy);
    }
  }

  QualType OperandTy = E->getExprOperand()->getType();
  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
                               StdTypeInfoPtrTy);
}

static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
  // void *__dynamic_cast(const void *sub,
  //                      const abi::__class_type_info *src,
  //                      const abi::__class_type_info *dst,
  //                      std::ptrdiff_t src2dst_offset);

  const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
  const llvm::Type *PtrDiffTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());

  const llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };

  const llvm::FunctionType *FTy =
    llvm::FunctionType::get(Int8PtrTy, Args, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast");
}

static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
  // void __cxa_bad_cast();

  const llvm::Type *VoidTy = llvm::Type::getVoidTy(CGF.getLLVMContext());
  const llvm::FunctionType *FTy =
    llvm::FunctionType::get(VoidTy, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
}

static void EmitBadCastCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadCastFn(CGF);
  CGF.EmitCallOrInvoke(Fn, 0, 0).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}
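
/// Emit a call to the __dynamic_cast runtime function (or the
/// offset-to-top fast path for casts to void*), assuming the operand has
/// already been null-checked.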
static llvm::Value *
EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
                    QualType SrcTy, QualType DestTy,
                    llvm::BasicBlock *CastEnd) {
  const llvm::Type *PtrDiffLTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());
  const llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  if (const PointerType *PTy = DestTy->getAs<PointerType>()) {
    if (PTy->getPointeeType()->isVoidType()) {
      // C++ [expr.dynamic.cast]p7:
      //   If T is "pointer to cv void," then the result is a pointer to the
      //   most derived object pointed to by v.

      // Get the vtable pointer.
      llvm::Value *VTable = CGF.GetVTablePtr(Value, PtrDiffLTy->getPointerTo());

      // Get the offset-to-top from the vtable.
      llvm::Value *OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
      OffsetToTop = CGF.Builder.CreateLoad(OffsetToTop, "offset.to.top");

      // Finally, add the offset to the pointer.
      Value = CGF.EmitCastToVoidPtr(Value);
      Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);

      return CGF.Builder.CreateBitCast(Value, DestLTy);
    }
  }

  QualType SrcRecordTy;
  QualType DestRecordTy;

  if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) {
    SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
    DestRecordTy = DestPTy->getPointeeType();
  } else {
    SrcRecordTy = SrcTy;
    DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
  }

  assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
  assert(DestRecordTy->isRecordType() && "dest type must be a record type!");

  llvm::Value *SrcRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // FIXME: Actually compute a hint here.
  llvm::Value *OffsetHint = llvm::ConstantInt::get(PtrDiffLTy, -1ULL);

  // Emit the call to __dynamic_cast.
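  // __dynamic_cast returns null on failure: for pointer casts that null is
  // simply the result, while for reference casts the check below calls
  // __cxa_bad_cast.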
  Value = CGF.EmitCastToVoidPtr(Value);
  Value = CGF.Builder.CreateCall4(getDynamicCastFn(CGF), Value,
                                  SrcRTTI, DestRTTI, OffsetHint);
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
      CGF.createBasicBlock("dynamic_cast.bad_cast");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}

static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
                                          QualType DestTy) {
  const llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  if (DestTy->isPointerType())
    return llvm::Constant::getNullValue(DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  EmitBadCastCall(CGF);

  CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
  return llvm::UndefValue::get(DestLTy);
}

llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
                                              const CXXDynamicCastExpr *DCE) {
  QualType DestTy = DCE->getTypeAsWritten();

  if (DCE->isAlwaysNull())
    return EmitDynamicCastToNull(*this, DestTy);

  QualType SrcTy = DCE->getSubExpr()->getType();

  // C++ [expr.dynamic.cast]p4:
  //   If the value of v is a null pointer value in the pointer case, the result
  //   is the null pointer value of type T.
  bool ShouldNullCheckSrcValue = SrcTy->isPointerType();

  llvm::BasicBlock *CastNull = 0;
  llvm::BasicBlock *CastNotNull = 0;
  llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");

  if (ShouldNullCheckSrcValue) {
    CastNull = createBasicBlock("dynamic_cast.null");
    CastNotNull = createBasicBlock("dynamic_cast.notnull");

    llvm::Value *IsNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  Value = EmitDynamicCastCall(*this, Value, SrcTy, DestTy, CastEnd);

  if (ShouldNullCheckSrcValue) {
    EmitBranch(CastEnd);

    EmitBlock(CastNull);
    EmitBranch(CastEnd);
  }

  EmitBlock(CastEnd);

  if (ShouldNullCheckSrcValue) {
    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);

    Value = PHI;
  }

  return Value;
}