//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CallSite.h"

using namespace clang;
using namespace CodeGen;

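// EmitCXXMemberCall assembles the argument list for an instance method call:
// the implicit 'this' pointer first, then any ABI-required implicit argument
// (such as a VTT), then the explicit arguments.  As an illustration, with a
// hypothetical
//
//   struct S { int f(int x); };
//   S s; s.f(1);
//
// the call is emitted roughly as 'call i32 @S::f(%struct.S* %s, i32 1)',
// modulo the actual name mangling and calling-convention details.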
RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
                                          SourceLocation CallLoc,
                                          llvm::Value *Callee,
                                          ReturnValueSlot ReturnValue,
                                          llvm::Value *This,
                                          llvm::Value *ImplicitParam,
                                          QualType ImplicitParamTy,
                                          CallExpr::const_arg_iterator ArgBeg,
                                          CallExpr::const_arg_iterator ArgEnd) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  // C++11 [class.mfct.non-static]p2:
  //   If a non-static member function of a class X is called for an object that
  //   is not of type X, or of a type derived from X, the behavior is undefined.
  EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall
                                            : TCK_MemberCall,
                CallLoc, This, getContext().getRecordType(MD->getParent()));

  CallArgList Args;

  // Push the this ptr.
  Args.add(RValue::get(This), MD->getThisType(getContext()));

  // If there is an implicit parameter (e.g. VTT), emit it.
  if (ImplicitParam) {
    Args.add(RValue::get(ImplicitParam), ImplicitParamTy);
  }

  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());

  // And the rest of the call args.
  EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);

  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
                  Callee, ReturnValue, Args, MD);
}

static CXXRecordDecl *getCXXRecord(const Expr *E) {
  QualType T = E->getType();
  if (const PointerType *PTy = T->getAs<PointerType>())
    T = PTy->getPointeeType();
  const RecordType *Ty = T->castAs<RecordType>();
  return cast<CXXRecordDecl>(Ty->getDecl());
}

// Note: This function also emits constructor calls to support an MSVC
// extension allowing explicit constructor function calls.
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  const Expr *callee = CE->getCallee()->IgnoreParens();

  if (isa<BinaryOperator>(callee))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(callee);
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
    return EmitCall(getContext().getPointerType(MD->getType()), Callee,
                    ReturnValue, CE->arg_begin(), CE->arg_end());
  }

  // Compute the object pointer.
  const Expr *Base = ME->getBase();
  bool CanUseVirtualCall = MD->isVirtual() && !ME->hasQualifier();

  const CXXMethodDecl *DevirtualizedMethod = NULL;
  if (CanUseVirtualCall && CanDevirtualizeMemberFunctionCall(Base, MD)) {
    const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
    DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
    assert(DevirtualizedMethod);
    const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
    const Expr *Inner = Base->ignoreParenBaseCasts();
    if (getCXXRecord(Inner) == DevirtualizedClass)
      // If the class of the Inner expression is where the dynamic method
      // is defined, build the this pointer from it.
      Base = Inner;
    else if (getCXXRecord(Base) != DevirtualizedClass) {
      // If the method is defined in a class that is not the best dynamic
      // one or the one of the full expression, we would have to build
      // a derived-to-base cast to compute the correct this pointer, but
      // we don't have support for that yet, so do a virtual call.
      DevirtualizedMethod = NULL;
    }
    // If the return types are not the same, this might be a case where more
    // code needs to run to compensate for it. For example, the derived
    // method might return a type that inherits from the return
    // type of MD and has a prefix.
    // For now we just avoid devirtualizing these covariant cases.
    if (DevirtualizedMethod &&
        DevirtualizedMethod->getResultType().getCanonicalType() !=
            MD->getResultType().getCanonicalType())
      DevirtualizedMethod = NULL;
  }

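  // At this point DevirtualizedMethod is non-null only when the dynamic type
  // of the object is statically known and the chosen override can be called
  // directly.  For example (hypothetical types):
  //
  //   struct B { virtual int f(); };
  //   struct D : B { int f(); };
  //   D d; d.f();          // known to call D::f, no vtable load needed
  //
  // whereas a call through a 'B*' of unknown dynamic type stays virtual.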
  llvm::Value *This;
  if (ME->isArrow())
    This = EmitScalarExpr(Base);
  else
    This = EmitLValue(Base).getAddress();


  if (MD->isTrivial()) {
    if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
      return RValue::get(0);

    if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
      // We don't like to generate the trivial copy/move assignment operator
      // when it isn't necessary; just produce the proper effect here.
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitAggregateAssign(This, RHS, CE->getType());
      return RValue::get(This);
    }

    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
      // Trivial move and copy ctor are the same.
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
                                     CE->arg_begin(), CE->arg_end());
      return RValue::get(This);
    }
    llvm_unreachable("unknown trivial member function");
  }

  // Compute the function type we're calling.
  const CXXMethodDecl *CalleeDecl = DevirtualizedMethod ?
                                        DevirtualizedMethod : MD;
  const CGFunctionInfo *FInfo = 0;
  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
    FInfo = &CGM.getTypes().arrangeCXXDestructor(Dtor,
                                                 Dtor_Complete);
  else if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(CalleeDecl))
    FInfo = &CGM.getTypes().arrangeCXXConstructorDeclaration(Ctor,
                                                             Ctor_Complete);
  else
    FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);

  llvm::FunctionType *Ty = CGM.getTypes().GetFunctionType(*FInfo);

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
  bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;
  llvm::Value *Callee;

  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
    assert(CE->arg_begin() == CE->arg_end() &&
           "Destructor shouldn't have explicit parameters");
    assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
    if (UseVirtualCall) {
      CGM.getCXXABI().EmitVirtualDestructorCall(*this, Dtor, Dtor_Complete,
                                                CE->getExprLoc(), This);
    } else {
      if (getLangOpts().AppleKext &&
          MD->isVirtual() &&
          ME->hasQualifier())
        Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
      else if (!DevirtualizedMethod)
        Callee = CGM.GetAddrOfCXXDestructor(Dtor, Dtor_Complete, FInfo, Ty);
      else {
        const CXXDestructorDecl *DDtor =
          cast<CXXDestructorDecl>(DevirtualizedMethod);
        Callee = CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty);
      }
      EmitCXXMemberCall(MD, CE->getExprLoc(), Callee, ReturnValue, This,
                        /*ImplicitParam=*/0, QualType(), 0, 0);
    }
    return RValue::get(0);
  }

  if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
    Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
  } else if (UseVirtualCall) {
    Callee = CGM.getCXXABI().getVirtualFunctionPointer(*this, MD, This, Ty);
  } else {
    if (getLangOpts().AppleKext &&
        MD->isVirtual() &&
        ME->hasQualifier())
      Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
    else if (!DevirtualizedMethod)
      Callee = CGM.GetAddrOfFunction(MD, Ty);
    else {
      Callee = CGM.GetAddrOfFunction(DevirtualizedMethod, Ty);
    }
  }

  if (MD->isVirtual())
    This = CGM.getCXXABI().adjustThisArgumentForVirtualCall(*this, MD, This);

  return EmitCXXMemberCall(MD, CE->getExprLoc(), Callee, ReturnValue, This,
                           /*ImplicitParam=*/0, QualType(),
                           CE->arg_begin(), CE->arg_end());
}

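// A call through a pointer to member function reaches
// EmitCXXMemberPointerCallExpr below via the builtin '.*' or '->*' operators.
// For example (hypothetical types):
//
//   struct S { int f(int); };
//   int (S::*pmf)(int) = &S::f;
//   (s.*pmf)(1);       // BO_PtrMemD
//   (ps->*pmf)(1);     // BO_PtrMemI
//
// The C++ ABI decides how the actual function pointer (and any 'this'
// adjustment) is extracted from the member pointer value.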
RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  const BinaryOperator *BO =
      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();

  const MemberPointerType *MPT =
      MemFnExpr->getType()->castAs<MemberPointerType>();

  const FunctionProtoType *FPT =
      MPT->getPointeeType()->castAs<FunctionProtoType>();
  const CXXRecordDecl *RD =
      cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Emit the 'this' pointer.
  llvm::Value *This;

  if (BO->getOpcode() == BO_PtrMemI)
    This = EmitScalarExpr(BaseExpr);
  else
    This = EmitLValue(BaseExpr).getAddress();

  EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This,
                QualType(MPT->getClass(), 0));

  // Ask the ABI to load the callee.  Note that This is modified.
  llvm::Value *Callee =
    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);

  CallArgList Args;

  QualType ThisType =
    getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.add(RValue::get(This), ThisType);

  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);

  // And the rest of the call args
  EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
                  Callee, ReturnValue, Args);
}

RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                               const CXXMethodDecl *MD,
                                               ReturnValueSlot ReturnValue) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");
  LValue LV = EmitLValue(E->getArg(0));
  llvm::Value *This = LV.getAddress();

  if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
      MD->isTrivial()) {
    llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
    QualType Ty = E->getType();
    EmitAggregateAssign(This, Src, Ty);
    return RValue::get(This);
  }

  llvm::Value *Callee = EmitCXXOperatorMemberCallee(E, MD, This);
  return EmitCXXMemberCall(MD, E->getExprLoc(), Callee, ReturnValue, This,
                           /*ImplicitParam=*/0, QualType(),
                           E->arg_begin() + 1, E->arg_end());
}

RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
                                               ReturnValueSlot ReturnValue) {
  return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
}

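// EmitNullBaseClassInitialization zero-initializes just the non-virtual
// portion of a base-class subobject.  A plain memset to zero is not always
// correct: for instance, with a hypothetical
//
//   struct A { int A::*p; int x; };
//
// a null pointer-to-data-member is not an all-zero bit pattern under the
// Itanium ABI (it is -1), so such bases are initialized by copying a constant
// built with EmitNullConstantForBase instead.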
static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
                                            llvm::Value *DestPtr,
                                            const CXXRecordDecl *Base) {
  if (Base->isEmpty())
    return;

  DestPtr = CGF.EmitCastToVoidPtr(DestPtr);

  const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
  CharUnits Size = Layout.getNonVirtualSize();
  CharUnits Align = Layout.getNonVirtualAlign();

  llvm::Value *SizeVal = CGF.CGM.getSize(Size);

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  // TODO: isZeroInitializable can be over-conservative in the case where a
  // virtual base contains a member pointer.
  if (!CGF.CGM.getTypes().isZeroInitializable(Base)) {
    llvm::Constant *NullConstant = CGF.CGM.EmitNullConstantForBase(Base);

    llvm::GlobalVariable *NullVariable =
      new llvm::GlobalVariable(CGF.CGM.getModule(), NullConstant->getType(),
                               /*isConstant=*/true,
                               llvm::GlobalVariable::PrivateLinkage,
                               NullConstant, Twine());
    NullVariable->setAlignment(Align.getQuantity());
    llvm::Value *SrcPtr = CGF.EmitCastToVoidPtr(NullVariable);

    // Get and call the appropriate llvm.memcpy overload.
    CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity());
    return;
  }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  CGF.Builder.CreateMemSet(DestPtr, CGF.Builder.getInt8(0), SizeVal,
                           Align.getQuantity());
}

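// EmitCXXConstructExpr may have to zero-initialize the destination before (or
// instead of) running a constructor.  As an illustration, with a hypothetical
//
//   struct P { int x; };
//   P *p = new P();    // value-initialization
//
// the storage is zeroed here and the trivial default constructor call is
// then skipped entirely.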
void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
                                      AggValueSlot Dest) {
  assert(!Dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now, unless the destination is
  // already zeroed.
  if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
    case CXXConstructExpr::CK_Complete:
      EmitNullInitialization(Dest.getAddr(), E->getType());
      break;
    case CXXConstructExpr::CK_VirtualBase:
    case CXXConstructExpr::CK_NonVirtualBase:
      EmitNullBaseClassInitialization(*this, Dest.getAddr(), CD->getParent());
      break;
    }
  }

  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Elide the constructor if we're constructing from a temporary.
  // The temporary check is required because Sema sets this on NRVO
  // returns.
  if (getLangOpts().ElideConstructors && E->isElidable()) {
    assert(getContext().hasSameUnqualifiedType(E->getType(),
                                               E->getArg(0)->getType()));
    if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
      EmitAggExpr(E->getArg(0), Dest);
      return;
    }
  }

  if (const ConstantArrayType *arrayType
        = getContext().getAsConstantArrayType(E->getType())) {
    EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddr(),
                               E->arg_begin(), E->arg_end());
  } else {
    CXXCtorType Type = Ctor_Complete;
    bool ForVirtualBase = false;
    bool Delegating = false;

    switch (E->getConstructionKind()) {
    case CXXConstructExpr::CK_Delegating:
      // We should be emitting a constructor; GlobalDecl will assert this
      Type = CurGD.getCtorType();
      Delegating = true;
      break;

    case CXXConstructExpr::CK_Complete:
      Type = Ctor_Complete;
      break;

    case CXXConstructExpr::CK_VirtualBase:
      ForVirtualBase = true;
      // fall-through

    case CXXConstructExpr::CK_NonVirtualBase:
      Type = Ctor_Base;
    }

    // Call the constructor.
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest.getAddr(),
                           E->arg_begin(), E->arg_end());
  }
}

void
CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
                                            llvm::Value *Src,
                                            const Expr *Exp) {
  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
    Exp = E->getSubExpr();
  assert(isa<CXXConstructExpr>(Exp) &&
         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
  const CXXConstructorDecl *CD = E->getConstructor();
  RunCleanupsScope Scope(*this);

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  // FIXME. Do I still need this for a copy ctor synthesis?
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest, E->getType());

  assert(!getContext().getAsConstantArrayType(E->getType())
         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E->arg_begin(), E->arg_end());
}

static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
                                        const CXXNewExpr *E) {
  if (!E->isArray())
    return CharUnits::Zero();

  // No cookie is required if the operator new[] being used is the
  // reserved placement operator new[].
  if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
    return CharUnits::Zero();

  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
}

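// For array new-expressions the ABI may require a "cookie": extra space,
// typically holding the element count, placed in front of the array so that
// delete[] can later find out how many destructors to run.  Roughly, under
// the Itanium ABI something like
//
//   struct T { ~T(); };
//   T *p = new T[n];
//
// reserves extra bytes (at least sizeof(size_t)), stores n there, and
// returns a pointer just past the cookie.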
static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
                                        const CXXNewExpr *e,
                                        unsigned minElements,
                                        llvm::Value *&numElements,
                                        llvm::Value *&sizeWithoutCookie) {
  QualType type = e->getAllocatedType();

  if (!e->isArray()) {
    CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
    sizeWithoutCookie
      = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
    return sizeWithoutCookie;
  }

  // The width of size_t.
  unsigned sizeWidth = CGF.SizeTy->getBitWidth();

  // Figure out the cookie size.
  llvm::APInt cookieSize(sizeWidth,
                         CalculateCookiePadding(CGF, e).getQuantity());

  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  numElements = CGF.EmitScalarExpr(e->getArraySize());
  assert(isa<llvm::IntegerType>(numElements->getType()));

  // The number of elements can have an arbitrary integer type;
  // essentially, we need to multiply it by a constant factor, add a
  // cookie size, and verify that the result is representable as a
  // size_t.  That's just a gloss, though, and it's wrong in one
  // important way: if the count is negative, it's an error even if
  // the cookie size would bring the total size >= 0.
  bool isSigned
    = e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
  llvm::IntegerType *numElementsType
    = cast<llvm::IntegerType>(numElements->getType());
  unsigned numElementsWidth = numElementsType->getBitWidth();

  // Compute the constant factor.
  llvm::APInt arraySizeMultiplier(sizeWidth, 1);
  while (const ConstantArrayType *CAT
           = CGF.getContext().getAsConstantArrayType(type)) {
    type = CAT->getElementType();
    arraySizeMultiplier *= CAT->getSize();
  }

  CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
  llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
  typeSizeMultiplier *= arraySizeMultiplier;

  // This will be a size_t.
  llvm::Value *size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *numElementsC =
        dyn_cast<llvm::ConstantInt>(numElements)) {
    const llvm::APInt &count = numElementsC->getValue();

    bool hasAnyOverflow = false;

    // If 'count' was a negative number, it's an overflow.
    if (isSigned && count.isNegative())
      hasAnyOverflow = true;

    // We want to do all this arithmetic in size_t.  If numElements is
    // wider than that, check whether it's already too big, and if so,
    // overflow.
    else if (numElementsWidth > sizeWidth &&
             numElementsWidth - sizeWidth > count.countLeadingZeros())
      hasAnyOverflow = true;

    // Okay, compute a count at the right width.
    llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);

    // If there is a brace-initializer, we cannot allocate fewer elements than
    // there are initializers. If we do, that's treated like an overflow.
    if (adjustedCount.ult(minElements))
      hasAnyOverflow = true;

    // Scale numElements by that.  This might overflow, but we don't
    // care because it only overflows if allocationSize does, too, and
    // if that overflows then we shouldn't use this.
    numElements = llvm::ConstantInt::get(CGF.SizeTy,
                                         adjustedCount * arraySizeMultiplier);

    // Compute the size before cookie, and track whether it overflowed.
    bool overflow;
    llvm::APInt allocationSize
      = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
    hasAnyOverflow |= overflow;

    // Add in the cookie, and check whether it's overflowed.
    if (cookieSize != 0) {
      // Save the current size without a cookie.  This shouldn't be
      // used if there was overflow.
      sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);

      allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
      hasAnyOverflow |= overflow;
    }

    // On overflow, produce a -1 so operator new will fail.
    if (hasAnyOverflow) {
      size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
    } else {
      size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
    }

  // Otherwise, we might need to use the overflow intrinsics.
  } else {
    // There are up to five conditions we need to test for:
    // 1) if isSigned, we need to check whether numElements is negative;
    // 2) if numElementsWidth > sizeWidth, we need to check whether
    //    numElements is larger than something representable in size_t;
    // 3) if minElements > 0, we need to check whether numElements is smaller
    //    than that.
    // 4) we need to compute
    //      sizeWithoutCookie := numElements * typeSizeMultiplier
    //    and check whether it overflows; and
    // 5) if we need a cookie, we need to compute
    //      size := sizeWithoutCookie + cookieSize
    //    and check whether it overflows.

    llvm::Value *hasOverflow = 0;

    // If numElementsWidth > sizeWidth, then one way or another, we're
    // going to have to do a comparison for (2), and this happens to
    // take care of (1), too.
    if (numElementsWidth > sizeWidth) {
      llvm::APInt threshold(numElementsWidth, 1);
      threshold <<= sizeWidth;

      llvm::Value *thresholdV
        = llvm::ConstantInt::get(numElementsType, threshold);

      hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
      numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);

    // Otherwise, if we're signed, we want to sext up to size_t.
    } else if (isSigned) {
      if (numElementsWidth < sizeWidth)
        numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);

      // If there's a non-1 type size multiplier, then we can do the
      // signedness check at the same time as we do the multiply
      // because a negative number times anything will cause an
      // unsigned overflow.  Otherwise, we have to do it here. But at least
      // in this case, we can subsume the >= minElements check.
      if (typeSizeMultiplier == 1)
        hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));

    // Otherwise, zext up to size_t if necessary.
    } else if (numElementsWidth < sizeWidth) {
      numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
    }

    assert(numElements->getType() == CGF.SizeTy);

    if (minElements) {
      // Don't allow allocation of fewer elements than we have initializers.
      if (!hasOverflow) {
        hasOverflow = CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));
      } else if (numElementsWidth > sizeWidth) {
        // The other existing overflow subsumes this check.
        // We do an unsigned comparison, since any signed value < -1 is
        // taken care of either above or below.
        hasOverflow = CGF.Builder.CreateOr(hasOverflow,
                          CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements)));
      }
    }

    size = numElements;

    // Multiply by the type size if necessary.  This multiplier
    // includes all the factors for nested arrays.
    //
    // This step also causes numElements to be scaled up by the
    // nested-array factor if necessary.  Overflow on this computation
    // can be ignored because the result shouldn't be used if
    // allocation fails.
    if (typeSizeMultiplier != 1) {
      llvm::Value *umul_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);

      llvm::Value *tsmV =
        llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
      llvm::Value *result =
        CGF.Builder.CreateCall2(umul_with_overflow, size, tsmV);

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);

      // Also scale up numElements by the array size multiplier.
      if (arraySizeMultiplier != 1) {
        // If the base element type size is 1, then we can re-use the
        // multiply we just did.
        if (typeSize.isOne()) {
          assert(arraySizeMultiplier == typeSizeMultiplier);
          numElements = size;

        // Otherwise we need a separate multiply.
        } else {
          llvm::Value *asmV =
            llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
          numElements = CGF.Builder.CreateMul(numElements, asmV);
        }
      }
    } else {
      // numElements doesn't need to be scaled.
      assert(arraySizeMultiplier == 1);
    }

    // Add in the cookie size if necessary.
    if (cookieSize != 0) {
      sizeWithoutCookie = size;

      llvm::Value *uadd_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);

      llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
      llvm::Value *result =
        CGF.Builder.CreateCall2(uadd_with_overflow, size, cookieSizeV);

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);
    }

    // If we had any possibility of dynamic overflow, make a select to
    // overwrite 'size' with an all-ones value, which should cause
    // operator new to throw.
    if (hasOverflow)
      size = CGF.Builder.CreateSelect(hasOverflow,
                                 llvm::Constant::getAllOnesValue(CGF.SizeTy),
                                      size);
  }

  if (cookieSize == 0)
    sizeWithoutCookie = size;
  else
    assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");

  return size;
}

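// As an illustration, on a 64-bit target a hypothetical 'new int[n]' with a
// signed 32-bit 'n' is sized roughly as:
//
//   %n64  = sext i32 %n to i64
//   %mul  = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %n64, i64 4)
//   %size = select i1 <any overflow>, i64 -1, i64 <product [+ cookie]>
//
// so that a negative or too-large count yields an all-ones size, which the
// allocation function is expected to reject.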
static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
                                    QualType AllocType, llvm::Value *NewPtr) {

  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(AllocType);
  switch (CGF.getEvaluationKind(AllocType)) {
  case TEK_Scalar:
    CGF.EmitScalarInit(Init, 0, CGF.MakeAddrLValue(NewPtr, AllocType,
                                                   Alignment),
                       false);
    return;
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType,
                                                           Alignment),
                                  /*isInit*/ true);
    return;
  case TEK_Aggregate: {
    AggValueSlot Slot
      = AggValueSlot::forAddr(NewPtr, Alignment, AllocType.getQualifiers(),
                              AggValueSlot::IsDestructed,
                              AggValueSlot::DoesNotNeedGCBarriers,
                              AggValueSlot::IsNotAliased);
    CGF.EmitAggExpr(Init, Slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}

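// EmitNewArrayInitializer emits the initializers for an array new-expression.
// For example, a hypothetical
//
//   int *p = new int[n]{1, 2, 3};
//
// first stores the three explicit initializers, then runs a loop that fills
// the remaining n - 3 elements with the array filler (here, zero).  For
// element types with destructors, a partial-destruction cleanup tracks how
// many elements have been constructed so they can be destroyed if a later
// initializer throws.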
void
CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
                                         QualType elementType,
                                         llvm::Value *beginPtr,
                                         llvm::Value *numElements) {
  if (!E->hasInitializer())
    return; // We have a POD type.

  llvm::Value *explicitPtr = beginPtr;
  // Find the end of the array, hoisted out of the loop.
  llvm::Value *endPtr =
    Builder.CreateInBoundsGEP(beginPtr, numElements, "array.end");

  unsigned initializerElements = 0;

  const Expr *Init = E->getInitializer();
  llvm::AllocaInst *endOfInit = 0;
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  EHScopeStack::stable_iterator cleanup;
  llvm::Instruction *cleanupDominator = 0;
  // If the initializer is an initializer list, first do the explicit elements.
  if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
    initializerElements = ILE->getNumInits();

    // Enter a partial-destruction cleanup if necessary.
    if (needsEHCleanup(dtorKind)) {
      // In principle we could tell the cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex.  Therefore we go through an
      // alloca.
      endOfInit = CreateTempAlloca(beginPtr->getType(), "array.endOfInit");
      cleanupDominator = Builder.CreateStore(beginPtr, endOfInit);
      pushIrregularPartialArrayCleanup(beginPtr, endOfInit, elementType,
                                       getDestroyer(dtorKind));
      cleanup = EHStack.stable_begin();
    }

    for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (endOfInit) Builder.CreateStore(explicitPtr, endOfInit);
      StoreAnyExprIntoOneUnit(*this, ILE->getInit(i), elementType, explicitPtr);
      explicitPtr = Builder.CreateConstGEP1_32(explicitPtr, 1, "array.exp.next");
    }

    // The remaining elements are filled with the array filler expression.
    Init = ILE->getArrayFiller();
  }

  // Create the continuation block.
  llvm::BasicBlock *contBB = createBasicBlock("new.loop.end");

  // If the number of elements isn't constant, we have to now check if there is
  // anything left to initialize.
  if (llvm::ConstantInt *constNum = dyn_cast<llvm::ConstantInt>(numElements)) {
    // If all elements have already been initialized, skip the whole loop.
    if (constNum->getZExtValue() <= initializerElements) {
      // If there was a cleanup, deactivate it.
      if (cleanupDominator)
        DeactivateCleanupBlock(cleanup, cleanupDominator);
      return;
    }
  } else {
    llvm::BasicBlock *nonEmptyBB = createBasicBlock("new.loop.nonempty");
    llvm::Value *isEmpty = Builder.CreateICmpEQ(explicitPtr, endPtr,
                                                "array.isempty");
    Builder.CreateCondBr(isEmpty, contBB, nonEmptyBB);
    EmitBlock(nonEmptyBB);
  }

  // Enter the loop.
  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = createBasicBlock("new.loop");

  EmitBlock(loopBB);

  // Set up the current-element phi.
  llvm::PHINode *curPtr =
    Builder.CreatePHI(explicitPtr->getType(), 2, "array.cur");
  curPtr->addIncoming(explicitPtr, entryBB);

  // Store the new cleanup position for irregular cleanups.
  if (endOfInit) Builder.CreateStore(curPtr, endOfInit);

  // Enter a partial-destruction cleanup if necessary.
  if (!cleanupDominator && needsEHCleanup(dtorKind)) {
    pushRegularPartialArrayCleanup(beginPtr, curPtr, elementType,
                                   getDestroyer(dtorKind));
    cleanup = EHStack.stable_begin();
    cleanupDominator = Builder.CreateUnreachable();
  }

  // Emit the initializer into this element.
  StoreAnyExprIntoOneUnit(*this, Init, E->getAllocatedType(), curPtr);

  // Leave the cleanup if we entered one.
  if (cleanupDominator) {
    DeactivateCleanupBlock(cleanup, cleanupDominator);
    cleanupDominator->eraseFromParent();
  }

  // Advance to the next element.
  llvm::Value *nextPtr = Builder.CreateConstGEP1_32(curPtr, 1, "array.next");

  // Check whether we've gotten to the end of the array and, if so,
  // exit the loop.
  llvm::Value *isEnd = Builder.CreateICmpEQ(nextPtr, endPtr, "array.atend");
  Builder.CreateCondBr(isEnd, contBB, loopBB);
  curPtr->addIncoming(nextPtr, Builder.GetInsertBlock());

  EmitBlock(contBB);
}

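// When the elements are zero-initializable and the initializer only requests
// value (zero) initialization, the per-element work above can be replaced by
// a single memset over the allocation.  For instance, a hypothetical
// 'new int[n]()' is lowered to one llvm.memset call covering
// allocSizeWithoutCookie bytes.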
static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
                           llvm::Value *NewPtr, llvm::Value *Size) {
  CGF.EmitCastToVoidPtr(NewPtr);
  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(T);
  CGF.Builder.CreateMemSet(NewPtr, CGF.Builder.getInt8(0), Size,
                           Alignment.getQuantity(), false);
}

static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               QualType ElementType,
                               llvm::Value *NewPtr,
                               llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie) {
  const Expr *Init = E->getInitializer();
  if (E->isArray()) {
    if (const CXXConstructExpr *CCE = dyn_cast_or_null<CXXConstructExpr>(Init)) {
      CXXConstructorDecl *Ctor = CCE->getConstructor();
      if (Ctor->isTrivial()) {
        // If new expression did not specify value-initialization, then there
        // is no initialization.
        if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
          return;

        if (CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
          // Optimization: since zero initialization will just set the memory
          // to all zeroes, generate a single memset to do it in one shot.
          EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
          return;
        }
      }

      CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
                                     CCE->arg_begin(), CCE->arg_end(),
                                     CCE->requiresZeroInitialization());
      return;
    } else if (Init && isa<ImplicitValueInitExpr>(Init) &&
               CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
      // Optimization: since zero initialization will just set the memory
      // to all zeroes, generate a single memset to do it in one shot.
      EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
      return;
    }
    CGF.EmitNewArrayInitializer(E, ElementType, NewPtr, NumElements);
    return;
  }

  if (!Init)
    return;

  StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr);
}

/// Emit a call to an operator new or operator delete function, as implicitly
/// created by new-expressions and delete-expressions.
static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
                                const FunctionDecl *Callee,
                                const FunctionProtoType *CalleeType,
                                const CallArgList &Args) {
  llvm::Instruction *CallOrInvoke;
  llvm::Value *CalleeAddr = CGF.CGM.GetAddrOfFunction(Callee);
  RValue RV =
      CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(Args, CalleeType),
                   CalleeAddr, ReturnValueSlot(), Args,
                   Callee, &CallOrInvoke);

  /// C++1y [expr.new]p10:
  ///   [In a new-expression,] an implementation is allowed to omit a call
  ///   to a replaceable global allocation function.
  ///
  /// We model such elidable calls with the 'builtin' attribute.
  llvm::Function *Fn = dyn_cast<llvm::Function>(CalleeAddr);
  if (Callee->isReplaceableGlobalAllocationFunction() &&
      Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
    // FIXME: Add addAttribute to CallSite.
    if (llvm::CallInst *CI = dyn_cast<llvm::CallInst>(CallOrInvoke))
      CI->addAttribute(llvm::AttributeSet::FunctionIndex,
                       llvm::Attribute::Builtin);
    else if (llvm::InvokeInst *II = dyn_cast<llvm::InvokeInst>(CallOrInvoke))
      II->addAttribute(llvm::AttributeSet::FunctionIndex,
                       llvm::Attribute::Builtin);
    else
      llvm_unreachable("unexpected kind of call instruction");
  }

  return RV;
}

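// If the initializer of a new-expression throws after the allocation has
// succeeded, the matching deallocation function must be called before the
// exception propagates.  For example, a hypothetical
//
//   struct T { T(int); };       // T::T may throw
//   T *p = new T(42);
//
// needs an EH path that calls 'operator delete(p)' when T::T throws; the
// cleanups below implement that, forwarding any placement arguments as well.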
namespace {
  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression.
  class CallDeleteDuringNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    llvm::Value *Ptr;
    llvm::Value *AllocSize;

    RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(RValue);
    }

    CallDeleteDuringNew(size_t NumPlacementArgs,
                        const FunctionDecl *OperatorDelete,
                        llvm::Value *Ptr,
                        llvm::Value *AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    void setPlacementArg(unsigned I, RValue Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.add(RValue::get(Ptr), *AI++);

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2)
        DeleteArgs.add(RValue::get(AllocSize), *AI++);

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I)
        DeleteArgs.add(getPlacementArgs()[I], *AI++);

      // Call 'operator delete'.
      EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
    }
  };

  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression when the new expression is
  /// conditional.
  class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    DominatingValue<RValue>::saved_type Ptr;
    DominatingValue<RValue>::saved_type AllocSize;

    DominatingValue<RValue>::saved_type *getPlacementArgs() {
      return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
    }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
    }

    CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
                                   const FunctionDecl *OperatorDelete,
                                   DominatingValue<RValue>::saved_type Ptr,
                                   DominatingValue<RValue>::saved_type AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.add(Ptr.restore(CGF), *AI++);

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2) {
        RValue RV = AllocSize.restore(CGF);
        DeleteArgs.add(RV, *AI++);
      }

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
        RValue RV = getPlacementArgs()[I].restore(CGF);
        DeleteArgs.add(RV, *AI++);
      }

      // Call 'operator delete'.
      EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
    }
  };
}

/// Enter a cleanup to call 'operator delete' if the initializer in a
/// new-expression throws.
static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
                                  const CXXNewExpr *E,
                                  llvm::Value *NewPtr,
                                  llvm::Value *AllocSize,
                                  const CallArgList &NewArgs) {
  // If we're not inside a conditional branch, then the cleanup will
  // dominate and we can do the easier (and more efficient) thing.
  if (!CGF.isInConditionalBranch()) {
    CallDeleteDuringNew *Cleanup = CGF.EHStack
      .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
                                                 E->getNumPlacementArgs(),
                                                 E->getOperatorDelete(),
                                                 NewPtr, AllocSize);
    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
      Cleanup->setPlacementArg(I, NewArgs[I+1].RV);

    return;
  }

  // Otherwise, we need to save all this stuff.
  DominatingValue<RValue>::saved_type SavedNewPtr =
    DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
  DominatingValue<RValue>::saved_type SavedAllocSize =
    DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));

  CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
    .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(EHCleanup,
                                                 E->getNumPlacementArgs(),
                                                 E->getOperatorDelete(),
                                                 SavedNewPtr,
                                                 SavedAllocSize);
  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
    Cleanup->setPlacementArg(I,
                             DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV));

  CGF.initFullExprCleanup();
}

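// EmitCXXNewExpr drives the whole lowering of a new-expression.  As an
// illustration, a hypothetical 'Foo *p = new Foo(x)' is emitted roughly as:
//
//   size  = sizeof(Foo)                          // EmitCXXNewAllocSize
//   mem   = operator new(size)                   // EmitNewDeleteCall
//   [null check of mem if operator new may return null]
//   [enter cleanup: operator delete(mem) if the ctor throws]
//   Foo::Foo(mem, x)                             // EmitNewInitializer
//   [leave cleanup]
//   p     = (Foo*)mem
//
// Array forms additionally compute an overflow-checked size and may set up
// an ABI array cookie.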
llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  // The element type being allocated.
  QualType allocType = getContext().getBaseElementType(E->getAllocatedType());

  // 1. Build a call to the allocation function.
  FunctionDecl *allocator = E->getOperatorNew();
  const FunctionProtoType *allocatorType =
    allocator->getType()->castAs<FunctionProtoType>();

  CallArgList allocatorArgs;

  // The allocation size is the first argument.
  QualType sizeType = getContext().getSizeType();

  // If there is a brace-initializer, we cannot allocate fewer elements than
  // there are initializers.
  unsigned minElements = 0;
  if (E->isArray() && E->hasInitializer()) {
    if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer()))
      minElements = ILE->getNumInits();
  }

  llvm::Value *numElements = 0;
  llvm::Value *allocSizeWithoutCookie = 0;
  llvm::Value *allocSize =
    EmitCXXNewAllocSize(*this, E, minElements, numElements,
                        allocSizeWithoutCookie);

  allocatorArgs.add(RValue::get(allocSize), sizeType);

  // Emit the rest of the arguments.
  // FIXME: Ideally, this should just use EmitCallArgs.
  CXXNewExpr::const_arg_iterator placementArg = E->placement_arg_begin();

  // First, use the types from the function type.
  // We start at 1 here because the first argument (the allocation size)
  // has already been emitted.
  for (unsigned i = 1, e = allocatorType->getNumArgs(); i != e;
       ++i, ++placementArg) {
    QualType argType = allocatorType->getArgType(i);

    assert(getContext().hasSameUnqualifiedType(argType.getNonReferenceType(),
                                               placementArg->getType()) &&
           "type mismatch in call argument!");

    EmitCallArg(allocatorArgs, *placementArg, argType);
  }

  // Either we've emitted all the call args, or we have a call to a
  // variadic function.
  assert((placementArg == E->placement_arg_end() ||
          allocatorType->isVariadic()) &&
         "Extra arguments to non-variadic function!");

  // If we still have any arguments, emit them using the type of the argument.
  for (CXXNewExpr::const_arg_iterator placementArgsEnd = E->placement_arg_end();
       placementArg != placementArgsEnd; ++placementArg) {
    EmitCallArg(allocatorArgs, *placementArg, placementArg->getType());
  }

  // Emit the allocation call.  If the allocator is a global placement
  // operator, just "inline" it directly.
  RValue RV;
  if (allocator->isReservedGlobalPlacementOperator()) {
    assert(allocatorArgs.size() == 2);
    RV = allocatorArgs[1].RV;
    // TODO: kill any unnecessary computations done for the size
    // argument.
  } else {
    RV = EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
  }

  // Emit a null check on the allocation result if the allocation
  // function is allowed to return null (because it has a non-throwing
  // exception spec; for this part, we inline
  // CXXNewExpr::shouldNullCheckAllocation()) and we have an
  // interesting initializer.
  bool nullCheck = allocatorType->isNothrow(getContext()) &&
    (!allocType.isPODType(getContext()) || E->hasInitializer());

  llvm::BasicBlock *nullCheckBB = 0;
  llvm::BasicBlock *contBB = 0;

  llvm::Value *allocation = RV.getScalarVal();
  unsigned AS = allocation->getType()->getPointerAddressSpace();

  // The null-check means that the initializer is conditionally
  // evaluated.
  ConditionalEvaluation conditional(*this);

  if (nullCheck) {
    conditional.begin(*this);

    nullCheckBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
    contBB = createBasicBlock("new.cont");

    llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
    Builder.CreateCondBr(isNull, contBB, notNullBB);
    EmitBlock(notNullBB);
  }

  // If there's an operator delete, enter a cleanup to call it if an
  // exception is thrown.
  EHScopeStack::stable_iterator operatorDeleteCleanup;
  llvm::Instruction *cleanupDominator = 0;
  if (E->getOperatorDelete() &&
      !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
    EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
    operatorDeleteCleanup = EHStack.stable_begin();
    cleanupDominator = Builder.CreateUnreachable();
  }

  assert((allocSize == allocSizeWithoutCookie) ==
         CalculateCookiePadding(*this, E).isZero());
  if (allocSize != allocSizeWithoutCookie) {
    assert(E->isArray());
    allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
                                                       numElements,
                                                       E, allocType);
  }

  llvm::Type *elementPtrTy
    = ConvertTypeForMem(allocType)->getPointerTo(AS);
  llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy);

  EmitNewInitializer(*this, E, allocType, result, numElements,
                     allocSizeWithoutCookie);
  if (E->isArray()) {
    // NewPtr is a pointer to the base element type.  If we're
    // allocating an array of arrays, we'll need to cast back to the
    // array pointer type.
    llvm::Type *resultType = ConvertTypeForMem(E->getType());
    if (result->getType() != resultType)
      result = Builder.CreateBitCast(result, resultType);
  }

  // Deactivate the 'operator delete' cleanup if we finished
  // initialization.
  if (operatorDeleteCleanup.isValid()) {
    DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
    cleanupDominator->eraseFromParent();
  }

  if (nullCheck) {
    conditional.end(*this);

    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    EmitBlock(contBB);

    llvm::PHINode *PHI = Builder.CreatePHI(result->getType(), 2);
    PHI->addIncoming(result, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(result->getType()),
                     nullCheckBB);

    result = PHI;
  }

  return result;
}

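// EmitDeleteCall below builds the argument list for a deallocation function.
// If the selected 'operator delete' takes a second size_t parameter, the size
// of the deleted type is passed as well.  For example, a hypothetical
//
//   struct S { void operator delete(void *p, size_t sz); };
//   delete s;   // calls S::operator delete(s, sizeof(S))
//
// passes sizeof(S) as the second argument.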
void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
                                     llvm::Value *Ptr,
                                     QualType DeleteTy) {
  assert(DeleteFD->getOverloadedOperator() == OO_Delete);

  const FunctionProtoType *DeleteFTy =
    DeleteFD->getType()->getAs<FunctionProtoType>();

  CallArgList DeleteArgs;

  // Check if we need to pass the size to the delete operator.
  llvm::Value *Size = 0;
  QualType SizeTy;
  if (DeleteFTy->getNumArgs() == 2) {
    SizeTy = DeleteFTy->getArgType(1);
    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
    Size = llvm::ConstantInt::get(ConvertType(SizeTy),
                                  DeleteTypeSize.getQuantity());
  }

  QualType ArgTy = DeleteFTy->getArgType(0);
  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
  DeleteArgs.add(RValue::get(DeletePtr), ArgTy);

  if (Size)
    DeleteArgs.add(RValue::get(Size), SizeTy);

  // Emit the call to delete.
  EmitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs);
}

namespace {
  /// Calls the given 'operator delete' on a single object.
  struct CallObjectDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    QualType ElementType;

    CallObjectDelete(llvm::Value *Ptr,
                     const FunctionDecl *OperatorDelete,
                     QualType ElementType)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
    }
  };
}

/// Emit the code for deleting a single object.
static void EmitObjectDelete(CodeGenFunction &CGF,
                             const FunctionDecl *OperatorDelete,
                             llvm::Value *Ptr,
                             QualType ElementType,
                             bool UseGlobalDelete) {
  // Find the destructor for the type, if applicable.  If the
  // destructor is virtual, we'll just emit the vcall and return.
  const CXXDestructorDecl *Dtor = 0;
  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
      Dtor = RD->getDestructor();

      if (Dtor->isVirtual()) {
        if (UseGlobalDelete) {
          // If we're supposed to call the global delete, make sure we do so
          // even if the destructor throws.

          // Derive the complete-object pointer, which is what we need
          // to pass to the deallocation function.
          llvm::Value *completePtr =
            CGF.CGM.getCXXABI().adjustToCompleteObject(CGF, Ptr, ElementType);

          CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                                    completePtr, OperatorDelete,
                                                    ElementType);
        }

        // FIXME: Provide a source location here.
        CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
        CGF.CGM.getCXXABI().EmitVirtualDestructorCall(CGF, Dtor, DtorType,
                                                      SourceLocation(), Ptr);

        if (UseGlobalDelete) {
          CGF.PopCleanupBlock();
        }

        return;
      }
    }
  }

  // Make sure that we call delete even if the dtor throws.
  // This doesn't have to be a conditional cleanup because we're going
  // to pop it off in a second.
  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                            Ptr, OperatorDelete, ElementType);

  if (Dtor)
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false,
                              /*Delegating=*/false,
                              Ptr);
  else if (CGF.getLangOpts().ObjCAutoRefCount &&
           ElementType->isObjCLifetimeType()) {
    switch (ElementType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong: {
      // Load the pointer value.
      llvm::Value *PtrValue = CGF.Builder.CreateLoad(Ptr,
                                             ElementType.isVolatileQualified());

      CGF.EmitARCRelease(PtrValue, ARCPreciseLifetime);
      break;
    }

    case Qualifiers::OCL_Weak:
      CGF.EmitARCDestroyWeak(Ptr);
      break;
    }
  }

  CGF.PopCleanupBlock();
}

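// For an array delete, the deallocation size cannot be derived from the
// static type alone: the element count comes from the array cookie.  For
// example, a hypothetical
//
//   struct T { ~T(); void operator delete[](void*, size_t); };
//   delete[] p;   // p points at n elements
//
// reads n from the cookie and passes n * sizeof(T) plus the cookie size as
// the size argument, as computed in CallArrayDelete::Emit below.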
namespace {
  /// Calls the given 'operator delete' on an array of objects.
  struct CallArrayDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    llvm::Value *NumElements;
    QualType ElementType;
    CharUnits CookieSize;

    CallArrayDelete(llvm::Value *Ptr,
                    const FunctionDecl *OperatorDelete,
                    llvm::Value *NumElements,
                    QualType ElementType,
                    CharUnits CookieSize)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
        ElementType(ElementType), CookieSize(CookieSize) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const FunctionProtoType *DeleteFTy =
        OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);

      CallArgList Args;

      // Pass the pointer as the first argument.
      QualType VoidPtrTy = DeleteFTy->getArgType(0);
      llvm::Value *DeletePtr
        = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
      Args.add(RValue::get(DeletePtr), VoidPtrTy);

      // Pass the original requested size as the second argument.
      if (DeleteFTy->getNumArgs() == 2) {
        QualType size_t = DeleteFTy->getArgType(1);
        llvm::IntegerType *SizeTy
          = cast<llvm::IntegerType>(CGF.ConvertType(size_t));

        CharUnits ElementTypeSize =
          CGF.CGM.getContext().getTypeSizeInChars(ElementType);

        // The size of an element, multiplied by the number of elements.
        llvm::Value *Size
          = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
        Size = CGF.Builder.CreateMul(Size, NumElements);

        // Plus the size of the cookie if applicable.
        if (!CookieSize.isZero()) {
          llvm::Value *CookieSizeV
            = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
          Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
        }

        Args.add(RValue::get(Size), size_t);
      }

      // Emit the call to delete.
      EmitNewDeleteCall(CGF, OperatorDelete, DeleteFTy, Args);
    }
  };
}

/// Emit the code for deleting an array of objects.
static void EmitArrayDelete(CodeGenFunction &CGF,
                            const CXXDeleteExpr *E,
                            llvm::Value *deletedPtr,
                            QualType elementType) {
  llvm::Value *numElements = 0;
  llvm::Value *allocatedPtr = 0;
  CharUnits cookieSize;
  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
                                      numElements, allocatedPtr, cookieSize);

  assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");

  // Make sure that we call delete even if one of the dtors throws.
  const FunctionDecl *operatorDelete = E->getOperatorDelete();
  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
                                           allocatedPtr, operatorDelete,
                                           numElements, elementType,
                                           cookieSize);

  // Destroy the elements.
  if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
    assert(numElements && "no element count for a type with a destructor!");

    llvm::Value *arrayEnd =
      CGF.Builder.CreateInBoundsGEP(deletedPtr, numElements, "delete.end");

    // Note that it is legal to allocate a zero-length array, and we
    // can never fold the check away because the length should always
    // come from a cookie.
    CGF.emitArrayDestroy(deletedPtr, arrayEnd, elementType,
                         CGF.getDestroyer(dtorKind),
                         /*checkZeroLength*/ true,
                         CGF.needsEHCleanup(dtorKind));
  }

  // Pop the cleanup block.
  CGF.PopCleanupBlock();
}

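// EmitCXXDeleteExpr guards the whole operation with a null check, since
// deleting a null pointer does nothing here.  Roughly, a hypothetical
// 'delete p' is emitted as:
//
//   if (p != nullptr) {
//     <destroy *p>            // single-object or array form
//     operator delete(p);
//   }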
llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  llvm::Type *StdTypeInfoPtrTy =
    ConvertType(E->getType())->getPointerTo();

  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
      CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand(getContext()));
    return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
  }

  // C++ [expr.typeid]p2:
  //   When typeid is applied to a glvalue expression whose type is a
  //   polymorphic class type, the result refers to a std::type_info object
  //   representing the type of the most derived object (that is, the dynamic
  //   type) to which the glvalue refers.
  if (E->isPotentiallyEvaluated())
    return EmitTypeidFromVTable(*this, E->getExprOperand(),
                                StdTypeInfoPtrTy);

  QualType OperandTy = E->getExprOperand()->getType();
  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
                               StdTypeInfoPtrTy);
}

static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
  // void *__dynamic_cast(const void *sub,
  //                      const abi::__class_type_info *src,
  //                      const abi::__class_type_info *dst,
  //                      std::ptrdiff_t src2dst_offset);

  llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
  llvm::Type *PtrDiffTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());

  llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };

  llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);

  // Mark the function as nounwind readonly.
  llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
                                            llvm::Attribute::ReadOnly };
  llvm::AttributeSet Attrs = llvm::AttributeSet::get(
      CGF.getLLVMContext(), llvm::AttributeSet::FunctionIndex, FuncAttrs);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
}

static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
  // void __cxa_bad_cast();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
}

static void EmitBadCastCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadCastFn(CGF);
  CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

/// \brief Compute the src2dst_offset hint as described in the
/// Itanium C++ ABI [2.9.7].
static CharUnits computeOffsetHint(ASTContext &Context,
                                   const CXXRecordDecl *Src,
                                   const CXXRecordDecl *Dst) {
  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                     /*DetectVirtual=*/false);

  // If Dst is not derived from Src we can skip the whole computation below
  // and return that Src is not a public base of Dst.  Record all
  // inheritance paths.
  if (!Dst->isDerivedFrom(Src, Paths))
    return CharUnits::fromQuantity(-2ULL);

  unsigned NumPublicPaths = 0;
  CharUnits Offset;

  // Now walk all possible inheritance paths.
  for (CXXBasePaths::paths_iterator I = Paths.begin(), E = Paths.end();
       I != E; ++I) {
    if (I->Access != AS_public) // Ignore non-public inheritance.
      continue;

    ++NumPublicPaths;

    for (CXXBasePath::iterator J = I->begin(), JE = I->end(); J != JE; ++J) {
      // If the path contains a virtual base class we can't give any hint.
      // -1: no hint.
      if (J->Base->isVirtual())
        return CharUnits::fromQuantity(-1ULL);

      if (NumPublicPaths > 1) // Won't use offsets, skip computation.
        continue;

      // Accumulate the base class offsets.
      const ASTRecordLayout &L = Context.getASTRecordLayout(J->Class);
      Offset += L.getBaseClassOffset(J->Base->getType()->getAsCXXRecordDecl());
    }
  }

  // -2: Src is not a public base of Dst.
  if (NumPublicPaths == 0)
    return CharUnits::fromQuantity(-2ULL);

  // -3: Src is a multiple public base type but never a virtual base type.
  if (NumPublicPaths > 1)
    return CharUnits::fromQuantity(-3ULL);

  // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
  // Return the offset of Src from the origin of Dst.
  return Offset;
}
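// Illustrative sketch (hypothetical types, not from this file): given
//
//   struct A { virtual ~A(); };
//   struct B { virtual ~B(); int x; };
//   struct D : A, B {};
//
// computeOffsetHint(Ctx, /*Src=*/B, /*Dst=*/D) walks the single public
// non-virtual path D -> B and returns the byte offset of the B subobject
// within D (e.g. the pointer size on a typical 64-bit Itanium layout), which
// is then passed to __dynamic_cast as the src2dst_offset hint; -1, -2 and -3
// encode "no hint", "Src is not a public base" and "multiple public bases"
// respectively, as in the Itanium C++ ABI [2.9.7].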
static llvm::Value *
EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
                    QualType SrcTy, QualType DestTy,
                    llvm::BasicBlock *CastEnd) {
  llvm::Type *PtrDiffLTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  if (const PointerType *PTy = DestTy->getAs<PointerType>()) {
    if (PTy->getPointeeType()->isVoidType()) {
      // C++ [expr.dynamic.cast]p7:
      //   If T is "pointer to cv void," then the result is a pointer to the
      //   most derived object pointed to by v.

      // Get the vtable pointer.
      llvm::Value *VTable = CGF.GetVTablePtr(Value, PtrDiffLTy->getPointerTo());

      // Get the offset-to-top from the vtable.
      llvm::Value *OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
      OffsetToTop = CGF.Builder.CreateLoad(OffsetToTop, "offset.to.top");

      // Finally, add the offset to the pointer.
      Value = CGF.EmitCastToVoidPtr(Value);
      Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);

      return CGF.Builder.CreateBitCast(Value, DestLTy);
    }
  }

  QualType SrcRecordTy;
  QualType DestRecordTy;

  if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) {
    SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
    DestRecordTy = DestPTy->getPointeeType();
  } else {
    SrcRecordTy = SrcTy;
    DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
  }

  assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
  assert(DestRecordTy->isRecordType() && "dest type must be a record type!");

  llvm::Value *SrcRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
    CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // Compute the offset hint.
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  llvm::Value *OffsetHint =
    llvm::ConstantInt::get(PtrDiffLTy,
                           computeOffsetHint(CGF.getContext(), SrcDecl,
                                             DestDecl).getQuantity());

  // Emit the call to __dynamic_cast.
  Value = CGF.EmitCastToVoidPtr(Value);

  llvm::Value *args[] = { Value, SrcRTTI, DestRTTI, OffsetHint };
  Value = CGF.EmitNounwindRuntimeCall(getDynamicCastFn(CGF), args);
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
      CGF.createBasicBlock("dynamic_cast.bad_cast");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}
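// Illustrative sketch (hypothetical types, approximate IR, not from this
// file): for `dynamic_cast<Derived*>(basePtr)` the path above bitcasts the
// operand to i8*, materializes the two RTTI descriptors and the
// computeOffsetHint() constant, and emits a nounwind call along the lines of
//
//   call i8* @__dynamic_cast(i8* %obj, i8* @_ZTI4Base, i8* @_ZTI7Derived,
//                            i64 <src2dst hint>)
//
// followed by a bitcast to the destination type; when the destination is a
// reference type, a null result instead branches to a block that calls
// __cxa_bad_cast.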
static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
                                          QualType DestTy) {
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  if (DestTy->isPointerType())
    return llvm::Constant::getNullValue(DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  EmitBadCastCall(CGF);

  CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
  return llvm::UndefValue::get(DestLTy);
}

llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
                                              const CXXDynamicCastExpr *DCE) {
  QualType DestTy = DCE->getTypeAsWritten();

  if (DCE->isAlwaysNull())
    return EmitDynamicCastToNull(*this, DestTy);

  QualType SrcTy = DCE->getSubExpr()->getType();

  // C++ [expr.dynamic.cast]p4:
  //   If the value of v is a null pointer value in the pointer case, the
  //   result is the null pointer value of type T.
  bool ShouldNullCheckSrcValue = SrcTy->isPointerType();

  llvm::BasicBlock *CastNull = 0;
  llvm::BasicBlock *CastNotNull = 0;
  llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");

  if (ShouldNullCheckSrcValue) {
    CastNull = createBasicBlock("dynamic_cast.null");
    CastNotNull = createBasicBlock("dynamic_cast.notnull");

    llvm::Value *IsNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  Value = EmitDynamicCastCall(*this, Value, SrcTy, DestTy, CastEnd);

  if (ShouldNullCheckSrcValue) {
    EmitBranch(CastEnd);

    EmitBlock(CastNull);
    EmitBranch(CastEnd);
  }

  EmitBlock(CastEnd);

  if (ShouldNullCheckSrcValue) {
    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);

    Value = PHI;
  }

  return Value;
}

void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
  RunCleanupsScope Scope(*this);
  LValue SlotLV = MakeAddrLValue(Slot.getAddr(), E->getType(),
                                 Slot.getAlignment());

  CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
  for (LambdaExpr::capture_init_iterator i = E->capture_init_begin(),
                                         e = E->capture_init_end();
       i != e; ++i, ++CurField) {
    // Emit initialization
    LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
    ArrayRef<VarDecl *> ArrayIndexes;
    if (CurField->getType()->isArrayType())
      ArrayIndexes = E->getCaptureInitIndexVars(i);
    EmitInitializerForField(*CurField, LV, *i, ArrayIndexes);
  }
}
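// Illustrative sketch (hypothetical user code, not from this file): for
//
//   int x = 0, y[2] = {1, 2};
//   auto l = [x, y] { return x + y[0]; };
//
// the lambda's closure type has one field per capture; the loop above walks
// the capture initializers in parallel with those fields and emits each
// initialization into the aggregate slot, using per-dimension index
// variables (ArrayIndexes) when a by-copy capture such as `y` has array
// type.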