//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions
//
//===----------------------------------------------------------------------===//

#include "clang/Frontend/CodeGenOptions.h"
#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGDebugInfo.h"
#include "llvm/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
                                          llvm::Value *Callee,
                                          ReturnValueSlot ReturnValue,
                                          llvm::Value *This,
                                          llvm::Value *VTT,
                                          CallExpr::const_arg_iterator ArgBeg,
                                          CallExpr::const_arg_iterator ArgEnd) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  CallArgList Args;

  // Push the this ptr.
  Args.push_back(std::make_pair(RValue::get(This),
                                MD->getThisType(getContext())));

  // If there is a VTT parameter, emit it.
  if (VTT) {
    QualType T = getContext().getPointerType(getContext().VoidPtrTy);
    Args.push_back(std::make_pair(RValue::get(VTT), T));
  }

  // And the rest of the call args.
  EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);

  QualType ResultType = FPT->getResultType();
  return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args,
                                                 FPT->getExtInfo()),
                  Callee, ReturnValue, Args, MD);
}

/// canDevirtualizeMemberFunctionCalls - Checks whether a virtual call on the
/// given base expr can be devirtualized.
static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
                                               const Expr *Base,
                                               const CXXMethodDecl *MD) {

  // Cannot devirtualize in kext mode.
  if (Context.getLangOptions().AppleKext)
    return false;

  // If the member function is marked 'final', we know that it can't be
  // overridden and can therefore devirtualize it.
  if (MD->hasAttr<FinalAttr>())
    return true;

  // Similarly, if the class itself is marked 'final' it can't be overridden
  // and we can therefore devirtualize the member function call.
  if (MD->getParent()->hasAttr<FinalAttr>())
    return true;

  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
      // This is a record decl. We know the type and can devirtualize it.
      return VD->getType()->isRecordType();
    }

    return false;
  }

  // We can always devirtualize calls on temporary object expressions.
  if (isa<CXXConstructExpr>(Base))
    return true;

  // And calls on bound temporaries.
  if (isa<CXXBindTemporaryExpr>(Base))
    return true;

  // Check if this is a call expr that returns a record type.
  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
    return CE->getCallReturnType()->isRecordType();

  // We can't devirtualize the call.
  return false;
}

// Note: This function also emits constructor calls to support the MSVC
// extension allowing explicit constructor function calls.
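//
// A hypothetical example of what the devirtualization check enables: given
//
//   struct S final { virtual int f(); };
//   int g(S &s) { return s.f(); }
//
// canDevirtualizeMemberFunctionCalls above returns true for 's.f()' because
// S is marked 'final', so the call is emitted as a direct call to S::f
// rather than a load through the vtable.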
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  if (isa<BinaryOperator>(CE->getCallee()->IgnoreParens()))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(CE->getCallee()->IgnoreParens());
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  CGDebugInfo *DI = getDebugInfo();
  if (DI && CGM.getCodeGenOpts().LimitDebugInfo
      && !isa<CallExpr>(ME->getBase())) {
    QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
    if (const PointerType *PTy = dyn_cast<PointerType>(PQTy)) {
      DI->getOrCreateRecordType(PTy->getPointeeType(),
                                MD->getParent()->getLocation());
    }
  }

  if (MD->isStatic()) {
    // The method is static; emit it as we would a regular call.
    llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
    return EmitCall(getContext().getPointerType(MD->getType()), Callee,
                    ReturnValue, CE->arg_begin(), CE->arg_end());
  }

  // Compute the object pointer.
  llvm::Value *This;
  if (ME->isArrow())
    This = EmitScalarExpr(ME->getBase());
  else
    This = EmitLValue(ME->getBase()).getAddress();

  if (MD->isTrivial()) {
    if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
      return RValue::get(0);

    if (MD->isCopyAssignmentOperator()) {
      // We don't like to generate the trivial copy assignment operator when
      // it isn't necessary; just produce the proper effect here.
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitAggregateCopy(This, RHS, CE->getType());
      return RValue::get(This);
    }

    if (isa<CXXConstructorDecl>(MD) &&
        cast<CXXConstructorDecl>(MD)->isCopyConstructor()) {
      llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
      EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
                                     CE->arg_begin(), CE->arg_end());
      return RValue::get(This);
    }
    llvm_unreachable("unknown trivial member function");
  }

  // Compute the function type we're calling.
  const CGFunctionInfo *FInfo = 0;
  if (isa<CXXDestructorDecl>(MD))
    FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXDestructorDecl>(MD),
                                            Dtor_Complete);
  else if (isa<CXXConstructorDecl>(MD))
    FInfo = &CGM.getTypes().getFunctionInfo(cast<CXXConstructorDecl>(MD),
                                            Ctor_Complete);
  else
    FInfo = &CGM.getTypes().getFunctionInfo(MD);

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  const llvm::Type *Ty
    = CGM.getTypes().GetFunctionType(*FInfo, FPT->isVariadic());

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
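  //
  // For example, 'p->Base::f()' always calls Base::f directly, even when f
  // is virtual, because the qualifier suppresses dynamic dispatch.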
  bool UseVirtualCall = MD->isVirtual() && !ME->hasQualifier() &&
                        !canDevirtualizeMemberFunctionCalls(getContext(),
                                                            ME->getBase(), MD);
  llvm::Value *Callee;
  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
    if (UseVirtualCall) {
      Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
    } else {
      Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
    }
  } else if (const CXXConstructorDecl *Ctor =
               dyn_cast<CXXConstructorDecl>(MD)) {
    Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
  } else if (UseVirtualCall) {
    Callee = BuildVirtualCall(MD, This, Ty);
  } else {
    if (getContext().getLangOptions().AppleKext &&
        ME->hasQualifier())
      Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), This, Ty);
    else
      Callee = CGM.GetAddrOfFunction(MD, Ty);
  }

  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
                           CE->arg_begin(), CE->arg_end());
}

RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  const BinaryOperator *BO =
    cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();

  const MemberPointerType *MPT =
    MemFnExpr->getType()->getAs<MemberPointerType>();

  const FunctionProtoType *FPT =
    MPT->getPointeeType()->getAs<FunctionProtoType>();
  const CXXRecordDecl *RD =
    cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Emit the 'this' pointer.
  llvm::Value *This;

  if (BO->getOpcode() == BO_PtrMemI)
    This = EmitScalarExpr(BaseExpr);
  else
    This = EmitLValue(BaseExpr).getAddress();

  // Ask the ABI to load the callee.  Note that This is modified.
  llvm::Value *Callee =
    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr,
                                                    MPT);

  CallArgList Args;

  QualType ThisType =
    getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.push_back(std::make_pair(RValue::get(This), ThisType));

  // And the rest of the call args.
  EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
  const FunctionType *BO_FPT = BO->getType()->getAs<FunctionProtoType>();
  return EmitCall(CGM.getTypes().getFunctionInfo(Args, BO_FPT), Callee,
                  ReturnValue, Args);
}

RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                               const CXXMethodDecl *MD,
                                               ReturnValueSlot ReturnValue) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");
  LValue LV = EmitLValue(E->getArg(0));
  llvm::Value *This = LV.getAddress();

  if (MD->isCopyAssignmentOperator()) {
    const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(MD->getDeclContext());
    if (ClassDecl->hasTrivialCopyAssignment()) {
      assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
             "EmitCXXOperatorMemberCallExpr - user declared copy assignment");
      llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
      QualType Ty = E->getType();
      EmitAggregateCopy(This, Src, Ty);
      return RValue::get(This);
    }
  }

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  const llvm::Type *Ty =
    CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
                                   FPT->isVariadic());
  llvm::Value *Callee;
  if (MD->isVirtual() &&
      !canDevirtualizeMemberFunctionCalls(getContext(),
                                          E->getArg(0), MD))
    Callee = BuildVirtualCall(MD, This, Ty);
  else
    Callee = CGM.GetAddrOfFunction(MD, Ty);

  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
                           E->arg_begin() + 1, E->arg_end());
}

void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
                                      AggValueSlot Dest) {
  assert(!Dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest.getAddr(), E->getType());

  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Elide the constructor if we're constructing from a temporary.
  // The temporary check is required because Sema sets this on NRVO
  // returns.
  if (getContext().getLangOptions().ElideConstructors && E->isElidable()) {
    assert(getContext().hasSameUnqualifiedType(E->getType(),
                                               E->getArg(0)->getType()));
    if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
      EmitAggExpr(E->getArg(0), Dest);
      return;
    }
  }

  const ConstantArrayType *Array
    = getContext().getAsConstantArrayType(E->getType());
  if (Array) {
    QualType BaseElementTy = getContext().getBaseElementType(Array);
    const llvm::Type *BasePtr = ConvertType(BaseElementTy);
    BasePtr = llvm::PointerType::getUnqual(BasePtr);
    llvm::Value *BaseAddrPtr =
      Builder.CreateBitCast(Dest.getAddr(), BasePtr);

    EmitCXXAggrConstructorCall(CD, Array, BaseAddrPtr,
                               E->arg_begin(), E->arg_end());
  } else {
    CXXCtorType Type =
      (E->getConstructionKind() == CXXConstructExpr::CK_Complete) ?
        Ctor_Complete : Ctor_Base;
    bool ForVirtualBase =
      E->getConstructionKind() == CXXConstructExpr::CK_VirtualBase;

    // Call the constructor.
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest.getAddr(),
                           E->arg_begin(), E->arg_end());
  }
}

void
CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
                                            llvm::Value *Src,
                                            const Expr *Exp) {
  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
    Exp = E->getSubExpr();
  assert(isa<CXXConstructExpr>(Exp) &&
         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
  const CXXConstructExpr *E = cast<CXXConstructExpr>(Exp);
  const CXXConstructorDecl *CD = E->getConstructor();
  RunCleanupsScope Scope(*this);

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  // FIXME: Do we still need this for copy ctor synthesis?
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest, E->getType());

  assert(!getContext().getAsConstantArrayType(E->getType())
         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src,
                                 E->arg_begin(), E->arg_end());
}

/// Check whether the given operator new[] is the global placement
/// operator new[].
static bool IsPlacementOperatorNewArray(ASTContext &Ctx,
                                        const FunctionDecl *Fn) {
  // Must be in global scope.  Note that allocation functions can't be
  // declared in namespaces.
  if (!Fn->getDeclContext()->getRedeclContext()->isFileContext())
    return false;

  // Signature must be void *operator new[](size_t, void*).
  // The size_t is common to all operator new[]s.
  if (Fn->getNumParams() != 2)
    return false;

  CanQualType ParamType = Ctx.getCanonicalType(Fn->getParamDecl(1)->getType());
  return (ParamType == Ctx.VoidPtrTy);
}

static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
                                        const CXXNewExpr *E) {
  if (!E->isArray())
    return CharUnits::Zero();

  // No cookie is required if the new operator being used is
  // ::operator new[](size_t, void*).
  const FunctionDecl *OperatorNew = E->getOperatorNew();
  if (IsPlacementOperatorNewArray(CGF.getContext(), OperatorNew))
    return CharUnits::Zero();

  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
}

static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context,
                                        CodeGenFunction &CGF,
                                        const CXXNewExpr *E,
                                        llvm::Value *&NumElements,
                                        llvm::Value *&SizeWithoutCookie) {
  QualType ElemType = E->getAllocatedType();

  const llvm::IntegerType *SizeTy =
    cast<llvm::IntegerType>(CGF.ConvertType(CGF.getContext().getSizeType()));

  CharUnits TypeSize = CGF.getContext().getTypeSizeInChars(ElemType);

  if (!E->isArray()) {
    SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());
    return SizeWithoutCookie;
  }

  // Figure out the cookie size.
  CharUnits CookieSize = CalculateCookiePadding(CGF, E);

  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
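  // The outermost dimension comes from the new-expression's array size
  // operand; the remaining dimensions are constant and are folded into
  // ArraySizeMultiplier below.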
  NumElements = CGF.EmitScalarExpr(E->getArraySize());
  assert(NumElements->getType() == SizeTy && "element count not a size_t");

  uint64_t ArraySizeMultiplier = 1;
  while (const ConstantArrayType *CAT
           = CGF.getContext().getAsConstantArrayType(ElemType)) {
    ElemType = CAT->getElementType();
    ArraySizeMultiplier *= CAT->getSize().getZExtValue();
  }

  llvm::Value *Size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *NumElementsC =
        dyn_cast<llvm::ConstantInt>(NumElements)) {
    llvm::APInt NEC = NumElementsC->getValue();
    unsigned SizeWidth = NEC.getBitWidth();

    // Determine if there is an overflow here by doing an extended multiply.
    NEC = NEC.zext(SizeWidth*2);
    llvm::APInt SC(SizeWidth*2, TypeSize.getQuantity());
    SC *= NEC;

    if (!CookieSize.isZero()) {
      // Save the current size without a cookie.  We don't care if an
      // overflow's already happened because SizeWithoutCookie isn't
      // used if the allocator returns null or throws, as it should
      // always do on an overflow.
      llvm::APInt SWC = SC.trunc(SizeWidth);
      SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, SWC);

      // Add the cookie size.
      SC += llvm::APInt(SizeWidth*2, CookieSize.getQuantity());
    }

    if (SC.countLeadingZeros() >= SizeWidth) {
      SC = SC.trunc(SizeWidth);
      Size = llvm::ConstantInt::get(SizeTy, SC);
    } else {
      // On overflow, produce a -1 so operator new throws.
      Size = llvm::Constant::getAllOnesValue(SizeTy);
    }

    // Scale NumElements while we're at it.
    uint64_t N = NEC.getZExtValue() * ArraySizeMultiplier;
    NumElements = llvm::ConstantInt::get(SizeTy, N);

  // Otherwise, we don't need to do an overflow-checked multiplication if
  // we're multiplying by one.
  } else if (TypeSize.isOne()) {
    assert(ArraySizeMultiplier == 1);

    Size = NumElements;

    // If we need a cookie, add its size in with an overflow check.
    // This is maybe a little paranoid.
    if (!CookieSize.isZero()) {
      SizeWithoutCookie = Size;

      llvm::Value *CookieSizeV
        = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());

      const llvm::Type *Types[] = { SizeTy };
      llvm::Value *UAddF
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
      llvm::Value *AddRes
        = CGF.Builder.CreateCall2(UAddF, Size, CookieSizeV);

      Size = CGF.Builder.CreateExtractValue(AddRes, 0);
      llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
      Size = CGF.Builder.CreateSelect(DidOverflow,
                                      llvm::ConstantInt::get(SizeTy, -1),
                                      Size);
    }

  // Otherwise, use the llvm.umul.with.overflow intrinsic.
  } else {
    llvm::Value *OutermostElementSize
      = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());

    llvm::Value *NumOutermostElements = NumElements;

    // Scale NumElements by the array size multiplier.  This might
    // overflow, but only if the multiplication below also overflows,
    // in which case this multiplication isn't used.
    if (ArraySizeMultiplier != 1)
      NumElements = CGF.Builder.CreateMul(NumElements,
                        llvm::ConstantInt::get(SizeTy, ArraySizeMultiplier));

    // The requested size of the outermost array is non-constant.
    // Multiply that by the static size of the elements of that array;
    // on unsigned overflow, set the size to -1 to trigger an
    // exception from the allocation routine.
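    // (For example, on a 32-bit target 'new int[0x40000001]' would otherwise
    // wrap around to a seemingly valid 4-byte allocation.)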
    // This is sufficient to prevent buffer overruns from the allocator
    // returning a seemingly valid pointer to insufficient space.  This
    // idea comes originally from MSVC, and GCC has an open bug requesting
    // similar behavior:
    //   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=19351
    //
    // This will not be sufficient for C++0x, which requires a
    // specific exception class (std::bad_array_new_length).
    // That will require ABI support that has not yet been specified.
    const llvm::Type *Types[] = { SizeTy };
    llvm::Value *UMulF
      = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, Types, 1);
    llvm::Value *MulRes = CGF.Builder.CreateCall2(UMulF, NumOutermostElements,
                                                  OutermostElementSize);

    // The overflow bit.
    llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(MulRes, 1);

    // The result of the multiplication.
    Size = CGF.Builder.CreateExtractValue(MulRes, 0);

    // If we have a cookie, we need to add that size in, too.
    if (!CookieSize.isZero()) {
      SizeWithoutCookie = Size;

      llvm::Value *CookieSizeV
        = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
      llvm::Value *UAddF
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
      llvm::Value *AddRes
        = CGF.Builder.CreateCall2(UAddF, SizeWithoutCookie, CookieSizeV);

      Size = CGF.Builder.CreateExtractValue(AddRes, 0);

      llvm::Value *AddDidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
      DidOverflow = CGF.Builder.CreateAnd(DidOverflow, AddDidOverflow);
    }

    Size = CGF.Builder.CreateSelect(DidOverflow,
                                    llvm::ConstantInt::get(SizeTy, -1),
                                    Size);
  }

  if (CookieSize.isZero())
    SizeWithoutCookie = Size;
  else
    assert(SizeWithoutCookie && "didn't set SizeWithoutCookie?");

  return Size;
}

static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
                                    llvm::Value *NewPtr) {

  assert(E->getNumConstructorArgs() == 1 &&
         "Can only have one argument to initializer of POD type.");

  const Expr *Init = E->getConstructorArg(0);
  QualType AllocType = E->getAllocatedType();

  unsigned Alignment =
    CGF.getContext().getTypeAlignInChars(AllocType).getQuantity();
  if (!CGF.hasAggregateLLVMType(AllocType))
    CGF.EmitStoreOfScalar(CGF.EmitScalarExpr(Init), NewPtr,
                          AllocType.isVolatileQualified(), Alignment,
                          AllocType);
  else if (AllocType->isAnyComplexType())
    CGF.EmitComplexExprIntoAddr(Init, NewPtr,
                                AllocType.isVolatileQualified());
  else {
    AggValueSlot Slot
      = AggValueSlot::forAddr(NewPtr, AllocType.isVolatileQualified(), true);
    CGF.EmitAggExpr(Init, Slot);
  }
}

void
CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
                                         llvm::Value *NewPtr,
                                         llvm::Value *NumElements) {
  // We have a POD type.
  if (E->getNumConstructorArgs() == 0)
    return;

  const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

  // Create a temporary for the loop index and initialize it with 0.
  llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index");
  llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
  Builder.CreateStore(Zero, IndexPtr);

  // Start the loop with a block that tests the condition.
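  // The emitted control flow has the shape:
  //   for.cond -> for.body -> for.inc -> for.cond ... -> for.end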
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  llvm::BasicBlock *AfterFor = createBasicBlock("for.end");

  EmitBlock(CondBlock);

  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // Generate: if (loop-index < number-of-elements) fall to the loop body;
  // otherwise, go to the block after the for-loop.
  llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
  llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElements, "isless");
  // If the condition is true, execute the body.
  Builder.CreateCondBr(IsLess, ForBody, AfterFor);

  EmitBlock(ForBody);

  llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
  // Inside the loop body, emit the constructor call on the array element.
  Counter = Builder.CreateLoad(IndexPtr);
  llvm::Value *Address = Builder.CreateInBoundsGEP(NewPtr, Counter,
                                                   "arrayidx");
  StoreAnyExprIntoOneUnit(*this, E, Address);

  EmitBlock(ContinueBlock);

  // Emit the increment of the loop counter.
  llvm::Value *NextVal = llvm::ConstantInt::get(SizeTy, 1);
  Counter = Builder.CreateLoad(IndexPtr);
  NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
  Builder.CreateStore(NextVal, IndexPtr);

  // Finally, branch back up to the condition for the next iteration.
  EmitBranch(CondBlock);

  // Emit the fall-through block.
  EmitBlock(AfterFor, true);
}

static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
                           llvm::Value *NewPtr, llvm::Value *Size) {
  llvm::LLVMContext &VMContext = CGF.CGM.getLLVMContext();
  const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
  if (NewPtr->getType() != BP)
    NewPtr = CGF.Builder.CreateBitCast(NewPtr, BP, "tmp");

  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(T);
  CGF.Builder.CreateMemSet(NewPtr, CGF.Builder.getInt8(0), Size,
                           Alignment.getQuantity(), false);
}

static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               llvm::Value *NewPtr,
                               llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie) {
  if (E->isArray()) {
    if (CXXConstructorDecl *Ctor = E->getConstructor()) {
      bool RequiresZeroInitialization = false;
      if (Ctor->getParent()->hasTrivialConstructor()) {
        // If the new expression did not specify value-initialization, then
        // there is no initialization.
        if (!E->hasInitializer() || Ctor->getParent()->isEmpty())
          return;

        if (CGF.CGM.getTypes().isZeroInitializable(E->getAllocatedType())) {
          // Optimization: since zero initialization will just set the memory
          // to all zeroes, generate a single memset to do it in one shot.
          EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
                         AllocSizeWithoutCookie);
          return;
        }

        RequiresZeroInitialization = true;
      }

      CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
                                     E->constructor_arg_begin(),
                                     E->constructor_arg_end(),
                                     RequiresZeroInitialization);
      return;
    } else if (E->getNumConstructorArgs() == 1 &&
               isa<ImplicitValueInitExpr>(E->getConstructorArg(0))) {
      // Optimization: since zero initialization will just set the memory
      // to all zeroes, generate a single memset to do it in one shot.
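      // This is the 'new T[n]()' case: value-initializing POD elements
      // reduces to zero-initialization.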
      EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
                     AllocSizeWithoutCookie);
      return;
    } else {
      CGF.EmitNewArrayInitializer(E, NewPtr, NumElements);
      return;
    }
  }

  if (CXXConstructorDecl *Ctor = E->getConstructor()) {
    // Per C++ [expr.new]p15, if we have an initializer, then we're performing
    // direct initialization. C++ [dcl.init]p5 requires that we
    // zero-initialize storage if there are no user-declared constructors.
    if (E->hasInitializer() &&
        !Ctor->getParent()->hasUserDeclaredConstructor() &&
        !Ctor->getParent()->isEmpty())
      CGF.EmitNullInitialization(NewPtr, E->getAllocatedType());

    CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
                               NewPtr, E->constructor_arg_begin(),
                               E->constructor_arg_end());

    return;
  }
  // We have a POD type.
  if (E->getNumConstructorArgs() == 0)
    return;

  StoreAnyExprIntoOneUnit(CGF, E, NewPtr);
}

namespace {
/// A utility class for saving an rvalue.
class SavedRValue {
public:
  enum Kind { ScalarLiteral, ScalarAddress,
              AggregateLiteral, AggregateAddress,
              Complex };

private:
  llvm::Value *Value;
  Kind K;

  SavedRValue(llvm::Value *V, Kind K) : Value(V), K(K) {}

public:
  SavedRValue() {}

  static SavedRValue forScalarLiteral(llvm::Value *V) {
    return SavedRValue(V, ScalarLiteral);
  }

  static SavedRValue forScalarAddress(llvm::Value *Addr) {
    return SavedRValue(Addr, ScalarAddress);
  }

  static SavedRValue forAggregateLiteral(llvm::Value *V) {
    return SavedRValue(V, AggregateLiteral);
  }

  static SavedRValue forAggregateAddress(llvm::Value *Addr) {
    return SavedRValue(Addr, AggregateAddress);
  }

  static SavedRValue forComplexAddress(llvm::Value *Addr) {
    return SavedRValue(Addr, Complex);
  }

  Kind getKind() const { return K; }
  llvm::Value *getValue() const { return Value; }
};
} // end anonymous namespace

/// Given an r-value, perform the code necessary to make sure that a
/// future RestoreRValue will be able to load the value without
/// domination concerns.
static SavedRValue SaveRValue(CodeGenFunction &CGF, RValue RV) {
  if (RV.isScalar()) {
    llvm::Value *V = RV.getScalarVal();

    // These automatically dominate and don't need to be saved.
    if (isa<llvm::Constant>(V) || isa<llvm::AllocaInst>(V))
      return SavedRValue::forScalarLiteral(V);

    // Everything else needs an alloca.
    llvm::Value *Addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
    CGF.Builder.CreateStore(V, Addr);
    return SavedRValue::forScalarAddress(Addr);
  }

  if (RV.isComplex()) {
    CodeGenFunction::ComplexPairTy V = RV.getComplexVal();
    const llvm::Type *ComplexTy =
      llvm::StructType::get(CGF.getLLVMContext(),
                            V.first->getType(), V.second->getType(),
                            (void*) 0);
    llvm::Value *Addr = CGF.CreateTempAlloca(ComplexTy, "saved-complex");
    CGF.StoreComplexToAddr(V, Addr, /*volatile*/ false);
    return SavedRValue::forComplexAddress(Addr);
  }

  assert(RV.isAggregate());
  llvm::Value *V = RV.getAggregateAddr(); // TODO: volatile?
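
  // As in the scalar case, constants and allocas dominate all uses and
  // don't need to be saved.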
  if (isa<llvm::Constant>(V) || isa<llvm::AllocaInst>(V))
    return SavedRValue::forAggregateLiteral(V);

  llvm::Value *Addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
  CGF.Builder.CreateStore(V, Addr);
  return SavedRValue::forAggregateAddress(Addr);
}

/// Given a saved r-value produced by SaveRValue, perform the code
/// necessary to restore it to usability at the current insertion
/// point.
static RValue RestoreRValue(CodeGenFunction &CGF, SavedRValue RV) {
  switch (RV.getKind()) {
  case SavedRValue::ScalarLiteral:
    return RValue::get(RV.getValue());
  case SavedRValue::ScalarAddress:
    return RValue::get(CGF.Builder.CreateLoad(RV.getValue()));
  case SavedRValue::AggregateLiteral:
    return RValue::getAggregate(RV.getValue());
  case SavedRValue::AggregateAddress:
    return RValue::getAggregate(CGF.Builder.CreateLoad(RV.getValue()));
  case SavedRValue::Complex:
    return RValue::getComplex(CGF.LoadComplexFromAddr(RV.getValue(), false));
  }

  llvm_unreachable("bad saved r-value kind");
  return RValue();
}

namespace {
  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression.
  class CallDeleteDuringNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    llvm::Value *Ptr;
    llvm::Value *AllocSize;

    RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(RValue);
    }

    CallDeleteDuringNew(size_t NumPlacementArgs,
                        const FunctionDecl *OperatorDelete,
                        llvm::Value *Ptr,
                        llvm::Value *AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    void setPlacementArg(unsigned I, RValue Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.push_back(std::make_pair(RValue::get(Ptr), *AI++));

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2)
        DeleteArgs.push_back(std::make_pair(RValue::get(AllocSize), *AI++));

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I)
        DeleteArgs.push_back(std::make_pair(getPlacementArgs()[I], *AI++));

      // Call 'operator delete'.
      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
    }
  };

  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression when the new expression is
  /// conditional.
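  ///
  /// Unlike CallDeleteDuringNew, the pointer, size, and placement arguments
  /// are stored as SavedRValues, so they can be reloaded at the cleanup's
  /// emission point even when the code that saved them does not dominate it.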
  class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    SavedRValue Ptr;
    SavedRValue AllocSize;

    SavedRValue *getPlacementArgs() {
      return reinterpret_cast<SavedRValue*>(this+1);
    }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(SavedRValue);
    }

    CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
                                   const FunctionDecl *OperatorDelete,
                                   SavedRValue Ptr,
                                   SavedRValue AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    void setPlacementArg(unsigned I, SavedRValue Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.push_back(std::make_pair(RestoreRValue(CGF, Ptr), *AI++));

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2) {
        RValue RV = RestoreRValue(CGF, AllocSize);
        DeleteArgs.push_back(std::make_pair(RV, *AI++));
      }

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
        RValue RV = RestoreRValue(CGF, getPlacementArgs()[I]);
        DeleteArgs.push_back(std::make_pair(RV, *AI++));
      }

      // Call 'operator delete'.
      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
    }
  };
}

/// Enter a cleanup to call 'operator delete' if the initializer in a
/// new-expression throws.
static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
                                  const CXXNewExpr *E,
                                  llvm::Value *NewPtr,
                                  llvm::Value *AllocSize,
                                  const CallArgList &NewArgs) {
  // If we're not inside a conditional branch, then the cleanup will
  // dominate and we can do the easier (and more efficient) thing.
  if (!CGF.isInConditionalBranch()) {
    CallDeleteDuringNew *Cleanup = CGF.EHStack
      .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
                                                 E->getNumPlacementArgs(),
                                                 E->getOperatorDelete(),
                                                 NewPtr, AllocSize);
    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
      Cleanup->setPlacementArg(I, NewArgs[I+1].first);

    return;
  }

  // Otherwise, we need to save all this stuff.
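  // Values computed inside a conditional branch may not dominate the point
  // where the cleanup is emitted, so spill them to allocas with SaveRValue
  // and reload them from there in the cleanup.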
  SavedRValue SavedNewPtr = SaveRValue(CGF, RValue::get(NewPtr));
  SavedRValue SavedAllocSize = SaveRValue(CGF, RValue::get(AllocSize));

  CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
    .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(InactiveEHCleanup,
                                                 E->getNumPlacementArgs(),
                                                 E->getOperatorDelete(),
                                                 SavedNewPtr,
                                                 SavedAllocSize);
  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
    Cleanup->setPlacementArg(I, SaveRValue(CGF, NewArgs[I+1].first));

  CGF.ActivateCleanupBlock(CGF.EHStack.stable_begin());
}

llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  QualType AllocType = E->getAllocatedType();
  if (AllocType->isArrayType())
    while (const ArrayType *AType = getContext().getAsArrayType(AllocType))
      AllocType = AType->getElementType();

  FunctionDecl *NewFD = E->getOperatorNew();
  const FunctionProtoType *NewFTy =
    NewFD->getType()->getAs<FunctionProtoType>();

  CallArgList NewArgs;

  // The allocation size is the first argument.
  QualType SizeTy = getContext().getSizeType();

  llvm::Value *NumElements = 0;
  llvm::Value *AllocSizeWithoutCookie = 0;
  llvm::Value *AllocSize = EmitCXXNewAllocSize(getContext(),
                                               *this, E, NumElements,
                                               AllocSizeWithoutCookie);

  NewArgs.push_back(std::make_pair(RValue::get(AllocSize), SizeTy));

  // Emit the rest of the arguments.
  // FIXME: Ideally, this should just use EmitCallArgs.
  CXXNewExpr::const_arg_iterator NewArg = E->placement_arg_begin();

  // First, use the types from the function type.
  // We start at 1 here because the first argument (the allocation size)
  // has already been emitted.
  for (unsigned i = 1, e = NewFTy->getNumArgs(); i != e; ++i, ++NewArg) {
    QualType ArgType = NewFTy->getArgType(i);

    assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
           getTypePtr() ==
           getContext().getCanonicalType(NewArg->getType()).getTypePtr() &&
           "type mismatch in call argument!");

    NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
                                     ArgType));
  }

  // Either we've emitted all the call args, or we have a call to a
  // variadic function.
  assert((NewArg == E->placement_arg_end() || NewFTy->isVariadic()) &&
         "Extra arguments in non-variadic function!");

  // If we still have any arguments, emit them using the type of the argument.
  for (CXXNewExpr::const_arg_iterator NewArgEnd = E->placement_arg_end();
       NewArg != NewArgEnd; ++NewArg) {
    QualType ArgType = NewArg->getType();
    NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
                                     ArgType));
  }

  // Emit the call to new.
  RValue RV =
    EmitCall(CGM.getTypes().getFunctionInfo(NewArgs, NewFTy),
             CGM.GetAddrOfFunction(NewFD), ReturnValueSlot(), NewArgs, NewFD);

  // If an allocation function is declared with an empty exception
  // specification, it returns null to indicate failure to allocate
  // storage ([expr.new]p13).  (We don't need to check for null when
  // there's no new initializer and we're allocating a POD type.)
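  // The canonical example is 'new (std::nothrow) X': the nothrow allocator
  // is declared 'throw()', so a null result must branch around the
  // initialization below.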
  bool NullCheckResult = NewFTy->hasEmptyExceptionSpec() &&
    !(AllocType->isPODType() && !E->hasInitializer());

  llvm::BasicBlock *NullCheckSource = 0;
  llvm::BasicBlock *NewNotNull = 0;
  llvm::BasicBlock *NewEnd = 0;

  llvm::Value *NewPtr = RV.getScalarVal();
  unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();

  if (NullCheckResult) {
    NullCheckSource = Builder.GetInsertBlock();
    NewNotNull = createBasicBlock("new.notnull");
    NewEnd = createBasicBlock("new.end");

    llvm::Value *IsNull = Builder.CreateIsNull(NewPtr, "new.isnull");
    Builder.CreateCondBr(IsNull, NewEnd, NewNotNull);
    EmitBlock(NewNotNull);
  }

  assert((AllocSize == AllocSizeWithoutCookie) ==
         CalculateCookiePadding(*this, E).isZero());
  if (AllocSize != AllocSizeWithoutCookie) {
    assert(E->isArray());
    NewPtr = CGM.getCXXABI().InitializeArrayCookie(*this, NewPtr, NumElements,
                                                   E, AllocType);
  }

  // If there's an operator delete, enter a cleanup to call it if an
  // exception is thrown.
  EHScopeStack::stable_iterator CallOperatorDelete;
  if (E->getOperatorDelete()) {
    EnterNewDeleteCleanup(*this, E, NewPtr, AllocSize, NewArgs);
    CallOperatorDelete = EHStack.stable_begin();
  }

  const llvm::Type *ElementPtrTy
    = ConvertTypeForMem(AllocType)->getPointerTo(AS);
  NewPtr = Builder.CreateBitCast(NewPtr, ElementPtrTy);

  if (E->isArray()) {
    EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie);

    // NewPtr is a pointer to the base element type.  If we're
    // allocating an array of arrays, we'll need to cast back to the
    // array pointer type.
    const llvm::Type *ResultTy = ConvertTypeForMem(E->getType());
    if (NewPtr->getType() != ResultTy)
      NewPtr = Builder.CreateBitCast(NewPtr, ResultTy);
  } else {
    EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie);
  }

  // Deactivate the 'operator delete' cleanup if we finished
  // initialization.
  if (CallOperatorDelete.isValid())
    DeactivateCleanupBlock(CallOperatorDelete);

  if (NullCheckResult) {
    Builder.CreateBr(NewEnd);
    llvm::BasicBlock *NotNullSource = Builder.GetInsertBlock();
    EmitBlock(NewEnd);

    llvm::PHINode *PHI = Builder.CreatePHI(NewPtr->getType());
    PHI->reserveOperandSpace(2);
    PHI->addIncoming(NewPtr, NotNullSource);
    PHI->addIncoming(llvm::Constant::getNullValue(NewPtr->getType()),
                     NullCheckSource);

    NewPtr = PHI;
  }

  return NewPtr;
}

void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
                                     llvm::Value *Ptr,
                                     QualType DeleteTy) {
  assert(DeleteFD->getOverloadedOperator() == OO_Delete);

  const FunctionProtoType *DeleteFTy =
    DeleteFD->getType()->getAs<FunctionProtoType>();

  CallArgList DeleteArgs;

  // Check if we need to pass the size to the delete operator.
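  // A two-argument 'operator delete(void*, size_t)' receives the size of
  // the object being deleted as its second argument.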
  llvm::Value *Size = 0;
  QualType SizeTy;
  if (DeleteFTy->getNumArgs() == 2) {
    SizeTy = DeleteFTy->getArgType(1);
    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
    Size = llvm::ConstantInt::get(ConvertType(SizeTy),
                                  DeleteTypeSize.getQuantity());
  }

  QualType ArgTy = DeleteFTy->getArgType(0);
  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
  DeleteArgs.push_back(std::make_pair(RValue::get(DeletePtr), ArgTy));

  if (Size)
    DeleteArgs.push_back(std::make_pair(RValue::get(Size), SizeTy));

  // Emit the call to delete.
  EmitCall(CGM.getTypes().getFunctionInfo(DeleteArgs, DeleteFTy),
           CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
           DeleteArgs, DeleteFD);
}

namespace {
  /// Calls the given 'operator delete' on a single object.
  struct CallObjectDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    QualType ElementType;

    CallObjectDelete(llvm::Value *Ptr,
                     const FunctionDecl *OperatorDelete,
                     QualType ElementType)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
    }
  };
}

/// Emit the code for deleting a single object.
static void EmitObjectDelete(CodeGenFunction &CGF,
                             const FunctionDecl *OperatorDelete,
                             llvm::Value *Ptr,
                             QualType ElementType) {
  // Find the destructor for the type, if applicable.  If the
  // destructor is virtual, we'll just emit the vcall and return.
  const CXXDestructorDecl *Dtor = 0;
  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (!RD->hasTrivialDestructor()) {
      Dtor = RD->getDestructor();

      if (Dtor->isVirtual()) {
        const llvm::Type *Ty =
          CGF.getTypes().GetFunctionType(CGF.getTypes().getFunctionInfo(Dtor,
                                                              Dtor_Complete),
                                         /*isVariadic=*/false);

        llvm::Value *Callee
          = CGF.BuildVirtualCall(Dtor, Dtor_Deleting, Ptr, Ty);
        CGF.EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
                              0, 0);

        // The dtor took care of deleting the object.
        return;
      }
    }
  }

  // Make sure that we call delete even if the dtor throws.
  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                            Ptr, OperatorDelete, ElementType);

  if (Dtor)
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false, Ptr);

  CGF.PopCleanupBlock();
}

namespace {
  /// Calls the given 'operator delete' on an array of objects.
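  ///
  /// The pointer passed to the delete operator is the start of the original
  /// allocation (before the array cookie), and a sized delete receives the
  /// total element size plus the cookie size, mirroring the allocation.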
  struct CallArrayDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    llvm::Value *NumElements;
    QualType ElementType;
    CharUnits CookieSize;

    CallArrayDelete(llvm::Value *Ptr,
                    const FunctionDecl *OperatorDelete,
                    llvm::Value *NumElements,
                    QualType ElementType,
                    CharUnits CookieSize)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
        ElementType(ElementType), CookieSize(CookieSize) {}

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      const FunctionProtoType *DeleteFTy =
        OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);

      CallArgList Args;

      // Pass the pointer as the first argument.
      QualType VoidPtrTy = DeleteFTy->getArgType(0);
      llvm::Value *DeletePtr
        = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
      Args.push_back(std::make_pair(RValue::get(DeletePtr), VoidPtrTy));

      // Pass the original requested size as the second argument.
      if (DeleteFTy->getNumArgs() == 2) {
        QualType size_t = DeleteFTy->getArgType(1);
        const llvm::IntegerType *SizeTy
          = cast<llvm::IntegerType>(CGF.ConvertType(size_t));

        CharUnits ElementTypeSize =
          CGF.CGM.getContext().getTypeSizeInChars(ElementType);

        // The size of an element, multiplied by the number of elements.
        llvm::Value *Size
          = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
        Size = CGF.Builder.CreateMul(Size, NumElements);

        // Plus the size of the cookie if applicable.
        if (!CookieSize.isZero()) {
          llvm::Value *CookieSizeV
            = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
          Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
        }

        Args.push_back(std::make_pair(RValue::get(Size), size_t));
      }

      // Emit the call to delete.
      CGF.EmitCall(CGF.getTypes().getFunctionInfo(Args, DeleteFTy),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), Args, OperatorDelete);
    }
  };
}

/// Emit the code for deleting an array of objects.
static void EmitArrayDelete(CodeGenFunction &CGF,
                            const CXXDeleteExpr *E,
                            llvm::Value *Ptr,
                            QualType ElementType) {
  llvm::Value *NumElements = 0;
  llvm::Value *AllocatedPtr = 0;
  CharUnits CookieSize;
  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, Ptr, E, ElementType,
                                      NumElements, AllocatedPtr, CookieSize);

  assert(AllocatedPtr && "ReadArrayCookie didn't set AllocatedPtr");

  // Make sure that we call delete even if one of the dtors throws.
  const FunctionDecl *OperatorDelete = E->getOperatorDelete();
  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
                                           AllocatedPtr, OperatorDelete,
                                           NumElements, ElementType,
                                           CookieSize);

  if (const CXXRecordDecl *RD = ElementType->getAsCXXRecordDecl()) {
    if (!RD->hasTrivialDestructor()) {
      assert(NumElements && "ReadArrayCookie didn't find element count"
                            " for a class with destructor");
      CGF.EmitCXXAggrDestructorCall(RD->getDestructor(), NumElements, Ptr);
    }
  }

  CGF.PopCleanupBlock();
}

void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {

  // Get at the argument before we performed the implicit conversion
  // to void*.
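  // We strip implicit casts to void* (but not user-defined conversions) so
  // that DeleteTy below reflects the type of the object actually deleted.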
  const Expr *Arg = E->getArgument();
  while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
    if (ICE->getCastKind() != CK_UserDefinedConversion &&
        ICE->getType()->isVoidPointerType())
      Arg = ICE->getSubExpr();
    else
      break;
  }

  llvm::Value *Ptr = EmitScalarExpr(Arg);

  // Null check the pointer.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull =
    Builder.CreateICmpEQ(Ptr, llvm::Constant::getNullValue(Ptr->getType()),
                         "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);

  // We might be deleting a pointer to array.  If so, GEP down to the
  // first non-array element.
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
  QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    llvm::SmallVector<llvm::Value*, 8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr
             = getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();

      // 2. GEP to the first element of the array.
      GEP.push_back(Zero);
    }

    Ptr = Builder.CreateInBoundsGEP(Ptr, GEP.begin(), GEP.end(), "del.first");
  }

  assert(ConvertTypeForMem(DeleteTy) ==
         cast<llvm::PointerType>(Ptr->getType())->getElementType());

  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E, Ptr, DeleteTy);
  } else {
    EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy);
  }

  EmitBlock(DeleteEnd);
}

llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  QualType Ty = E->getType();
  const llvm::Type *LTy = ConvertType(Ty)->getPointerTo();

  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
      CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
    return Builder.CreateBitCast(TypeInfo, LTy);
  }

  Expr *subE = E->getExprOperand();
  Ty = subE->getType();
  CanQualType CanTy = CGM.getContext().getCanonicalType(Ty);
  Ty = CanTy.getUnqualifiedType().getNonReferenceType();
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->isPolymorphic()) {
      // FIXME: if subE is an lvalue do
      LValue Obj = EmitLValue(subE);
      llvm::Value *This = Obj.getAddress();
      // We need to do a zero check for *p, unless it has NonNullAttr.
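      // C++ requires typeid of a dereferenced null pointer to throw
      // std::bad_typeid, hence the branch to __cxa_bad_typeid below.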
      // FIXME: PointerType->hasAttr<NonNullAttr>()
      bool CanBeZero = false;
      if (UnaryOperator *UO = dyn_cast<UnaryOperator>(subE->IgnoreParens()))
        if (UO->getOpcode() == UO_Deref)
          CanBeZero = true;
      if (CanBeZero) {
        llvm::BasicBlock *NonZeroBlock = createBasicBlock();
        llvm::BasicBlock *ZeroBlock = createBasicBlock();

        llvm::Value *Zero = llvm::Constant::getNullValue(This->getType());
        Builder.CreateCondBr(Builder.CreateICmpNE(This, Zero),
                             NonZeroBlock, ZeroBlock);
        EmitBlock(ZeroBlock);
        // Call __cxa_bad_typeid.
        const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
        const llvm::FunctionType *FTy;
        FTy = llvm::FunctionType::get(ResultType, false);
        llvm::Value *F = CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
        Builder.CreateCall(F)->setDoesNotReturn();
        Builder.CreateUnreachable();
        EmitBlock(NonZeroBlock);
      }
      llvm::Value *V = GetVTablePtr(This, LTy->getPointerTo());
      V = Builder.CreateConstInBoundsGEP1_64(V, -1ULL);
      V = Builder.CreateLoad(V);
      return V;
    }
  }
  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(Ty), LTy);
}

llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *V,
                                              const CXXDynamicCastExpr *DCE) {
  QualType SrcTy = DCE->getSubExpr()->getType();
  QualType DestTy = DCE->getTypeAsWritten();
  QualType InnerType = DestTy->getPointeeType();

  const llvm::Type *LTy = ConvertType(DCE->getType());

  bool CanBeZero = false;
  bool ToVoid = false;
  bool ThrowOnBad = false;
  if (DestTy->isPointerType()) {
    // FIXME: if PointerType->hasAttr<NonNullAttr>(), we don't set this.
    CanBeZero = true;
    if (InnerType->isVoidType())
      ToVoid = true;
  } else {
    LTy = LTy->getPointerTo();

    // FIXME: What if exceptions are disabled?
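    // A dynamic_cast to a reference type has no null result with which to
    // signal failure, so the runtime must throw std::bad_cast instead.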
    ThrowOnBad = true;
  }

  if (SrcTy->isPointerType() || SrcTy->isReferenceType())
    SrcTy = SrcTy->getPointeeType();
  SrcTy = SrcTy.getUnqualifiedType();

  if (DestTy->isPointerType() || DestTy->isReferenceType())
    DestTy = DestTy->getPointeeType();
  DestTy = DestTy.getUnqualifiedType();

  llvm::BasicBlock *ContBlock = createBasicBlock();
  llvm::BasicBlock *NullBlock = 0;
  llvm::BasicBlock *NonZeroBlock = 0;
  if (CanBeZero) {
    NonZeroBlock = createBasicBlock();
    NullBlock = createBasicBlock();
    Builder.CreateCondBr(Builder.CreateIsNotNull(V), NonZeroBlock, NullBlock);
    EmitBlock(NonZeroBlock);
  }

  llvm::BasicBlock *BadCastBlock = 0;

  const llvm::Type *PtrDiffTy = ConvertType(getContext().getPointerDiffType());

  // See if this is a dynamic_cast to void*.
  if (ToVoid) {
    llvm::Value *This = V;
    V = GetVTablePtr(This, PtrDiffTy->getPointerTo());
    V = Builder.CreateConstInBoundsGEP1_64(V, -2ULL);
    V = Builder.CreateLoad(V, "offset to top");
    This = Builder.CreateBitCast(This, llvm::Type::getInt8PtrTy(VMContext));
    V = Builder.CreateInBoundsGEP(This, V);
    V = Builder.CreateBitCast(V, LTy);
  } else {
    // Call __dynamic_cast.
    const llvm::Type *ResultType = llvm::Type::getInt8PtrTy(VMContext);
    const llvm::FunctionType *FTy;
    std::vector<const llvm::Type*> ArgTys;
    const llvm::Type *PtrToInt8Ty
      = llvm::Type::getInt8Ty(VMContext)->getPointerTo();
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(PtrDiffTy);
    FTy = llvm::FunctionType::get(ResultType, ArgTys, false);

    // FIXME: Calculate a better hint.
    llvm::Value *hint = llvm::ConstantInt::get(PtrDiffTy, -1ULL);

    assert(SrcTy->isRecordType() && "Src type must be record type!");
    assert(DestTy->isRecordType() && "Dest type must be record type!");

    llvm::Value *SrcArg
      = CGM.GetAddrOfRTTIDescriptor(SrcTy.getUnqualifiedType());
    llvm::Value *DestArg
      = CGM.GetAddrOfRTTIDescriptor(DestTy.getUnqualifiedType());

    V = Builder.CreateBitCast(V, PtrToInt8Ty);
    V = Builder.CreateCall4(CGM.CreateRuntimeFunction(FTy, "__dynamic_cast"),
                            V, SrcArg, DestArg, hint);
    V = Builder.CreateBitCast(V, LTy);

    if (ThrowOnBad) {
      BadCastBlock = createBasicBlock();
      Builder.CreateCondBr(Builder.CreateIsNotNull(V), ContBlock,
                           BadCastBlock);
      EmitBlock(BadCastBlock);
      // Invoke __cxa_bad_cast.
      ResultType = llvm::Type::getVoidTy(VMContext);
      const llvm::FunctionType *FBadTy;
      FBadTy = llvm::FunctionType::get(ResultType, false);
      llvm::Value *F = CGM.CreateRuntimeFunction(FBadTy, "__cxa_bad_cast");
      if (llvm::BasicBlock *InvokeDest = getInvokeDest()) {
        llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
        Builder.CreateInvoke(F, Cont, InvokeDest)->setDoesNotReturn();
        EmitBlock(Cont);
      } else {
        // FIXME: Does this ever make sense?
        Builder.CreateCall(F)->setDoesNotReturn();
      }
      Builder.CreateUnreachable();
    }
  }

  if (CanBeZero) {
    Builder.CreateBr(ContBlock);
    EmitBlock(NullBlock);
    Builder.CreateBr(ContBlock);
  }
  EmitBlock(ContBlock);
  if (CanBeZero) {
    llvm::PHINode *PHI = Builder.CreatePHI(LTy);
    PHI->reserveOperandSpace(2);
    PHI->addIncoming(V, NonZeroBlock);
    PHI->addIncoming(llvm::Constant::getNullValue(LTy), NullBlock);
    V = PHI;
  }

  return V;
}