//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ expressions
//
//===----------------------------------------------------------------------===//

#include "clang/Frontend/CodeGenOptions.h"
#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGDebugInfo.h"
#include "llvm/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
                                          llvm::Value *Callee,
                                          ReturnValueSlot ReturnValue,
                                          llvm::Value *This,
                                          llvm::Value *VTT,
                                          CallExpr::const_arg_iterator ArgBeg,
                                          CallExpr::const_arg_iterator ArgEnd) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  CallArgList Args;

  // Push the this ptr.
  Args.push_back(std::make_pair(RValue::get(This),
                                MD->getThisType(getContext())));

  // If there is a VTT parameter, emit it.
  if (VTT) {
    QualType T = getContext().getPointerType(getContext().VoidPtrTy);
    Args.push_back(std::make_pair(RValue::get(VTT), T));
  }

  // And the rest of the call args.
  EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);

  QualType ResultType = FPT->getResultType();
  return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args,
                                                 FPT->getExtInfo()),
                  Callee, ReturnValue, Args, MD);
}

/// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on given
/// expr can be devirtualized.
static bool canDevirtualizeMemberFunctionCalls(const Expr *Base) {
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
      // If the base is a complete object of record type (not a pointer or
      // reference), its dynamic type is its static type, so we can
      // devirtualize the call.
      return VD->getType()->isRecordType();
    }

    return false;
  }

  // We can always devirtualize calls on temporary object expressions.
  if (isa<CXXConstructExpr>(Base))
    return true;

  // And calls on bound temporaries.
  if (isa<CXXBindTemporaryExpr>(Base))
    return true;

  // Check if this is a call expr that returns a record type.
  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
    return CE->getCallReturnType()->isRecordType();

  // We can't devirtualize the call.
  return false;
}

RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  if (isa<BinaryOperator>(CE->getCallee()->IgnoreParens()))
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);

  const MemberExpr *ME = cast<MemberExpr>(CE->getCallee()->IgnoreParens());
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  CGDebugInfo *DI = getDebugInfo();
  if (DI && CGM.getCodeGenOpts().LimitDebugInfo) {
    QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
    if (const PointerType *PTy = dyn_cast<PointerType>(PQTy)) {
      DI->getOrCreateRecordType(PTy->getPointeeType(),
                                MD->getParent()->getLocation());
    }
  }

  if (MD->isStatic()) {
    // The method is static; emit it as we would a regular call.
    llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
    return EmitCall(getContext().getPointerType(MD->getType()), Callee,
                    ReturnValue, CE->arg_begin(), CE->arg_end());
  }

  // Compute the object pointer.
  llvm::Value *This;
  if (ME->isArrow())
    This = EmitScalarExpr(ME->getBase());
  else {
    LValue BaseLV = EmitLValue(ME->getBase());
    if (BaseLV.isPropertyRef() || BaseLV.isKVCRef()) {
      QualType QT = ME->getBase()->getType();
      RValue RV =
        BaseLV.isPropertyRef() ? EmitLoadOfPropertyRefLValue(BaseLV, QT)
                               : EmitLoadOfKVCRefLValue(BaseLV, QT);
      This = RV.isScalar() ? RV.getScalarVal() : RV.getAggregateAddr();
    }
    else
      This = BaseLV.getAddress();
  }

  if (MD->isTrivial()) {
    if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);

    assert(MD->isCopyAssignmentOperator() && "unknown trivial member function");
    // We don't like to generate the trivial copy assignment operator when
    // it isn't necessary; just produce the proper effect here.
    llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
    EmitAggregateCopy(This, RHS, CE->getType());
    return RValue::get(This);
  }

  // Compute the function type we're calling.
  const CGFunctionInfo &FInfo =
    (isa<CXXDestructorDecl>(MD)
     ? CGM.getTypes().getFunctionInfo(cast<CXXDestructorDecl>(MD),
                                      Dtor_Complete)
     : CGM.getTypes().getFunctionInfo(MD));

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  const llvm::Type *Ty
    = CGM.getTypes().GetFunctionType(FInfo, FPT->isVariadic());

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
  bool UseVirtualCall = MD->isVirtual() && !ME->hasQualifier()
                        && !canDevirtualizeMemberFunctionCalls(ME->getBase());

  llvm::Value *Callee;
  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
    if (UseVirtualCall) {
      Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
    } else {
      Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
    }
  } else if (UseVirtualCall) {
    Callee = BuildVirtualCall(MD, This, Ty);
  } else {
    Callee = CGM.GetAddrOfFunction(MD, Ty);
  }

  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
                           CE->arg_begin(), CE->arg_end());
}

RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  const BinaryOperator *BO =
    cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();

  const MemberPointerType *MPT =
    MemFnExpr->getType()->getAs<MemberPointerType>();

  const FunctionProtoType *FPT =
    MPT->getPointeeType()->getAs<FunctionProtoType>();
  const CXXRecordDecl *RD =
    cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());

  // Get the member function pointer.
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);

  // Emit the 'this' pointer.
  llvm::Value *This;

  if (BO->getOpcode() == BO_PtrMemI)
    This = EmitScalarExpr(BaseExpr);
  else
    This = EmitLValue(BaseExpr).getAddress();

  // Ask the ABI to load the callee. Note that This is modified.
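  // In the Itanium C++ ABI, this applies the 'this' adjustment stored in
  // the member pointer and, if the member function is virtual, loads the
  // actual callee from the vtable.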
  llvm::Value *Callee =
    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr,
                                                    MPT);

  CallArgList Args;

  QualType ThisType =
    getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.push_back(std::make_pair(RValue::get(This), ThisType));

  // And the rest of the call args.
  EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
  const FunctionType *BO_FPT = BO->getType()->getAs<FunctionProtoType>();
  return EmitCall(CGM.getTypes().getFunctionInfo(Args, BO_FPT), Callee,
                  ReturnValue, Args);
}

RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                               const CXXMethodDecl *MD,
                                               ReturnValueSlot ReturnValue) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");
  if (MD->isCopyAssignmentOperator()) {
    const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(MD->getDeclContext());
    if (ClassDecl->hasTrivialCopyAssignment()) {
      assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
             "EmitCXXOperatorMemberCallExpr - user declared copy assignment");
      LValue LV = EmitLValue(E->getArg(0));
      llvm::Value *This;
      if (LV.isPropertyRef() || LV.isKVCRef()) {
        AggValueSlot Slot = CreateAggTemp(E->getArg(1)->getType());
        EmitAggExpr(E->getArg(1), Slot);
        if (LV.isPropertyRef())
          EmitObjCPropertySet(LV.getPropertyRefExpr(), Slot.asRValue());
        else
          EmitObjCPropertySet(LV.getKVCRefExpr(), Slot.asRValue());
        return RValue::getAggregate(0, false);
      }
      else
        This = LV.getAddress();

      llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
      QualType Ty = E->getType();
      EmitAggregateCopy(This, Src, Ty);
      return RValue::get(This);
    }
  }

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  const llvm::Type *Ty =
    CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
                                   FPT->isVariadic());
  LValue LV = EmitLValue(E->getArg(0));
  llvm::Value *This;
  if (LV.isPropertyRef() || LV.isKVCRef()) {
    QualType QT = E->getArg(0)->getType();
    RValue RV =
      LV.isPropertyRef() ? EmitLoadOfPropertyRefLValue(LV, QT)
                         : EmitLoadOfKVCRefLValue(LV, QT);
    assert(!RV.isScalar() && "EmitCXXOperatorMemberCallExpr");
    This = RV.getAggregateAddr();
  }
  else
    This = LV.getAddress();

  llvm::Value *Callee;
  if (MD->isVirtual() && !canDevirtualizeMemberFunctionCalls(E->getArg(0)))
    Callee = BuildVirtualCall(MD, This, Ty);
  else
    Callee = CGM.GetAddrOfFunction(MD, Ty);

  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
                           E->arg_begin() + 1, E->arg_end());
}

void
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
                                      AggValueSlot Dest) {
  assert(!Dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now.
  if (E->requiresZeroInitialization())
    EmitNullInitialization(Dest.getAddr(), E->getType());

  // If this is a call to a trivial default constructor, do nothing.
  if (CD->isTrivial() && CD->isDefaultConstructor())
    return;

  // Elide the constructor if we're constructing from a temporary.
  // The temporary check is required because Sema sets this on NRVO
  // returns.
  if (getContext().getLangOptions().ElideConstructors && E->isElidable()) {
    assert(getContext().hasSameUnqualifiedType(E->getType(),
                                               E->getArg(0)->getType()));
    if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
      EmitAggExpr(E->getArg(0), Dest);
      return;
    }
  }

  const ConstantArrayType *Array
    = getContext().getAsConstantArrayType(E->getType());
  if (Array) {
    QualType BaseElementTy = getContext().getBaseElementType(Array);
    const llvm::Type *BasePtr = ConvertType(BaseElementTy);
    BasePtr = llvm::PointerType::getUnqual(BasePtr);
    llvm::Value *BaseAddrPtr =
      Builder.CreateBitCast(Dest.getAddr(), BasePtr);

    EmitCXXAggrConstructorCall(CD, Array, BaseAddrPtr,
                               E->arg_begin(), E->arg_end());
  }
  else {
    CXXCtorType Type =
      (E->getConstructionKind() == CXXConstructExpr::CK_Complete)
      ? Ctor_Complete : Ctor_Base;
    bool ForVirtualBase =
      E->getConstructionKind() == CXXConstructExpr::CK_VirtualBase;

    // Call the constructor.
    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest.getAddr(),
                           E->arg_begin(), E->arg_end());
  }
}

/// Check whether the given operator new[] is the global placement
/// operator new[].
static bool IsPlacementOperatorNewArray(ASTContext &Ctx,
                                        const FunctionDecl *Fn) {
  // Must be in global scope. Note that allocation functions can't be
  // declared in namespaces.
  if (!Fn->getDeclContext()->getRedeclContext()->isFileContext())
    return false;

  // Signature must be void *operator new[](size_t, void*).
  // The size_t is common to all operator new[]s.
  if (Fn->getNumParams() != 2)
    return false;

  CanQualType ParamType = Ctx.getCanonicalType(Fn->getParamDecl(1)->getType());
  return (ParamType == Ctx.VoidPtrTy);
}

static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
                                        const CXXNewExpr *E) {
  if (!E->isArray())
    return CharUnits::Zero();

  // No cookie is required if the new operator being used is
  // ::operator new[](size_t, void*).
  const FunctionDecl *OperatorNew = E->getOperatorNew();
  if (IsPlacementOperatorNewArray(CGF.getContext(), OperatorNew))
    return CharUnits::Zero();

  return CGF.CGM.getCXXABI().GetArrayCookieSize(E->getAllocatedType());
}

static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context,
                                        CodeGenFunction &CGF,
                                        const CXXNewExpr *E,
                                        llvm::Value *&NumElements,
                                        llvm::Value *&SizeWithoutCookie) {
  QualType ElemType = E->getAllocatedType();

  const llvm::IntegerType *SizeTy =
    cast<llvm::IntegerType>(CGF.ConvertType(CGF.getContext().getSizeType()));

  CharUnits TypeSize = CGF.getContext().getTypeSizeInChars(ElemType);

  if (!E->isArray()) {
    SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());
    return SizeWithoutCookie;
  }

  // Figure out the cookie size.
  CharUnits CookieSize = CalculateCookiePadding(CGF, E);

  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
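  // Note that E->getArraySize() is only the outermost dimension; any
  // constant inner dimensions are folded into ArraySizeMultiplier below.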
  NumElements = CGF.EmitScalarExpr(E->getArraySize());
  assert(NumElements->getType() == SizeTy && "element count not a size_t");

  uint64_t ArraySizeMultiplier = 1;
  while (const ConstantArrayType *CAT
           = CGF.getContext().getAsConstantArrayType(ElemType)) {
    ElemType = CAT->getElementType();
    ArraySizeMultiplier *= CAT->getSize().getZExtValue();
  }

  llvm::Value *Size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *NumElementsC =
        dyn_cast<llvm::ConstantInt>(NumElements)) {
    llvm::APInt NEC = NumElementsC->getValue();
    unsigned SizeWidth = NEC.getBitWidth();

    // Determine if there is an overflow here by doing an extended multiply.
    NEC.zext(SizeWidth*2);
    llvm::APInt SC(SizeWidth*2, TypeSize.getQuantity());
    SC *= NEC;

    if (!CookieSize.isZero()) {
      // Save the current size without a cookie. We don't care if an
      // overflow's already happened because SizeWithoutCookie isn't
      // used if the allocator returns null or throws, as it should
      // always do on an overflow.
      llvm::APInt SWC = SC;
      SWC.trunc(SizeWidth);
      SizeWithoutCookie = llvm::ConstantInt::get(SizeTy, SWC);

      // Add the cookie size.
      SC += llvm::APInt(SizeWidth*2, CookieSize.getQuantity());
    }

    if (SC.countLeadingZeros() >= SizeWidth) {
      SC.trunc(SizeWidth);
      Size = llvm::ConstantInt::get(SizeTy, SC);
    } else {
      // On overflow, produce a -1 so operator new throws.
      Size = llvm::Constant::getAllOnesValue(SizeTy);
    }

    // Scale NumElements while we're at it.
    uint64_t N = NEC.getZExtValue() * ArraySizeMultiplier;
    NumElements = llvm::ConstantInt::get(SizeTy, N);

  // Otherwise, we don't need to do an overflow-checked multiplication if
  // we're multiplying by one.
  } else if (TypeSize.isOne()) {
    assert(ArraySizeMultiplier == 1);

    Size = NumElements;

    // If we need a cookie, add its size in with an overflow check.
    // This is maybe a little paranoid.
    if (!CookieSize.isZero()) {
      SizeWithoutCookie = Size;

      llvm::Value *CookieSizeV
        = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());

      const llvm::Type *Types[] = { SizeTy };
      llvm::Value *UAddF
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
      llvm::Value *AddRes
        = CGF.Builder.CreateCall2(UAddF, Size, CookieSizeV);

      Size = CGF.Builder.CreateExtractValue(AddRes, 0);
      llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
      Size = CGF.Builder.CreateSelect(DidOverflow,
                                      llvm::ConstantInt::get(SizeTy, -1),
                                      Size);
    }

  // Otherwise use the llvm.umul.with.overflow intrinsic.
  } else {
    llvm::Value *OutermostElementSize
      = llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());

    llvm::Value *NumOutermostElements = NumElements;

    // Scale NumElements by the array size multiplier. This might
    // overflow, but only if the multiplication below also overflows,
    // in which case this multiplication isn't used.
    if (ArraySizeMultiplier != 1)
      NumElements = CGF.Builder.CreateMul(NumElements,
                      llvm::ConstantInt::get(SizeTy, ArraySizeMultiplier));

    // The requested size of the outermost array is non-constant.
    // Multiply that by the static size of the elements of that array;
    // on unsigned overflow, set the size to -1 to trigger an
    // exception from the allocation routine.
    // This is sufficient to prevent buffer overruns from the allocator
    // returning a seemingly valid pointer to insufficient space. This
    // idea comes originally from MSVC, and GCC has an open bug requesting
    // similar behavior:
    //   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=19351
    //
    // This will not be sufficient for C++0x, which requires a specific
    // exception class (std::bad_array_new_length).
    // That will require ABI support that has not yet been specified.
    const llvm::Type *Types[] = { SizeTy };
    llvm::Value *UMulF
      = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, Types, 1);
    llvm::Value *MulRes = CGF.Builder.CreateCall2(UMulF, NumOutermostElements,
                                                  OutermostElementSize);

    // The overflow bit.
    llvm::Value *DidOverflow = CGF.Builder.CreateExtractValue(MulRes, 1);

    // The result of the multiplication.
    Size = CGF.Builder.CreateExtractValue(MulRes, 0);

    // If we have a cookie, we need to add that size in, too.
    if (!CookieSize.isZero()) {
      SizeWithoutCookie = Size;

      llvm::Value *CookieSizeV
        = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
      llvm::Value *UAddF
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, Types, 1);
      llvm::Value *AddRes
        = CGF.Builder.CreateCall2(UAddF, SizeWithoutCookie, CookieSizeV);

      Size = CGF.Builder.CreateExtractValue(AddRes, 0);

      // The allocation overflowed if either the multiply or the add did.
      llvm::Value *AddDidOverflow = CGF.Builder.CreateExtractValue(AddRes, 1);
      DidOverflow = CGF.Builder.CreateOr(DidOverflow, AddDidOverflow);
    }

    Size = CGF.Builder.CreateSelect(DidOverflow,
                                    llvm::ConstantInt::get(SizeTy, -1),
                                    Size);
  }

  if (CookieSize.isZero())
    SizeWithoutCookie = Size;
  else
    assert(SizeWithoutCookie && "didn't set SizeWithoutCookie?");

  return Size;
}

static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E,
                                    llvm::Value *NewPtr) {

  assert(E->getNumConstructorArgs() == 1 &&
         "Can only have one argument to initializer of POD type.");

  const Expr *Init = E->getConstructorArg(0);
  QualType AllocType = E->getAllocatedType();

  unsigned Alignment =
    CGF.getContext().getTypeAlignInChars(AllocType).getQuantity();
  if (!CGF.hasAggregateLLVMType(AllocType))
    CGF.EmitStoreOfScalar(CGF.EmitScalarExpr(Init), NewPtr,
                          AllocType.isVolatileQualified(), Alignment,
                          AllocType);
  else if (AllocType->isAnyComplexType())
    CGF.EmitComplexExprIntoAddr(Init, NewPtr,
                                AllocType.isVolatileQualified());
  else {
    AggValueSlot Slot
      = AggValueSlot::forAddr(NewPtr, AllocType.isVolatileQualified(), true);
    CGF.EmitAggExpr(Init, Slot);
  }
}

void
CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
                                         llvm::Value *NewPtr,
                                         llvm::Value *NumElements) {
  // We have a POD type.
  if (E->getNumConstructorArgs() == 0)
    return;

  const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

  // Create a temporary for the loop index and initialize it with 0.
  llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index");
  llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
  Builder.CreateStore(Zero, IndexPtr);

  // Start the loop with a block that tests the condition.
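  // The emitted control flow mirrors a hand-written for loop: for.cond
  // branches to for.body or for.end, and for.inc branches back to for.cond.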
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  llvm::BasicBlock *AfterFor = createBasicBlock("for.end");

  EmitBlock(CondBlock);

  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // Generate: if (loop-index < number-of-elements) fall to the loop body,
  // otherwise, go to the block after the for-loop.
  llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
  llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElements, "isless");
  // If the condition is true, execute the body.
  Builder.CreateCondBr(IsLess, ForBody, AfterFor);

  EmitBlock(ForBody);

  llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
  // Inside the loop body, emit the constructor call on the array element.
  Counter = Builder.CreateLoad(IndexPtr);
  llvm::Value *Address = Builder.CreateInBoundsGEP(NewPtr, Counter,
                                                   "arrayidx");
  StoreAnyExprIntoOneUnit(*this, E, Address);

  EmitBlock(ContinueBlock);

  // Emit the increment of the loop counter.
  llvm::Value *NextVal = llvm::ConstantInt::get(SizeTy, 1);
  Counter = Builder.CreateLoad(IndexPtr);
  NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
  Builder.CreateStore(NextVal, IndexPtr);

  // Finally, branch back up to the condition for the next iteration.
  EmitBranch(CondBlock);

  // Emit the fall-through block.
  EmitBlock(AfterFor, true);
}

static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
                           llvm::Value *NewPtr, llvm::Value *Size) {
  llvm::LLVMContext &VMContext = CGF.CGM.getLLVMContext();
  const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
  if (NewPtr->getType() != BP)
    NewPtr = CGF.Builder.CreateBitCast(NewPtr, BP, "tmp");

  CGF.Builder.CreateCall5(CGF.CGM.getMemSetFn(BP, CGF.IntPtrTy), NewPtr,
                llvm::Constant::getNullValue(llvm::Type::getInt8Ty(VMContext)),
                          Size,
                          llvm::ConstantInt::get(CGF.Int32Ty,
                                          CGF.getContext().getTypeAlign(T)/8),
                          llvm::ConstantInt::get(
                                          llvm::Type::getInt1Ty(VMContext), 0));
}

static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
                               llvm::Value *NewPtr,
                               llvm::Value *NumElements,
                               llvm::Value *AllocSizeWithoutCookie) {
  if (E->isArray()) {
    if (CXXConstructorDecl *Ctor = E->getConstructor()) {
      bool RequiresZeroInitialization = false;
      if (Ctor->getParent()->hasTrivialConstructor()) {
        // If the new expression did not specify value-initialization, then
        // there is no initialization.
        if (!E->hasInitializer() || Ctor->getParent()->isEmpty())
          return;

        if (CGF.CGM.getTypes().isZeroInitializable(E->getAllocatedType())) {
          // Optimization: since zero initialization will just set the memory
          // to all zeroes, generate a single memset to do it in one shot.
          EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
                         AllocSizeWithoutCookie);
          return;
        }

        RequiresZeroInitialization = true;
      }

      CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
                                     E->constructor_arg_begin(),
                                     E->constructor_arg_end(),
                                     RequiresZeroInitialization);
      return;
    } else if (E->getNumConstructorArgs() == 1 &&
               isa<ImplicitValueInitExpr>(E->getConstructorArg(0))) {
      // Optimization: since zero initialization will just set the memory
      // to all zeroes, generate a single memset to do it in one shot.
      EmitZeroMemSet(CGF, E->getAllocatedType(), NewPtr,
                     AllocSizeWithoutCookie);
      return;
    } else {
      CGF.EmitNewArrayInitializer(E, NewPtr, NumElements);
      return;
    }
  }

  if (CXXConstructorDecl *Ctor = E->getConstructor()) {
    // Per C++ [expr.new]p15, if we have an initializer, then we're performing
    // direct initialization. C++ [dcl.init]p5 requires that we
    // zero-initialize storage if there are no user-declared constructors.
    if (E->hasInitializer() &&
        !Ctor->getParent()->hasUserDeclaredConstructor() &&
        !Ctor->getParent()->isEmpty())
      CGF.EmitNullInitialization(NewPtr, E->getAllocatedType());

    CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
                               NewPtr, E->constructor_arg_begin(),
                               E->constructor_arg_end());

    return;
  }
  // We have a POD type.
  if (E->getNumConstructorArgs() == 0)
    return;

  StoreAnyExprIntoOneUnit(CGF, E, NewPtr);
}

/// A utility class for saving an rvalue.
class SavedRValue {
public:
  enum Kind { ScalarLiteral, ScalarAddress,
              AggregateLiteral, AggregateAddress,
              Complex };

private:
  llvm::Value *Value;
  Kind K;

  SavedRValue(llvm::Value *V, Kind K) : Value(V), K(K) {}

public:
  SavedRValue() {}

  static SavedRValue forScalarLiteral(llvm::Value *V) {
    return SavedRValue(V, ScalarLiteral);
  }

  static SavedRValue forScalarAddress(llvm::Value *Addr) {
    return SavedRValue(Addr, ScalarAddress);
  }

  static SavedRValue forAggregateLiteral(llvm::Value *V) {
    return SavedRValue(V, AggregateLiteral);
  }

  static SavedRValue forAggregateAddress(llvm::Value *Addr) {
    return SavedRValue(Addr, AggregateAddress);
  }

  static SavedRValue forComplexAddress(llvm::Value *Addr) {
    return SavedRValue(Addr, Complex);
  }

  Kind getKind() const { return K; }
  llvm::Value *getValue() const { return Value; }
};

/// Given an r-value, perform the code necessary to make sure that a
/// future RestoreRValue will be able to load the value without
/// domination concerns.
static SavedRValue SaveRValue(CodeGenFunction &CGF, RValue RV) {
  if (RV.isScalar()) {
    llvm::Value *V = RV.getScalarVal();

    // These automatically dominate and don't need to be saved.
    if (isa<llvm::Constant>(V) || isa<llvm::AllocaInst>(V))
      return SavedRValue::forScalarLiteral(V);

    // Everything else needs an alloca.
    llvm::Value *Addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
    CGF.Builder.CreateStore(V, Addr);
    return SavedRValue::forScalarAddress(Addr);
  }

  if (RV.isComplex()) {
    CodeGenFunction::ComplexPairTy V = RV.getComplexVal();
    const llvm::Type *ComplexTy =
      llvm::StructType::get(CGF.getLLVMContext(),
                            V.first->getType(), V.second->getType(),
                            (void*) 0);
    llvm::Value *Addr = CGF.CreateTempAlloca(ComplexTy, "saved-complex");
    CGF.StoreComplexToAddr(V, Addr, /*volatile*/ false);
    return SavedRValue::forComplexAddress(Addr);
  }

  assert(RV.isAggregate());
  llvm::Value *V = RV.getAggregateAddr(); // TODO: volatile?
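  // As with scalars, a constant or alloca address already dominates any
  // later use and can be saved as-is.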
  if (isa<llvm::Constant>(V) || isa<llvm::AllocaInst>(V))
    return SavedRValue::forAggregateLiteral(V);

  llvm::Value *Addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
  CGF.Builder.CreateStore(V, Addr);
  return SavedRValue::forAggregateAddress(Addr);
}

/// Given a saved r-value produced by SaveRValue, perform the code
/// necessary to restore it to usability at the current insertion
/// point.
static RValue RestoreRValue(CodeGenFunction &CGF, SavedRValue RV) {
  switch (RV.getKind()) {
  case SavedRValue::ScalarLiteral:
    return RValue::get(RV.getValue());
  case SavedRValue::ScalarAddress:
    return RValue::get(CGF.Builder.CreateLoad(RV.getValue()));
  case SavedRValue::AggregateLiteral:
    return RValue::getAggregate(RV.getValue());
  case SavedRValue::AggregateAddress:
    return RValue::getAggregate(CGF.Builder.CreateLoad(RV.getValue()));
  case SavedRValue::Complex:
    return RValue::getComplex(CGF.LoadComplexFromAddr(RV.getValue(), false));
  }

  llvm_unreachable("bad saved r-value kind");
  return RValue();
}

namespace {
  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression.
  class CallDeleteDuringNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    llvm::Value *Ptr;
    llvm::Value *AllocSize;

    RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(RValue);
    }

    CallDeleteDuringNew(size_t NumPlacementArgs,
                        const FunctionDecl *OperatorDelete,
                        llvm::Value *Ptr,
                        llvm::Value *AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    void setPlacementArg(unsigned I, RValue Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.push_back(std::make_pair(RValue::get(Ptr), *AI++));

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2)
        DeleteArgs.push_back(std::make_pair(RValue::get(AllocSize), *AI++));

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I)
        DeleteArgs.push_back(std::make_pair(getPlacementArgs()[I], *AI++));

      // Call 'operator delete'.
      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
    }
  };

  /// A cleanup to call the given 'operator delete' function upon
  /// abnormal exit from a new expression when the new expression is
  /// conditional.
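  /// Unlike CallDeleteDuringNew, the arguments are stored as SavedRValues,
  /// since values computed on a conditional path do not dominate the
  /// cleanup.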
  class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
    size_t NumPlacementArgs;
    const FunctionDecl *OperatorDelete;
    SavedRValue Ptr;
    SavedRValue AllocSize;

    SavedRValue *getPlacementArgs() {
      return reinterpret_cast<SavedRValue*>(this+1);
    }

  public:
    static size_t getExtraSize(size_t NumPlacementArgs) {
      return NumPlacementArgs * sizeof(SavedRValue);
    }

    CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
                                   const FunctionDecl *OperatorDelete,
                                   SavedRValue Ptr,
                                   SavedRValue AllocSize)
      : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
        Ptr(Ptr), AllocSize(AllocSize) {}

    void setPlacementArg(unsigned I, SavedRValue Arg) {
      assert(I < NumPlacementArgs && "index out of range");
      getPlacementArgs()[I] = Arg;
    }

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      const FunctionProtoType *FPT
        = OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
             (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));

      CallArgList DeleteArgs;

      // The first argument is always a void*.
      FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
      DeleteArgs.push_back(std::make_pair(RestoreRValue(CGF, Ptr), *AI++));

      // A member 'operator delete' can take an extra 'size_t' argument.
      if (FPT->getNumArgs() == NumPlacementArgs + 2) {
        RValue RV = RestoreRValue(CGF, AllocSize);
        DeleteArgs.push_back(std::make_pair(RV, *AI++));
      }

      // Pass the rest of the arguments, which must match exactly.
      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
        RValue RV = RestoreRValue(CGF, getPlacementArgs()[I]);
        DeleteArgs.push_back(std::make_pair(RV, *AI++));
      }

      // Call 'operator delete'.
      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(DeleteArgs, FPT),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), DeleteArgs, OperatorDelete);
    }
  };
}

/// Enter a cleanup to call 'operator delete' if the initializer in a
/// new-expression throws.
static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
                                  const CXXNewExpr *E,
                                  llvm::Value *NewPtr,
                                  llvm::Value *AllocSize,
                                  const CallArgList &NewArgs) {
  // If we're not inside a conditional branch, then the cleanup will
  // dominate and we can do the easier (and more efficient) thing.
  if (!CGF.isInConditionalBranch()) {
    CallDeleteDuringNew *Cleanup = CGF.EHStack
      .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
                                                 E->getNumPlacementArgs(),
                                                 E->getOperatorDelete(),
                                                 NewPtr, AllocSize);
    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
      Cleanup->setPlacementArg(I, NewArgs[I+1].first);

    return;
  }

  // Otherwise, we need to save all this stuff.
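  // Spill the pointer, the allocation size, and the placement args to
  // stack slots so that the (initially inactive) cleanup can reload them
  // no matter which branch was taken.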
  SavedRValue SavedNewPtr = SaveRValue(CGF, RValue::get(NewPtr));
  SavedRValue SavedAllocSize = SaveRValue(CGF, RValue::get(AllocSize));

  CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
    .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(InactiveEHCleanup,
                                                 E->getNumPlacementArgs(),
                                                 E->getOperatorDelete(),
                                                 SavedNewPtr,
                                                 SavedAllocSize);
  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
    Cleanup->setPlacementArg(I, SaveRValue(CGF, NewArgs[I+1].first));

  CGF.ActivateCleanupBlock(CGF.EHStack.stable_begin());
}

llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  QualType AllocType = E->getAllocatedType();
  if (AllocType->isArrayType())
    while (const ArrayType *AType = getContext().getAsArrayType(AllocType))
      AllocType = AType->getElementType();

  FunctionDecl *NewFD = E->getOperatorNew();
  const FunctionProtoType *NewFTy =
    NewFD->getType()->getAs<FunctionProtoType>();

  CallArgList NewArgs;

  // The allocation size is the first argument.
  QualType SizeTy = getContext().getSizeType();

  llvm::Value *NumElements = 0;
  llvm::Value *AllocSizeWithoutCookie = 0;
  llvm::Value *AllocSize = EmitCXXNewAllocSize(getContext(),
                                               *this, E, NumElements,
                                               AllocSizeWithoutCookie);

  NewArgs.push_back(std::make_pair(RValue::get(AllocSize), SizeTy));

  // Emit the rest of the arguments.
  // FIXME: Ideally, this should just use EmitCallArgs.
  CXXNewExpr::const_arg_iterator NewArg = E->placement_arg_begin();

  // First, use the types from the function type.
  // We start at 1 here because the first argument (the allocation size)
  // has already been emitted.
  for (unsigned i = 1, e = NewFTy->getNumArgs(); i != e; ++i, ++NewArg) {
    QualType ArgType = NewFTy->getArgType(i);

    assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
           getTypePtr() ==
           getContext().getCanonicalType(NewArg->getType()).getTypePtr() &&
           "type mismatch in call argument!");

    NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
                                     ArgType));
  }

  // Either we've emitted all the call args, or we have a call to a
  // variadic function.
  assert((NewArg == E->placement_arg_end() || NewFTy->isVariadic()) &&
         "Extra arguments in non-variadic function!");

  // If we still have any arguments, emit them using the type of the argument.
  for (CXXNewExpr::const_arg_iterator NewArgEnd = E->placement_arg_end();
       NewArg != NewArgEnd; ++NewArg) {
    QualType ArgType = NewArg->getType();
    NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
                                     ArgType));
  }

  // Emit the call to new.
  RValue RV =
    EmitCall(CGM.getTypes().getFunctionInfo(NewArgs, NewFTy),
             CGM.GetAddrOfFunction(NewFD), ReturnValueSlot(), NewArgs, NewFD);

  // If an allocation function is declared with an empty exception
  // specification, it returns null to indicate failure to allocate
  // storage ([expr.new]p13).
  // (We don't need to check for null when there's no new initializer and
  // we're allocating a POD type.)
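  // Note that an empty exception specification, i.e. 'throw()', is how the
  // non-throwing form of operator new is recognized here.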
  bool NullCheckResult = NewFTy->hasEmptyExceptionSpec() &&
    !(AllocType->isPODType() && !E->hasInitializer());

  llvm::BasicBlock *NullCheckSource = 0;
  llvm::BasicBlock *NewNotNull = 0;
  llvm::BasicBlock *NewEnd = 0;

  llvm::Value *NewPtr = RV.getScalarVal();
  unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();

  if (NullCheckResult) {
    NullCheckSource = Builder.GetInsertBlock();
    NewNotNull = createBasicBlock("new.notnull");
    NewEnd = createBasicBlock("new.end");

    llvm::Value *IsNull = Builder.CreateIsNull(NewPtr, "new.isnull");
    Builder.CreateCondBr(IsNull, NewEnd, NewNotNull);
    EmitBlock(NewNotNull);
  }

  assert((AllocSize == AllocSizeWithoutCookie) ==
         CalculateCookiePadding(*this, E).isZero());
  if (AllocSize != AllocSizeWithoutCookie) {
    assert(E->isArray());
    NewPtr = CGM.getCXXABI().InitializeArrayCookie(*this, NewPtr, NumElements,
                                                   AllocType);
  }

  // If there's an operator delete, enter a cleanup to call it if an
  // exception is thrown.
  EHScopeStack::stable_iterator CallOperatorDelete;
  if (E->getOperatorDelete()) {
    EnterNewDeleteCleanup(*this, E, NewPtr, AllocSize, NewArgs);
    CallOperatorDelete = EHStack.stable_begin();
  }

  const llvm::Type *ElementPtrTy
    = ConvertTypeForMem(AllocType)->getPointerTo(AS);
  NewPtr = Builder.CreateBitCast(NewPtr, ElementPtrTy);

  if (E->isArray()) {
    EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie);

    // NewPtr is a pointer to the base element type. If we're
    // allocating an array of arrays, we'll need to cast back to the
    // array pointer type.
    const llvm::Type *ResultTy = ConvertTypeForMem(E->getType());
    if (NewPtr->getType() != ResultTy)
      NewPtr = Builder.CreateBitCast(NewPtr, ResultTy);
  } else {
    EmitNewInitializer(*this, E, NewPtr, NumElements, AllocSizeWithoutCookie);
  }

  // Deactivate the 'operator delete' cleanup if we finished
  // initialization.
  if (CallOperatorDelete.isValid())
    DeactivateCleanupBlock(CallOperatorDelete);

  if (NullCheckResult) {
    Builder.CreateBr(NewEnd);
    llvm::BasicBlock *NotNullSource = Builder.GetInsertBlock();
    EmitBlock(NewEnd);

    llvm::PHINode *PHI = Builder.CreatePHI(NewPtr->getType());
    PHI->reserveOperandSpace(2);
    PHI->addIncoming(NewPtr, NotNullSource);
    PHI->addIncoming(llvm::Constant::getNullValue(NewPtr->getType()),
                     NullCheckSource);

    NewPtr = PHI;
  }

  return NewPtr;
}

void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
                                     llvm::Value *Ptr,
                                     QualType DeleteTy) {
  assert(DeleteFD->getOverloadedOperator() == OO_Delete);

  const FunctionProtoType *DeleteFTy =
    DeleteFD->getType()->getAs<FunctionProtoType>();

  CallArgList DeleteArgs;

  // Check if we need to pass the size to the delete operator.
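  // A two-argument operator delete is the sized form,
  // void operator delete(void*, size_t), so compute the size of DeleteTy.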
  llvm::Value *Size = 0;
  QualType SizeTy;
  if (DeleteFTy->getNumArgs() == 2) {
    SizeTy = DeleteFTy->getArgType(1);
    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
    Size = llvm::ConstantInt::get(ConvertType(SizeTy),
                                  DeleteTypeSize.getQuantity());
  }

  QualType ArgTy = DeleteFTy->getArgType(0);
  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
  DeleteArgs.push_back(std::make_pair(RValue::get(DeletePtr), ArgTy));

  if (Size)
    DeleteArgs.push_back(std::make_pair(RValue::get(Size), SizeTy));

  // Emit the call to delete.
  EmitCall(CGM.getTypes().getFunctionInfo(DeleteArgs, DeleteFTy),
           CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
           DeleteArgs, DeleteFD);
}

namespace {
  /// Calls the given 'operator delete' on a single object.
  struct CallObjectDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    QualType ElementType;

    CallObjectDelete(llvm::Value *Ptr,
                     const FunctionDecl *OperatorDelete,
                     QualType ElementType)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
    }
  };
}

/// Emit the code for deleting a single object.
static void EmitObjectDelete(CodeGenFunction &CGF,
                             const FunctionDecl *OperatorDelete,
                             llvm::Value *Ptr,
                             QualType ElementType) {
  // Find the destructor for the type, if applicable. If the
  // destructor is virtual, we'll just emit the vcall and return.
  const CXXDestructorDecl *Dtor = 0;
  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (!RD->hasTrivialDestructor()) {
      Dtor = RD->getDestructor();

      if (Dtor->isVirtual()) {
        const llvm::Type *Ty =
          CGF.getTypes().GetFunctionType(CGF.getTypes().getFunctionInfo(Dtor,
                                                               Dtor_Complete),
                                         /*isVariadic=*/false);

        llvm::Value *Callee
          = CGF.BuildVirtualCall(Dtor, Dtor_Deleting, Ptr, Ty);
        CGF.EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
                              0, 0);

        // The dtor took care of deleting the object.
        return;
      }
    }
  }

  // Make sure that we call delete even if the dtor throws.
  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                            Ptr, OperatorDelete, ElementType);

  if (Dtor)
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false, Ptr);

  CGF.PopCleanupBlock();
}

namespace {
  /// Calls the given 'operator delete' on an array of objects.
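  /// If the sized form is used, the size passed is recomputed as
  /// (element size * element count) + cookie size.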
  struct CallArrayDelete : EHScopeStack::Cleanup {
    llvm::Value *Ptr;
    const FunctionDecl *OperatorDelete;
    llvm::Value *NumElements;
    QualType ElementType;
    CharUnits CookieSize;

    CallArrayDelete(llvm::Value *Ptr,
                    const FunctionDecl *OperatorDelete,
                    llvm::Value *NumElements,
                    QualType ElementType,
                    CharUnits CookieSize)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
        ElementType(ElementType), CookieSize(CookieSize) {}

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      const FunctionProtoType *DeleteFTy =
        OperatorDelete->getType()->getAs<FunctionProtoType>();
      assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);

      CallArgList Args;

      // Pass the pointer as the first argument.
      QualType VoidPtrTy = DeleteFTy->getArgType(0);
      llvm::Value *DeletePtr
        = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
      Args.push_back(std::make_pair(RValue::get(DeletePtr), VoidPtrTy));

      // Pass the original requested size as the second argument.
      if (DeleteFTy->getNumArgs() == 2) {
        QualType size_t = DeleteFTy->getArgType(1);
        const llvm::IntegerType *SizeTy
          = cast<llvm::IntegerType>(CGF.ConvertType(size_t));

        CharUnits ElementTypeSize =
          CGF.CGM.getContext().getTypeSizeInChars(ElementType);

        // The size of an element, multiplied by the number of elements.
        llvm::Value *Size
          = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
        Size = CGF.Builder.CreateMul(Size, NumElements);

        // Plus the size of the cookie if applicable.
        if (!CookieSize.isZero()) {
          llvm::Value *CookieSizeV
            = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
          Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
        }

        Args.push_back(std::make_pair(RValue::get(Size), size_t));
      }

      // Emit the call to delete.
      CGF.EmitCall(CGF.getTypes().getFunctionInfo(Args, DeleteFTy),
                   CGF.CGM.GetAddrOfFunction(OperatorDelete),
                   ReturnValueSlot(), Args, OperatorDelete);
    }
  };
}

/// Emit the code for deleting an array of objects.
static void EmitArrayDelete(CodeGenFunction &CGF,
                            const FunctionDecl *OperatorDelete,
                            llvm::Value *Ptr,
                            QualType ElementType) {
  llvm::Value *NumElements = 0;
  llvm::Value *AllocatedPtr = 0;
  CharUnits CookieSize;
  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, Ptr, ElementType,
                                      NumElements, AllocatedPtr, CookieSize);

  assert(AllocatedPtr && "ReadArrayCookie didn't set AllocatedPtr");

  // Make sure that we call delete even if one of the dtors throws.
  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
                                           AllocatedPtr, OperatorDelete,
                                           NumElements, ElementType,
                                           CookieSize);

  if (const CXXRecordDecl *RD = ElementType->getAsCXXRecordDecl()) {
    if (!RD->hasTrivialDestructor()) {
      assert(NumElements && "ReadArrayCookie didn't find element count"
                            " for a class with destructor");
      CGF.EmitCXXAggrDestructorCall(RD->getDestructor(), NumElements, Ptr);
    }
  }

  CGF.PopCleanupBlock();
}

void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
  // Get at the argument before we performed the implicit conversion
  // to void*.
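  // The unconverted type determines what is actually being deleted, and
  // hence the destructor to run and the size for a sized operator delete.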
  const Expr *Arg = E->getArgument();
  while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
    if (ICE->getCastKind() != CK_UserDefinedConversion &&
        ICE->getType()->isVoidPointerType())
      Arg = ICE->getSubExpr();
    else
      break;
  }

  llvm::Value *Ptr = EmitScalarExpr(Arg);

  // Null check the pointer.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull =
    Builder.CreateICmpEQ(Ptr, llvm::Constant::getNullValue(Ptr->getType()),
                         "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);

  // We might be deleting a pointer to array. If so, GEP down to the
  // first non-array element.
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
  QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    llvm::SmallVector<llvm::Value*,8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr
             = getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();

      // 2. GEP to the first element of the array.
      GEP.push_back(Zero);
    }

    Ptr = Builder.CreateInBoundsGEP(Ptr, GEP.begin(), GEP.end(), "del.first");
  }

  assert(ConvertTypeForMem(DeleteTy) ==
         cast<llvm::PointerType>(Ptr->getType())->getElementType());

  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy);
  } else {
    EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy);
  }

  EmitBlock(DeleteEnd);
}

llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  QualType Ty = E->getType();
  const llvm::Type *LTy = ConvertType(Ty)->getPointerTo();

  if (E->isTypeOperand()) {
    llvm::Constant *TypeInfo =
      CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
    return Builder.CreateBitCast(TypeInfo, LTy);
  }

  Expr *subE = E->getExprOperand();
  Ty = subE->getType();
  CanQualType CanTy = CGM.getContext().getCanonicalType(Ty);
  Ty = CanTy.getUnqualifiedType().getNonReferenceType();
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->isPolymorphic()) {
      // FIXME: if subE is an lvalue do
      LValue Obj = EmitLValue(subE);
      llvm::Value *This = Obj.getAddress();
      LTy = LTy->getPointerTo()->getPointerTo();
      llvm::Value *V = Builder.CreateBitCast(This, LTy);
      // We need to do a zero check for *p, unless it has NonNullAttr.
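      // C++ [expr.typeid]p2: if the operand is a dereference of a null
      // pointer, the typeid expression throws std::bad_typeid;
      // __cxa_bad_typeid performs that throw for us.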
      // FIXME: PointerType->hasAttr<NonNullAttr>()
      bool CanBeZero = false;
      if (UnaryOperator *UO = dyn_cast<UnaryOperator>(subE->IgnoreParens()))
        if (UO->getOpcode() == UO_Deref)
          CanBeZero = true;
      if (CanBeZero) {
        llvm::BasicBlock *NonZeroBlock = createBasicBlock();
        llvm::BasicBlock *ZeroBlock = createBasicBlock();

        llvm::Value *Zero = llvm::Constant::getNullValue(LTy);
        Builder.CreateCondBr(Builder.CreateICmpNE(V, Zero),
                             NonZeroBlock, ZeroBlock);
        EmitBlock(ZeroBlock);
        /// Call __cxa_bad_typeid
        const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
        const llvm::FunctionType *FTy;
        FTy = llvm::FunctionType::get(ResultType, false);
        llvm::Value *F = CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
        Builder.CreateCall(F)->setDoesNotReturn();
        Builder.CreateUnreachable();
        EmitBlock(NonZeroBlock);
      }
      V = Builder.CreateLoad(V, "vtable");
      V = Builder.CreateConstInBoundsGEP1_64(V, -1ULL);
      V = Builder.CreateLoad(V);
      return V;
    }
  }
  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(Ty), LTy);
}

llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *V,
                                              const CXXDynamicCastExpr *DCE) {
  QualType SrcTy = DCE->getSubExpr()->getType();
  QualType DestTy = DCE->getTypeAsWritten();
  QualType InnerType = DestTy->getPointeeType();

  const llvm::Type *LTy = ConvertType(DCE->getType());

  bool CanBeZero = false;
  bool ToVoid = false;
  bool ThrowOnBad = false;
  if (DestTy->isPointerType()) {
    // FIXME: if PointerType->hasAttr<NonNullAttr>(), we don't set this
    CanBeZero = true;
    if (InnerType->isVoidType())
      ToVoid = true;
  } else {
    LTy = LTy->getPointerTo();

    // FIXME: What if exceptions are disabled?
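    // A dynamic_cast to a reference type has no null value to return, so
    // C++ [expr.dynamic.cast]p9 requires a failed cast to throw
    // std::bad_cast.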
    ThrowOnBad = true;
  }

  if (SrcTy->isPointerType() || SrcTy->isReferenceType())
    SrcTy = SrcTy->getPointeeType();
  SrcTy = SrcTy.getUnqualifiedType();

  if (DestTy->isPointerType() || DestTy->isReferenceType())
    DestTy = DestTy->getPointeeType();
  DestTy = DestTy.getUnqualifiedType();

  llvm::BasicBlock *ContBlock = createBasicBlock();
  llvm::BasicBlock *NullBlock = 0;
  llvm::BasicBlock *NonZeroBlock = 0;
  if (CanBeZero) {
    NonZeroBlock = createBasicBlock();
    NullBlock = createBasicBlock();
    Builder.CreateCondBr(Builder.CreateIsNotNull(V), NonZeroBlock, NullBlock);
    EmitBlock(NonZeroBlock);
  }

  llvm::BasicBlock *BadCastBlock = 0;

  const llvm::Type *PtrDiffTy = ConvertType(getContext().getPointerDiffType());

  // See if this is a dynamic_cast(void*).
  if (ToVoid) {
    llvm::Value *This = V;
    V = Builder.CreateBitCast(This, PtrDiffTy->getPointerTo()->getPointerTo());
    V = Builder.CreateLoad(V, "vtable");
    V = Builder.CreateConstInBoundsGEP1_64(V, -2ULL);
    V = Builder.CreateLoad(V, "offset to top");
    This = Builder.CreateBitCast(This, llvm::Type::getInt8PtrTy(VMContext));
    V = Builder.CreateInBoundsGEP(This, V);
    V = Builder.CreateBitCast(V, LTy);
  } else {
    /// Call __dynamic_cast
    const llvm::Type *ResultType = llvm::Type::getInt8PtrTy(VMContext);
    const llvm::FunctionType *FTy;
    std::vector<const llvm::Type*> ArgTys;
    const llvm::Type *PtrToInt8Ty
      = llvm::Type::getInt8Ty(VMContext)->getPointerTo();
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(PtrDiffTy);
    FTy = llvm::FunctionType::get(ResultType, ArgTys, false);

    // FIXME: Calculate better hint.
    llvm::Value *hint = llvm::ConstantInt::get(PtrDiffTy, -1ULL);

    assert(SrcTy->isRecordType() && "Src type must be record type!");
    assert(DestTy->isRecordType() && "Dest type must be record type!");

    llvm::Value *SrcArg
      = CGM.GetAddrOfRTTIDescriptor(SrcTy.getUnqualifiedType());
    llvm::Value *DestArg
      = CGM.GetAddrOfRTTIDescriptor(DestTy.getUnqualifiedType());

    V = Builder.CreateBitCast(V, PtrToInt8Ty);
    V = Builder.CreateCall4(CGM.CreateRuntimeFunction(FTy, "__dynamic_cast"),
                            V, SrcArg, DestArg, hint);
    V = Builder.CreateBitCast(V, LTy);

    if (ThrowOnBad) {
      BadCastBlock = createBasicBlock();
      Builder.CreateCondBr(Builder.CreateIsNotNull(V), ContBlock,
                           BadCastBlock);
      EmitBlock(BadCastBlock);
      /// Invoke __cxa_bad_cast
      ResultType = llvm::Type::getVoidTy(VMContext);
      const llvm::FunctionType *FBadTy;
      FBadTy = llvm::FunctionType::get(ResultType, false);
      llvm::Value *F = CGM.CreateRuntimeFunction(FBadTy, "__cxa_bad_cast");
      if (llvm::BasicBlock *InvokeDest = getInvokeDest()) {
        llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
        Builder.CreateInvoke(F, Cont, InvokeDest)->setDoesNotReturn();
        EmitBlock(Cont);
      } else {
        // FIXME: Does this ever make sense?
        Builder.CreateCall(F)->setDoesNotReturn();
      }
      Builder.CreateUnreachable();
    }
  }

  if (CanBeZero) {
    Builder.CreateBr(ContBlock);
    EmitBlock(NullBlock);
    Builder.CreateBr(ContBlock);
  }
  EmitBlock(ContBlock);
  if (CanBeZero) {
    llvm::PHINode *PHI = Builder.CreatePHI(LTy);
    PHI->reserveOperandSpace(2);
    PHI->addIncoming(V, NonZeroBlock);
    PHI->addIncoming(llvm::Constant::getNullValue(LTy), NullBlock);
    V = PHI;
  }

  return V;
}