//===--- CGClass.cpp - Emit LLVM Code for C++ classes ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with C++ code generation of classes
//
//===----------------------------------------------------------------------===//

#include "CGBlocks.h"
#include "CGDebugInfo.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Frontend/CodeGenOptions.h"

using namespace clang;
using namespace CodeGen;

static CharUnits
ComputeNonVirtualBaseClassOffset(ASTContext &Context,
                                 const CXXRecordDecl *DerivedClass,
                                 CastExpr::path_const_iterator Start,
                                 CastExpr::path_const_iterator End) {
  CharUnits Offset = CharUnits::Zero();

  const CXXRecordDecl *RD = DerivedClass;

  for (CastExpr::path_const_iterator I = Start; I != End; ++I) {
    const CXXBaseSpecifier *Base = *I;
    assert(!Base->isVirtual() && "Should not see virtual bases here!");

    // Get the layout.
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());

    // Add the offset.
    Offset += Layout.getBaseClassOffset(BaseDecl);

    RD = BaseDecl;
  }

  return Offset;
}

llvm::Constant *
CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
                                   CastExpr::path_const_iterator PathBegin,
                                   CastExpr::path_const_iterator PathEnd) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  CharUnits Offset =
    ComputeNonVirtualBaseClassOffset(getContext(), ClassDecl,
                                     PathBegin, PathEnd);
  if (Offset.isZero())
    return 0;

  llvm::Type *PtrDiffTy =
    Types.ConvertType(getContext().getPointerDiffType());

  return llvm::ConstantInt::get(PtrDiffTy, Offset.getQuantity());
}

/// Gets the address of a direct base class within a complete object.
/// This should only be used for (1) non-virtual bases or (2) virtual bases
/// when the type is known to be complete (e.g. in complete destructors).
///
/// The object pointed to by 'This' is assumed to be non-null.
llvm::Value *
CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
                                                   const CXXRecordDecl *Derived,
                                                   const CXXRecordDecl *Base,
                                                   bool BaseIsVirtual) {
  // 'this' must be a pointer (in some address space) to Derived.
  assert(This->getType()->isPointerTy() &&
         cast<llvm::PointerType>(This->getType())->getElementType()
           == ConvertType(Derived));

  // Compute the offset of the base, virtual or not.
  CharUnits Offset;
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived);
  if (BaseIsVirtual)
    Offset = Layout.getVBaseClassOffset(Base);
  else
    Offset = Layout.getBaseClassOffset(Base);

  // Shift and cast down to the base type.
  // TODO: for complete types, this should be possible with a GEP.
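  // Illustrative sketch (not in the original source) of the IR this produces
  // when Offset is nonzero, in the typed-pointer syntax of this era:
  //   %0       = bitcast %struct.Derived* %this to i8*
  //   %add.ptr = getelementptr inbounds i8* %0, i64 <Offset>
  //   %1       = bitcast i8* %add.ptr to %struct.Base*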
  llvm::Value *V = This;
  if (Offset.isPositive()) {
    V = Builder.CreateBitCast(V, Int8PtrTy);
    V = Builder.CreateConstInBoundsGEP1_64(V, Offset.getQuantity());
  }
  V = Builder.CreateBitCast(V, ConvertType(Base)->getPointerTo());

  return V;
}

static llvm::Value *
ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ptr,
                                CharUnits nonVirtualOffset,
                                llvm::Value *virtualOffset) {
  // Assert that we have something to do.
  assert(!nonVirtualOffset.isZero() || virtualOffset != 0);

  // Compute the offset from the static and dynamic components.
  llvm::Value *baseOffset;
  if (!nonVirtualOffset.isZero()) {
    baseOffset = llvm::ConstantInt::get(CGF.PtrDiffTy,
                                        nonVirtualOffset.getQuantity());
    if (virtualOffset) {
      baseOffset = CGF.Builder.CreateAdd(virtualOffset, baseOffset);
    }
  } else {
    baseOffset = virtualOffset;
  }

  // Apply the base offset.
  ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8PtrTy);
  ptr = CGF.Builder.CreateInBoundsGEP(ptr, baseOffset, "add.ptr");
  return ptr;
}

llvm::Value *
CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
                                       const CXXRecordDecl *Derived,
                                       CastExpr::path_const_iterator PathBegin,
                                       CastExpr::path_const_iterator PathEnd,
                                       bool NullCheckValue) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  CastExpr::path_const_iterator Start = PathBegin;
  const CXXRecordDecl *VBase = 0;

  // Sema has done some convenient canonicalization here: if the
  // access path involved any virtual steps, the conversion path will
  // *start* with a step down to the correct virtual base subobject,
  // and hence will not require any further steps.
  if ((*Start)->isVirtual()) {
    VBase =
      cast<CXXRecordDecl>((*Start)->getType()->getAs<RecordType>()->getDecl());
    ++Start;
  }

  // Compute the static offset of the ultimate destination within its
  // allocating subobject (the virtual base, if there is one, or else
  // the "complete" object that we see).
  CharUnits NonVirtualOffset =
    ComputeNonVirtualBaseClassOffset(getContext(), VBase ? VBase : Derived,
                                     Start, PathEnd);

  // If there's a virtual step, we can sometimes "devirtualize" it.
  // For now, that's limited to when the derived type is final.
  // TODO: "devirtualize" this for accesses to known-complete objects.
  if (VBase && Derived->hasAttr<FinalAttr>()) {
    const ASTRecordLayout &layout = getContext().getASTRecordLayout(Derived);
    CharUnits vBaseOffset = layout.getVBaseClassOffset(VBase);
    NonVirtualOffset += vBaseOffset;
    VBase = 0; // we no longer have a virtual step
  }

  // Get the base pointer type.
  llvm::Type *BasePtrTy =
    ConvertType((PathEnd[-1])->getType())->getPointerTo();

  // If the static offset is zero and we don't have a virtual step,
  // just do a bitcast; null checks are unnecessary.
  if (NonVirtualOffset.isZero() && !VBase) {
    return Builder.CreateBitCast(Value, BasePtrTy);
  }

  llvm::BasicBlock *origBB = 0;
  llvm::BasicBlock *endBB = 0;

  // Skip over the offset (and the vtable load) if we're supposed to
  // null-check the pointer.
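  // Illustrative sketch (not in the original source) of the control flow
  // built below when a null check is requested:
  //   entry:        %isnull = icmp eq %Derived* %v, null
  //                 br i1 %isnull, label %cast.end, label %cast.notnull
  //   cast.notnull: <apply non-virtual/virtual offsets>, br label %cast.end
  //   cast.end:     phi [ adjusted, %cast.notnull ], [ null, %entry ]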
  if (NullCheckValue) {
    origBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("cast.notnull");
    endBB = createBasicBlock("cast.end");

    llvm::Value *isNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(isNull, endBB, notNullBB);
    EmitBlock(notNullBB);
  }

  // Compute the virtual offset.
  llvm::Value *VirtualOffset = 0;
  if (VBase) {
    VirtualOffset = GetVirtualBaseClassOffset(Value, Derived, VBase);
  }

  // Apply both offsets.
  Value = ApplyNonVirtualAndVirtualOffset(*this, Value,
                                          NonVirtualOffset,
                                          VirtualOffset);

  // Cast to the destination type.
  Value = Builder.CreateBitCast(Value, BasePtrTy);

  // Build a phi if we needed a null check.
  if (NullCheckValue) {
    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    Builder.CreateBr(endBB);
    EmitBlock(endBB);

    llvm::PHINode *PHI = Builder.CreatePHI(BasePtrTy, 2, "cast.result");
    PHI->addIncoming(Value, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(BasePtrTy), origBB);
    Value = PHI;
  }

  return Value;
}

llvm::Value *
CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
                                          const CXXRecordDecl *Derived,
                                        CastExpr::path_const_iterator PathBegin,
                                          CastExpr::path_const_iterator PathEnd,
                                          bool NullCheckValue) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  QualType DerivedTy =
    getContext().getCanonicalType(getContext().getTagDeclType(Derived));
  llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo();

  llvm::Value *NonVirtualOffset =
    CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd);

  if (!NonVirtualOffset) {
    // No offset, we can just cast back.
    return Builder.CreateBitCast(Value, DerivedPtrTy);
  }

  llvm::BasicBlock *CastNull = 0;
  llvm::BasicBlock *CastNotNull = 0;
  llvm::BasicBlock *CastEnd = 0;

  if (NullCheckValue) {
    CastNull = createBasicBlock("cast.null");
    CastNotNull = createBasicBlock("cast.notnull");
    CastEnd = createBasicBlock("cast.end");

    llvm::Value *IsNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  // Apply the offset.
  Value = Builder.CreateBitCast(Value, Int8PtrTy);
  Value = Builder.CreateGEP(Value, Builder.CreateNeg(NonVirtualOffset),
                            "sub.ptr");

  // Just cast.
  Value = Builder.CreateBitCast(Value, DerivedPtrTy);

  if (NullCheckValue) {
    Builder.CreateBr(CastEnd);
    EmitBlock(CastNull);
    Builder.CreateBr(CastEnd);
    EmitBlock(CastEnd);

    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()),
                     CastNull);
    Value = PHI;
  }

  return Value;
}

llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD,
                                              bool ForVirtualBase,
                                              bool Delegating) {
  if (!CodeGenVTables::needsVTTParameter(GD)) {
    // This constructor/destructor does not need a VTT parameter.
    return 0;
  }

  const CXXRecordDecl *RD = cast<CXXMethodDecl>(CurCodeDecl)->getParent();
  const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent();

  llvm::Value *VTT;

  uint64_t SubVTTIndex;

  if (Delegating) {
    // If this is a delegating constructor call, just load the VTT.
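    // (The delegating target constructs the same object, so the VTT that was
    // passed to the current constructor can simply be forwarded as-is.)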
    return LoadCXXVTT();
  } else if (RD == Base) {
    // If the record matches the base, this is the complete ctor/dtor
    // variant calling the base variant in a class with virtual bases.
    assert(!CodeGenVTables::needsVTTParameter(CurGD) &&
           "doing no-op VTT offset in base dtor/ctor?");
    assert(!ForVirtualBase && "Can't have same class as virtual base!");
    SubVTTIndex = 0;
  } else {
    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
    CharUnits BaseOffset = ForVirtualBase ?
      Layout.getVBaseClassOffset(Base) :
      Layout.getBaseClassOffset(Base);

    SubVTTIndex =
      CGM.getVTables().getSubVTTIndex(RD, BaseSubobject(Base, BaseOffset));
    assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!");
  }

  if (CodeGenVTables::needsVTTParameter(CurGD)) {
    // A VTT parameter was passed to the constructor, use it.
    VTT = LoadCXXVTT();
    VTT = Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex);
  } else {
    // We're the complete constructor, so get the VTT by name.
    VTT = CGM.getVTables().GetAddrOfVTT(RD);
    VTT = Builder.CreateConstInBoundsGEP2_64(VTT, 0, SubVTTIndex);
  }

  return VTT;
}

namespace {
  /// Call the destructor for a direct base class.
  struct CallBaseDtor : EHScopeStack::Cleanup {
    const CXXRecordDecl *BaseClass;
    bool BaseIsVirtual;
    CallBaseDtor(const CXXRecordDecl *Base, bool BaseIsVirtual)
      : BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const CXXRecordDecl *DerivedClass =
        cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent();

      const CXXDestructorDecl *D = BaseClass->getDestructor();
      llvm::Value *Addr =
        CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThis(),
                                                  DerivedClass, BaseClass,
                                                  BaseIsVirtual);
      CGF.EmitCXXDestructorCall(D, Dtor_Base, BaseIsVirtual,
                                /*Delegating=*/false, Addr);
    }
  };

  /// A visitor which checks whether an initializer uses 'this' in a
  /// way which requires the vtable to be properly set.
  struct DynamicThisUseChecker : EvaluatedExprVisitor<DynamicThisUseChecker> {
    typedef EvaluatedExprVisitor<DynamicThisUseChecker> super;

    bool UsesThis;

    DynamicThisUseChecker(ASTContext &C) : super(C), UsesThis(false) {}

    // Black-list all explicit and implicit references to 'this'.
    //
    // Do we need to worry about external references to 'this' derived
    // from arbitrary code?  If so, then anything which runs arbitrary
    // external code might potentially access the vtable.
    void VisitCXXThisExpr(CXXThisExpr *E) { UsesThis = true; }
  };
}

static bool BaseInitializerUsesThis(ASTContext &C, const Expr *Init) {
  DynamicThisUseChecker Checker(C);
  Checker.Visit(const_cast<Expr*>(Init));
  return Checker.UsesThis;
}

static void EmitBaseInitializer(CodeGenFunction &CGF,
                                const CXXRecordDecl *ClassDecl,
                                CXXCtorInitializer *BaseInit,
                                CXXCtorType CtorType) {
  assert(BaseInit->isBaseInitializer() &&
         "Must have base initializer!");

  llvm::Value *ThisPtr = CGF.LoadCXXThis();

  const Type *BaseType = BaseInit->getBaseClass();
  CXXRecordDecl *BaseClassDecl =
    cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());

  bool isBaseVirtual = BaseInit->isBaseVirtual();

  // The base constructor doesn't construct virtual bases.
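  // Illustrative example (not in the original source):
  //   struct V {};  struct B : virtual V { B(); };
  // The complete-object constructor of B initializes V; the base-object
  // variant, used when B is itself a base subobject, leaves V to the
  // most-derived constructor and must skip it here.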
  if (CtorType == Ctor_Base && isBaseVirtual)
    return;

  // If the initializer for the base (other than the constructor
  // itself) accesses 'this' in any way, we need to initialize the
  // vtables.
  if (BaseInitializerUsesThis(CGF.getContext(), BaseInit->getInit()))
    CGF.InitializeVTablePointers(ClassDecl);

  // We can pretend to be a complete class because it only matters for
  // virtual bases, and we only do virtual bases for complete ctors.
  llvm::Value *V =
    CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl,
                                              BaseClassDecl,
                                              isBaseVirtual);
  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(BaseType);
  AggValueSlot AggSlot =
    AggValueSlot::forAddr(V, Alignment, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased);

  CGF.EmitAggExpr(BaseInit->getInit(), AggSlot);

  if (CGF.CGM.getLangOpts().Exceptions &&
      !BaseClassDecl->hasTrivialDestructor())
    CGF.EHStack.pushCleanup<CallBaseDtor>(EHCleanup, BaseClassDecl,
                                          isBaseVirtual);
}

static void EmitAggMemberInitializer(CodeGenFunction &CGF,
                                     LValue LHS,
                                     Expr *Init,
                                     llvm::Value *ArrayIndexVar,
                                     QualType T,
                                     ArrayRef<VarDecl *> ArrayIndexes,
                                     unsigned Index) {
  if (Index == ArrayIndexes.size()) {
    LValue LV = LHS;
    { // Scope for Cleanups.
      CodeGenFunction::RunCleanupsScope Cleanups(CGF);

      if (ArrayIndexVar) {
        // If we have an array index variable, load it and use it as an offset.
        // Then, increment the value.
        llvm::Value *Dest = LHS.getAddress();
        llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar);
        Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress");
        llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1);
        Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc");
        CGF.Builder.CreateStore(Next, ArrayIndexVar);

        // Update the LValue.
        LV.setAddress(Dest);
        CharUnits Align = CGF.getContext().getTypeAlignInChars(T);
        LV.setAlignment(std::min(Align, LV.getAlignment()));
      }

      switch (CGF.getEvaluationKind(T)) {
      case TEK_Scalar:
        CGF.EmitScalarInit(Init, /*decl*/ 0, LV, false);
        break;
      case TEK_Complex:
        CGF.EmitComplexExprIntoLValue(Init, LV, /*isInit*/ true);
        break;
      case TEK_Aggregate: {
        AggValueSlot Slot =
          AggValueSlot::forLValue(LV,
                                  AggValueSlot::IsDestructed,
                                  AggValueSlot::DoesNotNeedGCBarriers,
                                  AggValueSlot::IsNotAliased);

        CGF.EmitAggExpr(Init, Slot);
        break;
      }
      }
    }

    // Now, outside of the initializer cleanup scope, destroy the backing array
    // for a std::initializer_list member.
    CGF.MaybeEmitStdInitializerListCleanup(LV.getAddress(), Init);

    return;
  }

  const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(T);
  assert(Array && "Array initialization without the array type?");
  llvm::Value *IndexVar
    = CGF.GetAddrOfLocalVar(ArrayIndexes[Index]);
  assert(IndexVar && "Array index variable not loaded");

  // Initialize this index variable to zero.
  llvm::Value* Zero
    = llvm::Constant::getNullValue(
        CGF.ConvertType(CGF.getContext().getSizeType()));
  CGF.Builder.CreateStore(Zero, IndexVar);

  // Start the loop with a block that tests the condition.
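  // Illustrative sketch (not in the original source) of the loop emitted
  // below for each array dimension:
  //   for.cond: %i = load index; %isless = icmp ult %i, <NumElements>
  //             br i1 %isless, label %for.body, label %for.end
  //   for.body: <recurse for the next dimension, or emit the init>
  //   for.inc:  store %i + 1; br label %for.cond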
  llvm::BasicBlock *CondBlock = CGF.createBasicBlock("for.cond");
  llvm::BasicBlock *AfterFor = CGF.createBasicBlock("for.end");

  CGF.EmitBlock(CondBlock);

  llvm::BasicBlock *ForBody = CGF.createBasicBlock("for.body");
  // Generate: if (loop-index < number-of-elements) fall to the loop body,
  // otherwise, go to the block after the for-loop.
  uint64_t NumElements = Array->getSize().getZExtValue();
  llvm::Value *Counter = CGF.Builder.CreateLoad(IndexVar);
  llvm::Value *NumElementsPtr =
    llvm::ConstantInt::get(Counter->getType(), NumElements);
  llvm::Value *IsLess = CGF.Builder.CreateICmpULT(Counter, NumElementsPtr,
                                                  "isless");

  // If the condition is true, execute the body.
  CGF.Builder.CreateCondBr(IsLess, ForBody, AfterFor);

  CGF.EmitBlock(ForBody);
  llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc");

  {
    CodeGenFunction::RunCleanupsScope Cleanups(CGF);

    // Inside the loop body recurse to emit the inner loop or, eventually, the
    // constructor call.
    EmitAggMemberInitializer(CGF, LHS, Init, ArrayIndexVar,
                             Array->getElementType(), ArrayIndexes, Index + 1);
  }

  CGF.EmitBlock(ContinueBlock);

  // Emit the increment of the loop counter.
  llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1);
  Counter = CGF.Builder.CreateLoad(IndexVar);
  NextVal = CGF.Builder.CreateAdd(Counter, NextVal, "inc");
  CGF.Builder.CreateStore(NextVal, IndexVar);

  // Finally, branch back up to the condition for the next iteration.
  CGF.EmitBranch(CondBlock);

  // Emit the fall-through block.
  CGF.EmitBlock(AfterFor, true);
}

static void EmitMemberInitializer(CodeGenFunction &CGF,
                                  const CXXRecordDecl *ClassDecl,
                                  CXXCtorInitializer *MemberInit,
                                  const CXXConstructorDecl *Constructor,
                                  FunctionArgList &Args) {
  assert(MemberInit->isAnyMemberInitializer() &&
         "Must have member initializer!");
  assert(MemberInit->getInit() && "Must have initializer!");

  // non-static data member initializers.
  FieldDecl *Field = MemberInit->getAnyMember();
  QualType FieldType = Field->getType();

  llvm::Value *ThisPtr = CGF.LoadCXXThis();
  QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
  LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);

  if (MemberInit->isIndirectMemberInitializer()) {
    // If we are initializing an anonymous union field, drill down to
    // the field.
    IndirectFieldDecl *IndirectField = MemberInit->getIndirectMember();
    IndirectFieldDecl::chain_iterator I = IndirectField->chain_begin(),
      IEnd = IndirectField->chain_end();
    for ( ; I != IEnd; ++I)
      LHS = CGF.EmitLValueForFieldInitialization(LHS, cast<FieldDecl>(*I));
    FieldType = MemberInit->getIndirectMember()->getAnonField()->getType();
  } else {
    LHS = CGF.EmitLValueForFieldInitialization(LHS, Field);
  }

  // Special case: if we are in a copy or move constructor, and we are copying
  // an array of PODs or classes with trivial copy constructors, ignore the
  // AST and perform the copy we know is equivalent.
  // FIXME: This is hacky at best... if we had a bit more explicit information
  // in the AST, we could generalize it more easily.
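  // Illustrative example (not in the original source):
  //   struct T { int a[16]; };
  // In T's implicitly-defined copy constructor, member 'a' is copied with a
  // single aggregate copy rather than the element-wise loop the AST implies.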
  const ConstantArrayType *Array
    = CGF.getContext().getAsConstantArrayType(FieldType);
  if (Array && Constructor->isImplicitlyDefined() &&
      Constructor->isCopyOrMoveConstructor()) {
    QualType BaseElementTy = CGF.getContext().getBaseElementType(Array);
    CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());
    if (BaseElementTy.isPODType(CGF.getContext()) ||
        (CE && CE->getConstructor()->isTrivial())) {
      // Find the source pointer. We know it's the last argument because
      // we know we're in an implicit copy constructor.
      unsigned SrcArgIndex = Args.size() - 1;
      llvm::Value *SrcPtr
        = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex]));
      LValue ThisRHSLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
      LValue Src = CGF.EmitLValueForFieldInitialization(ThisRHSLV, Field);

      // Copy the aggregate.
      CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType,
                            LHS.isVolatileQualified());
      return;
    }
  }

  ArrayRef<VarDecl *> ArrayIndexes;
  if (MemberInit->getNumArrayIndices())
    ArrayIndexes = MemberInit->getArrayIndexes();
  CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit(), ArrayIndexes);
}

void CodeGenFunction::EmitInitializerForField(FieldDecl *Field,
                                              LValue LHS, Expr *Init,
                                             ArrayRef<VarDecl *> ArrayIndexes) {
  QualType FieldType = Field->getType();
  switch (getEvaluationKind(FieldType)) {
  case TEK_Scalar:
    if (LHS.isSimple()) {
      EmitExprAsInit(Init, Field, LHS, false);
    } else {
      RValue RHS = RValue::get(EmitScalarExpr(Init));
      EmitStoreThroughLValue(RHS, LHS);
    }
    break;
  case TEK_Complex:
    EmitComplexExprIntoLValue(Init, LHS, /*isInit*/ true);
    break;
  case TEK_Aggregate: {
    llvm::Value *ArrayIndexVar = 0;
    if (ArrayIndexes.size()) {
      llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

      // The LHS is a pointer to the first object we'll be constructing, as
      // a flat array.
      QualType BaseElementTy = getContext().getBaseElementType(FieldType);
      llvm::Type *BasePtr = ConvertType(BaseElementTy);
      BasePtr = llvm::PointerType::getUnqual(BasePtr);
      llvm::Value *BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(),
                                                       BasePtr);
      LHS = MakeAddrLValue(BaseAddrPtr, BaseElementTy);

      // Create an array index that will be used to walk over all of the
      // objects we're constructing.
      ArrayIndexVar = CreateTempAlloca(SizeTy, "object.index");
      llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
      Builder.CreateStore(Zero, ArrayIndexVar);

      // Emit the block variables for the array indices, if any.
      for (unsigned I = 0, N = ArrayIndexes.size(); I != N; ++I)
        EmitAutoVarDecl(*ArrayIndexes[I]);
    }

    EmitAggMemberInitializer(*this, LHS, Init, ArrayIndexVar, FieldType,
                             ArrayIndexes, 0);
  }
  }

  // Ensure that we destroy this object if an exception is thrown
  // later in the constructor.
  QualType::DestructionKind dtorKind = FieldType.isDestructedType();
  if (needsEHCleanup(dtorKind))
    pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
}

/// Checks whether the given constructor is a valid subject for the
/// complete-to-base constructor delegation optimization, i.e.
/// emitting the complete constructor as a simple call to the base
/// constructor.
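///
/// For example (illustrative only): for a class with no virtual bases, the
/// complete-object constructor and the base-object constructor would emit
/// identical bodies, so the former can simply forward to the latter with the
/// same arguments.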
static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor) {

  // Currently we disable the optimization for classes with virtual
  // bases because (1) the addresses of parameter variables need to be
  // consistent across all initializers but (2) the delegate function
  // call necessarily creates a second copy of the parameter variable.
  //
  // The limiting example (purely theoretical AFAIK):
  //   struct A { A(int &c) { c++; } };
  //   struct B : virtual A {
  //     B(int count) : A(count) { printf("%d\n", count); }
  //   };
  // ...although even this example could in principle be emitted as a
  // delegation since the address of the parameter doesn't escape.
  if (Ctor->getParent()->getNumVBases()) {
    // TODO: white-list trivial vbase initializers.  This case wouldn't
    // be subject to the restrictions below.

    // TODO: white-list cases where:
    //  - there are no non-reference parameters to the constructor
    //  - the initializers don't access any non-reference parameters
    //  - the initializers don't take the address of non-reference
    //    parameters
    //  - etc.
    // If we ever add any of the above cases, remember that:
    //  - function-try-blocks will always blacklist this optimization
    //  - we need to perform the constructor prologue and cleanup in
    //    EmitConstructorBody.

    return false;
  }

  // We also disable the optimization for variadic functions because
  // it's impossible to "re-pass" varargs.
  if (Ctor->getType()->getAs<FunctionProtoType>()->isVariadic())
    return false;

  // FIXME: Decide if we can do a delegation of a delegating constructor.
  if (Ctor->isDelegatingConstructor())
    return false;

  return true;
}

/// EmitConstructorBody - Emits the body of the current constructor.
void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
  const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(CurGD.getDecl());
  CXXCtorType CtorType = CurGD.getCtorType();

  // Before we go any further, try the complete->base constructor
  // delegation optimization.
  if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor) &&
      CGM.getTarget().getCXXABI().hasConstructorVariants()) {
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitLocation(Builder, Ctor->getLocEnd());
    EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args);
    return;
  }

  Stmt *Body = Ctor->getBody();

  // Enter the function-try-block before the constructor prologue if
  // applicable.
  bool IsTryBody = (Body && isa<CXXTryStmt>(Body));
  if (IsTryBody)
    EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);

  EHScopeStack::stable_iterator CleanupDepth = EHStack.stable_begin();

  // TODO: in restricted cases, we can emit the vbase initializers of
  // a complete ctor and then delegate to the base ctor.

  // Emit the constructor prologue, i.e. the base and member
  // initializers.
  EmitCtorPrologue(Ctor, CtorType, Args);

  // Emit the body of the statement.
  if (IsTryBody)
    EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
  else if (Body)
    EmitStmt(Body);

  // Emit any cleanup blocks associated with the member or base
  // initializers, which includes (along the exceptional path) the
  // destructors for those members and bases that were fully
  // constructed.
  PopCleanupBlocks(CleanupDepth);

  if (IsTryBody)
    ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
}

namespace {
  class FieldMemcpyizer {
  public:
    FieldMemcpyizer(CodeGenFunction &CGF, const CXXRecordDecl *ClassDecl,
                    const VarDecl *SrcRec)
      : CGF(CGF), ClassDecl(ClassDecl), SrcRec(SrcRec),
        RecLayout(CGF.getContext().getASTRecordLayout(ClassDecl)),
        FirstField(0), LastField(0), FirstFieldOffset(0), LastFieldOffset(0),
        LastAddedFieldIndex(0) { }

    static bool isMemcpyableField(FieldDecl *F) {
      Qualifiers Qual = F->getType().getQualifiers();
      if (Qual.hasVolatile() || Qual.hasObjCLifetime())
        return false;
      return true;
    }

    void addMemcpyableField(FieldDecl *F) {
      if (FirstField == 0)
        addInitialField(F);
      else
        addNextField(F);
    }

    CharUnits getMemcpySize() const {
      unsigned LastFieldSize =
        LastField->isBitField() ?
          LastField->getBitWidthValue(CGF.getContext()) :
          CGF.getContext().getTypeSize(LastField->getType());
      uint64_t MemcpySizeBits =
        LastFieldOffset + LastFieldSize - FirstFieldOffset +
        CGF.getContext().getCharWidth() - 1;
      CharUnits MemcpySize =
        CGF.getContext().toCharUnitsFromBits(MemcpySizeBits);
      return MemcpySize;
    }

    void emitMemcpy() {
      // Give the subclass a chance to bail out if it feels the memcpy isn't
      // worth it (e.g. hasn't aggregated enough data).
      if (FirstField == 0) {
        return;
      }

      CharUnits Alignment;

      if (FirstField->isBitField()) {
        const CGRecordLayout &RL =
          CGF.getTypes().getCGRecordLayout(FirstField->getParent());
        const CGBitFieldInfo &BFInfo = RL.getBitFieldInfo(FirstField);
        Alignment = CharUnits::fromQuantity(BFInfo.StorageAlignment);
      } else {
        Alignment = CGF.getContext().getDeclAlign(FirstField);
      }

      assert((CGF.getContext().toCharUnitsFromBits(FirstFieldOffset) %
              Alignment) == 0 && "Bad field alignment.");

      CharUnits MemcpySize = getMemcpySize();
      QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
      llvm::Value *ThisPtr = CGF.LoadCXXThis();
      LValue DestLV = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
      LValue Dest = CGF.EmitLValueForFieldInitialization(DestLV, FirstField);
      llvm::Value *SrcPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(SrcRec));
      LValue SrcLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
      LValue Src = CGF.EmitLValueForFieldInitialization(SrcLV, FirstField);

      emitMemcpyIR(Dest.isBitField() ? Dest.getBitFieldAddr() : Dest.getAddress(),
                   Src.isBitField() ?
                     Src.getBitFieldAddr() : Src.getAddress(),
                   MemcpySize, Alignment);
      reset();
    }

    void reset() {
      FirstField = 0;
    }

  protected:
    CodeGenFunction &CGF;
    const CXXRecordDecl *ClassDecl;

  private:

    void emitMemcpyIR(llvm::Value *DestPtr, llvm::Value *SrcPtr,
                      CharUnits Size, CharUnits Alignment) {
      llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
      llvm::Type *DBP =
        llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), DPT->getAddressSpace());
      DestPtr = CGF.Builder.CreateBitCast(DestPtr, DBP);

      llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
      llvm::Type *SBP =
        llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), SPT->getAddressSpace());
      SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, SBP);

      CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, Size.getQuantity(),
                               Alignment.getQuantity());
    }

    void addInitialField(FieldDecl *F) {
      FirstField = F;
      LastField = F;
      FirstFieldOffset = RecLayout.getFieldOffset(F->getFieldIndex());
      LastFieldOffset = FirstFieldOffset;
      LastAddedFieldIndex = F->getFieldIndex();
      return;
    }

    void addNextField(FieldDecl *F) {
      assert(F->getFieldIndex() == LastAddedFieldIndex + 1 &&
             "Cannot aggregate non-contiguous fields.");
      LastAddedFieldIndex = F->getFieldIndex();

      // The 'first' and 'last' fields are chosen by offset, rather than field
      // index. This allows the code to support bitfields, as well as regular
      // fields.
      uint64_t FOffset = RecLayout.getFieldOffset(F->getFieldIndex());
      if (FOffset < FirstFieldOffset) {
        FirstField = F;
        FirstFieldOffset = FOffset;
      } else if (FOffset > LastFieldOffset) {
        LastField = F;
        LastFieldOffset = FOffset;
      }
    }

    const VarDecl *SrcRec;
    const ASTRecordLayout &RecLayout;
    FieldDecl *FirstField;
    FieldDecl *LastField;
    uint64_t FirstFieldOffset, LastFieldOffset;
    unsigned LastAddedFieldIndex;
  };

  class ConstructorMemcpyizer : public FieldMemcpyizer {
  private:

    /// Get source argument for copy constructor. Returns null if not a copy
    /// constructor.
    static const VarDecl* getTrivialCopySource(const CXXConstructorDecl *CD,
                                               FunctionArgList &Args) {
      if (CD->isCopyOrMoveConstructor() && CD->isImplicitlyDefined())
        return Args[Args.size() - 1];
      return 0;
    }

    // Returns true if a CXXCtorInitializer represents a member initialization
    // that can be rolled into a memcpy.
    bool isMemberInitMemcpyable(CXXCtorInitializer *MemberInit) const {
      if (!MemcpyableCtor)
        return false;
      FieldDecl *Field = MemberInit->getMember();
      assert(Field != 0 && "No field for member init.");
      QualType FieldType = Field->getType();
      CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());

      // Bail out on non-POD, not-trivially-constructible members.
      if (!(CE && CE->getConstructor()->isTrivial()) &&
          !(FieldType.isTriviallyCopyableType(CGF.getContext()) ||
            FieldType->isReferenceType()))
        return false;

      // Bail out on volatile fields.
      if (!isMemcpyableField(Field))
        return false;

      // Otherwise we're good.
      return true;
    }

  public:
    ConstructorMemcpyizer(CodeGenFunction &CGF, const CXXConstructorDecl *CD,
                          FunctionArgList &Args)
      : FieldMemcpyizer(CGF, CD->getParent(), getTrivialCopySource(CD, Args)),
        ConstructorDecl(CD),
        MemcpyableCtor(CD->isImplicitlyDefined() &&
                       CD->isCopyOrMoveConstructor() &&
                       CGF.getLangOpts().getGC() == LangOptions::NonGC),
        Args(Args) { }

    void addMemberInitializer(CXXCtorInitializer *MemberInit) {
      if (isMemberInitMemcpyable(MemberInit)) {
        AggregatedInits.push_back(MemberInit);
        addMemcpyableField(MemberInit->getMember());
      } else {
        emitAggregatedInits();
        EmitMemberInitializer(CGF, ConstructorDecl->getParent(), MemberInit,
                              ConstructorDecl, Args);
      }
    }

    void emitAggregatedInits() {
      if (AggregatedInits.size() <= 1) {
        // This memcpy is too small to be worthwhile. Fall back on default
        // codegen.
        for (unsigned i = 0; i < AggregatedInits.size(); ++i) {
          EmitMemberInitializer(CGF, ConstructorDecl->getParent(),
                                AggregatedInits[i], ConstructorDecl, Args);
        }
        reset();
        return;
      }

      pushEHDestructors();
      emitMemcpy();
      AggregatedInits.clear();
    }

    void pushEHDestructors() {
      llvm::Value *ThisPtr = CGF.LoadCXXThis();
      QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
      LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);

      for (unsigned i = 0; i < AggregatedInits.size(); ++i) {
        QualType FieldType = AggregatedInits[i]->getMember()->getType();
        QualType::DestructionKind dtorKind = FieldType.isDestructedType();
        if (CGF.needsEHCleanup(dtorKind))
          CGF.pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
      }
    }

    void finish() {
      emitAggregatedInits();
    }

  private:
    const CXXConstructorDecl *ConstructorDecl;
    bool MemcpyableCtor;
    FunctionArgList &Args;
    SmallVector<CXXCtorInitializer*, 16> AggregatedInits;
  };

  class AssignmentMemcpyizer : public FieldMemcpyizer {
  private:

    // Returns the memcpyable field copied by the given statement, if one
    // exists. Otherwise returns null.
    FieldDecl* getMemcpyableField(Stmt *S) {
      if (!AssignmentsMemcpyable)
        return 0;
      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(S)) {
        // Recognise trivial assignments.
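        // e.g. (illustrative) 'this->x = other.x;' inside an implicit
        // copy-assignment operator, where 'x' is a memcpyable field.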
        if (BO->getOpcode() != BO_Assign)
          return 0;
        MemberExpr *ME = dyn_cast<MemberExpr>(BO->getLHS());
        if (!ME)
          return 0;
        FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
        if (!Field || !isMemcpyableField(Field))
          return 0;
        Stmt *RHS = BO->getRHS();
        if (ImplicitCastExpr *EC = dyn_cast<ImplicitCastExpr>(RHS))
          RHS = EC->getSubExpr();
        if (!RHS)
          return 0;
        MemberExpr *ME2 = dyn_cast<MemberExpr>(RHS);
        if (!ME2 || dyn_cast<FieldDecl>(ME2->getMemberDecl()) != Field)
          return 0;
        return Field;
      } else if (CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(S)) {
        CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MCE->getCalleeDecl());
        if (!(MD && (MD->isCopyAssignmentOperator() ||
                     MD->isMoveAssignmentOperator()) &&
              MD->isTrivial()))
          return 0;
        MemberExpr *IOA = dyn_cast<MemberExpr>(MCE->getImplicitObjectArgument());
        if (!IOA)
          return 0;
        FieldDecl *Field = dyn_cast<FieldDecl>(IOA->getMemberDecl());
        if (!Field || !isMemcpyableField(Field))
          return 0;
        MemberExpr *Arg0 = dyn_cast<MemberExpr>(MCE->getArg(0));
        if (!Arg0 || Field != dyn_cast<FieldDecl>(Arg0->getMemberDecl()))
          return 0;
        return Field;
      } else if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
        FunctionDecl *FD = dyn_cast<FunctionDecl>(CE->getCalleeDecl());
        if (!FD || FD->getBuiltinID() != Builtin::BI__builtin_memcpy)
          return 0;
        Expr *DstPtr = CE->getArg(0);
        if (ImplicitCastExpr *DC = dyn_cast<ImplicitCastExpr>(DstPtr))
          DstPtr = DC->getSubExpr();
        UnaryOperator *DUO = dyn_cast<UnaryOperator>(DstPtr);
        if (!DUO || DUO->getOpcode() != UO_AddrOf)
          return 0;
        MemberExpr *ME = dyn_cast<MemberExpr>(DUO->getSubExpr());
        if (!ME)
          return 0;
        FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
        if (!Field || !isMemcpyableField(Field))
          return 0;
        Expr *SrcPtr = CE->getArg(1);
        if (ImplicitCastExpr *SC = dyn_cast<ImplicitCastExpr>(SrcPtr))
          SrcPtr = SC->getSubExpr();
        UnaryOperator *SUO = dyn_cast<UnaryOperator>(SrcPtr);
        if (!SUO || SUO->getOpcode() != UO_AddrOf)
          return 0;
        MemberExpr *ME2 = dyn_cast<MemberExpr>(SUO->getSubExpr());
        if (!ME2 || Field != dyn_cast<FieldDecl>(ME2->getMemberDecl()))
          return 0;
        return Field;
      }

      return 0;
    }

    bool AssignmentsMemcpyable;
    SmallVector<Stmt*, 16> AggregatedStmts;

  public:

    AssignmentMemcpyizer(CodeGenFunction &CGF, const CXXMethodDecl *AD,
                         FunctionArgList &Args)
      : FieldMemcpyizer(CGF, AD->getParent(), Args[Args.size() - 1]),
        AssignmentsMemcpyable(CGF.getLangOpts().getGC() == LangOptions::NonGC) {
      assert(Args.size() == 2);
    }

    void emitAssignment(Stmt *S) {
      FieldDecl *F = getMemcpyableField(S);
      if (F) {
        addMemcpyableField(F);
        AggregatedStmts.push_back(S);
      } else {
        emitAggregatedStmts();
        CGF.EmitStmt(S);
      }
    }

    void emitAggregatedStmts() {
      if (AggregatedStmts.size() <= 1) {
        // Too few statements to make a memcpy worthwhile; emit them
        // individually. reset() clears FirstField, so the emitMemcpy()
        // call below becomes a no-op.
        for (unsigned i = 0; i < AggregatedStmts.size(); ++i)
          CGF.EmitStmt(AggregatedStmts[i]);
        reset();
      }

      emitMemcpy();
      AggregatedStmts.clear();
    }

    void finish() {
      emitAggregatedStmts();
    }
  };

}

/// EmitCtorPrologue - This routine generates necessary code to initialize
/// base classes and non-static data members belonging to this constructor.
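///
/// Initializers are emitted in this order (illustrative summary, matching
/// the code below): virtual bases first (complete-object variants only),
/// then non-virtual bases, then vtable pointers, then non-static data
/// members.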
void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
                                       CXXCtorType CtorType,
                                       FunctionArgList &Args) {
  if (CD->isDelegatingConstructor())
    return EmitDelegatingCXXConstructorCall(CD, Args);

  const CXXRecordDecl *ClassDecl = CD->getParent();

  CXXConstructorDecl::init_const_iterator B = CD->init_begin(),
                                          E = CD->init_end();

  llvm::BasicBlock *BaseCtorContinueBB = 0;
  if (ClassDecl->getNumVBases() &&
      !CGM.getTarget().getCXXABI().hasConstructorVariants()) {
    // The ABIs that don't have constructor variants need to put a branch
    // before the virtual base initialization code.
    BaseCtorContinueBB = CGM.getCXXABI().EmitCtorCompleteObjectHandler(*this);
    assert(BaseCtorContinueBB);
  }

  // Virtual base initializers first.
  for (; B != E && (*B)->isBaseInitializer() && (*B)->isBaseVirtual(); B++) {
    EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
  }

  if (BaseCtorContinueBB) {
    // Complete object handler should continue to the remaining initializers.
    Builder.CreateBr(BaseCtorContinueBB);
    EmitBlock(BaseCtorContinueBB);
  }

  // Then, non-virtual base initializers.
  for (; B != E && (*B)->isBaseInitializer(); B++) {
    assert(!(*B)->isBaseVirtual());
    EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
  }

  InitializeVTablePointers(ClassDecl);

  // And finally, initialize class members.
  FieldConstructionScope FCS(*this, CXXThisValue);
  ConstructorMemcpyizer CM(*this, CD, Args);
  for (; B != E; B++) {
    CXXCtorInitializer *Member = (*B);
    assert(!Member->isBaseInitializer());
    assert(Member->isAnyMemberInitializer() &&
           "Delegating initializer on non-delegating constructor");
    CM.addMemberInitializer(Member);
  }
  CM.finish();
}

static bool
FieldHasTrivialDestructorBody(ASTContext &Context, const FieldDecl *Field);

static bool
HasTrivialDestructorBody(ASTContext &Context,
                         const CXXRecordDecl *BaseClassDecl,
                         const CXXRecordDecl *MostDerivedClassDecl)
{
  // If the destructor is trivial we don't have to check anything else.
  if (BaseClassDecl->hasTrivialDestructor())
    return true;

  if (!BaseClassDecl->getDestructor()->hasTrivialBody())
    return false;

  // Check fields.
  for (CXXRecordDecl::field_iterator I = BaseClassDecl->field_begin(),
       E = BaseClassDecl->field_end(); I != E; ++I) {
    const FieldDecl *Field = *I;

    if (!FieldHasTrivialDestructorBody(Context, Field))
      return false;
  }

  // Check non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I =
         BaseClassDecl->bases_begin(), E = BaseClassDecl->bases_end();
       I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *NonVirtualBase =
      cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
    if (!HasTrivialDestructorBody(Context, NonVirtualBase,
                                  MostDerivedClassDecl))
      return false;
  }

  if (BaseClassDecl == MostDerivedClassDecl) {
    // Check virtual bases.
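    // (Virtual bases are destroyed only by the most-derived class's
    // complete-object destructor, so they only need to be considered at
    // this level.)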
    for (CXXRecordDecl::base_class_const_iterator I =
           BaseClassDecl->vbases_begin(), E = BaseClassDecl->vbases_end();
         I != E; ++I) {
      const CXXRecordDecl *VirtualBase =
        cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
      if (!HasTrivialDestructorBody(Context, VirtualBase,
                                    MostDerivedClassDecl))
        return false;
    }
  }

  return true;
}

static bool
FieldHasTrivialDestructorBody(ASTContext &Context,
                              const FieldDecl *Field)
{
  QualType FieldBaseElementType = Context.getBaseElementType(Field->getType());

  const RecordType *RT = FieldBaseElementType->getAs<RecordType>();
  if (!RT)
    return true;

  CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
  return HasTrivialDestructorBody(Context, FieldClassDecl, FieldClassDecl);
}

/// CanSkipVTablePointerInitialization - Check whether the destructor can be
/// emitted without first initializing any vtable pointers.
static bool CanSkipVTablePointerInitialization(ASTContext &Context,
                                               const CXXDestructorDecl *Dtor) {
  if (!Dtor->hasTrivialBody())
    return false;

  // Check the fields.
  const CXXRecordDecl *ClassDecl = Dtor->getParent();
  for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
       E = ClassDecl->field_end(); I != E; ++I) {
    const FieldDecl *Field = *I;

    if (!FieldHasTrivialDestructorBody(Context, Field))
      return false;
  }

  return true;
}

/// EmitDestructorBody - Emits the body of the current destructor.
void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
  const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CurGD.getDecl());
  CXXDtorType DtorType = CurGD.getDtorType();

  // The call to operator delete in a deleting destructor happens
  // outside of the function-try-block, which means it's always
  // possible to delegate the destructor body to the complete
  // destructor.  Do so.
  if (DtorType == Dtor_Deleting) {
    EnterDtorCleanups(Dtor, Dtor_Deleting);
    EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
                          /*Delegating=*/false, LoadCXXThis());
    PopCleanupBlock();
    return;
  }

  Stmt *Body = Dtor->getBody();

  // If the body is a function-try-block, enter the try before
  // anything else.
  bool isTryBody = (Body && isa<CXXTryStmt>(Body));
  if (isTryBody)
    EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);

  // Enter the epilogue cleanups.
  RunCleanupsScope DtorEpilogue(*this);

  // If this is the complete variant, just invoke the base variant;
  // the epilogue will destruct the virtual bases.  But we can't do
  // this optimization if the body is a function-try-block, because
  // we'd introduce *two* handler blocks.
  switch (DtorType) {
  case Dtor_Deleting: llvm_unreachable("already handled deleting case");

  case Dtor_Complete:
    // Enter the cleanup scopes for virtual bases.
    EnterDtorCleanups(Dtor, Dtor_Complete);

    if (!isTryBody &&
        CGM.getTarget().getCXXABI().hasDestructorVariants()) {
      EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
                            /*Delegating=*/false, LoadCXXThis());
      break;
    }
    // Fallthrough: act like we're in the base variant.

  case Dtor_Base:
    // Enter the cleanup scopes for fields and non-virtual bases.
    EnterDtorCleanups(Dtor, Dtor_Base);

    // Initialize the vtable pointers before entering the body.
    if (!CanSkipVTablePointerInitialization(getContext(), Dtor))
      InitializeVTablePointers(Dtor->getParent());

    if (isTryBody)
      EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
    else if (Body)
      EmitStmt(Body);
    else {
      assert(Dtor->isImplicit() && "bodyless dtor not implicit");
      // nothing to do besides what's in the epilogue
    }
    // -fapple-kext must inline any call to this dtor into
    // the caller's body.
    if (getLangOpts().AppleKext)
      CurFn->addFnAttr(llvm::Attribute::AlwaysInline);
    break;
  }

  // Jump out through the epilogue cleanups.
  DtorEpilogue.ForceCleanup();

  // Exit the try if applicable.
  if (isTryBody)
    ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
}

void CodeGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &Args) {
  const CXXMethodDecl *AssignOp = cast<CXXMethodDecl>(CurGD.getDecl());
  const Stmt *RootS = AssignOp->getBody();
  assert(isa<CompoundStmt>(RootS) &&
         "Body of an implicit assignment operator should be compound stmt.");
  const CompoundStmt *RootCS = cast<CompoundStmt>(RootS);

  LexicalScope Scope(*this, RootCS->getSourceRange());

  AssignmentMemcpyizer AM(*this, AssignOp, Args);
  for (CompoundStmt::const_body_iterator I = RootCS->body_begin(),
                                         E = RootCS->body_end();
       I != E; ++I) {
    AM.emitAssignment(*I);
  }
  AM.finish();
}

namespace {
  /// Call the operator delete associated with the current destructor.
  struct CallDtorDelete : EHScopeStack::Cleanup {
    CallDtorDelete() {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
      const CXXRecordDecl *ClassDecl = Dtor->getParent();
      CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
                         CGF.getContext().getTagDeclType(ClassDecl));
    }
  };

  struct CallDtorDeleteConditional : EHScopeStack::Cleanup {
    llvm::Value *ShouldDeleteCondition;
  public:
    CallDtorDeleteConditional(llvm::Value *ShouldDeleteCondition)
      : ShouldDeleteCondition(ShouldDeleteCondition) {
      assert(ShouldDeleteCondition != NULL);
    }

    void Emit(CodeGenFunction &CGF, Flags flags) {
      llvm::BasicBlock *callDeleteBB = CGF.createBasicBlock("dtor.call_delete");
      llvm::BasicBlock *continueBB = CGF.createBasicBlock("dtor.continue");
      llvm::Value *ShouldCallDelete
        = CGF.Builder.CreateIsNull(ShouldDeleteCondition);
      CGF.Builder.CreateCondBr(ShouldCallDelete, continueBB, callDeleteBB);

      CGF.EmitBlock(callDeleteBB);
      const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
      const CXXRecordDecl *ClassDecl = Dtor->getParent();
      CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
                         CGF.getContext().getTagDeclType(ClassDecl));
      CGF.Builder.CreateBr(continueBB);

      CGF.EmitBlock(continueBB);
    }
  };

  class DestroyField : public EHScopeStack::Cleanup {
    const FieldDecl *field;
    CodeGenFunction::Destroyer *destroyer;
    bool useEHCleanupForArray;

  public:
    DestroyField(const FieldDecl *field, CodeGenFunction::Destroyer *destroyer,
                 bool useEHCleanupForArray)
      : field(field), destroyer(destroyer),
        useEHCleanupForArray(useEHCleanupForArray) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      // Find the address of the field.
      llvm::Value *thisValue = CGF.LoadCXXThis();
      QualType RecordTy = CGF.getContext().getTagDeclType(field->getParent());
      LValue ThisLV = CGF.MakeAddrLValue(thisValue, RecordTy);
      LValue LV = CGF.EmitLValueForField(ThisLV, field);
      assert(LV.isSimple());

      CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer,
                      flags.isForNormalCleanup() && useEHCleanupForArray);
    }
  };
}

/// EnterDtorCleanups - Emit all code that comes at the end of a class's
/// destructor. This is to call destructors on members and base classes
/// in reverse order of their construction.
void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
                                        CXXDtorType DtorType) {
  assert(!DD->isTrivial() &&
         "Should not emit dtor epilogue for trivial dtor!");

  // The deleting-destructor phase just needs to call the appropriate
  // operator delete that Sema picked up.
  if (DtorType == Dtor_Deleting) {
    assert(DD->getOperatorDelete() &&
           "operator delete missing - EnterDtorCleanups");
    if (CXXStructorImplicitParamValue) {
      // If there is an implicit param to the deleting dtor, it's a boolean
      // telling whether we should call delete at the end of the dtor.
      EHStack.pushCleanup<CallDtorDeleteConditional>(
          NormalAndEHCleanup, CXXStructorImplicitParamValue);
    } else {
      EHStack.pushCleanup<CallDtorDelete>(NormalAndEHCleanup);
    }
    return;
  }

  const CXXRecordDecl *ClassDecl = DD->getParent();

  // Unions have no bases and do not call field destructors.
  if (ClassDecl->isUnion())
    return;

  // The complete-destructor phase just destructs all the virtual bases.
  if (DtorType == Dtor_Complete) {

    // We push them in the forward order so that they'll be popped in
    // the reverse order.
    for (CXXRecordDecl::base_class_const_iterator I =
           ClassDecl->vbases_begin(), E = ClassDecl->vbases_end();
         I != E; ++I) {
      const CXXBaseSpecifier &Base = *I;
      CXXRecordDecl *BaseClassDecl
        = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());

      // Ignore trivial destructors.
      if (BaseClassDecl->hasTrivialDestructor())
        continue;

      EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
                                        BaseClassDecl,
                                        /*BaseIsVirtual*/ true);
    }

    return;
  }

  assert(DtorType == Dtor_Base);

  // Destroy non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I =
         ClassDecl->bases_begin(), E = ClassDecl->bases_end(); I != E; ++I) {
    const CXXBaseSpecifier &Base = *I;

    // Ignore virtual bases.
    if (Base.isVirtual())
      continue;

    CXXRecordDecl *BaseClassDecl = Base.getType()->getAsCXXRecordDecl();

    // Ignore trivial destructors.
    if (BaseClassDecl->hasTrivialDestructor())
      continue;

    EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
                                      BaseClassDecl,
                                      /*BaseIsVirtual*/ false);
  }

  // Destroy direct fields.
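  // As with the bases above, field cleanups are pushed in declaration order
  // so that the cleanup stack pops them in reverse order of construction.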
  for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
       E = ClassDecl->field_end(); I != E; ++I) {
    const FieldDecl *field = *I;
    QualType type = field->getType();
    QualType::DestructionKind dtorKind = type.isDestructedType();
    if (!dtorKind) continue;

    // Anonymous union members do not have their destructors called.
    const RecordType *RT = type->getAsUnionType();
    if (RT && RT->getDecl()->isAnonymousStructOrUnion()) continue;

    CleanupKind cleanupKind = getCleanupKind(dtorKind);
    EHStack.pushCleanup<DestroyField>(cleanupKind, field,
                                      getDestroyer(dtorKind),
                                      cleanupKind & EHCleanup);
  }
}

/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
/// constructor for each of several members of an array.
///
/// \param ctor the constructor to call for each element
/// \param arrayType the type of the array to initialize
/// \param arrayBegin an arrayType*
/// \param zeroInitialize true if each element should be
///   zero-initialized before it is constructed
void
CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
                                            const ConstantArrayType *arrayType,
                                            llvm::Value *arrayBegin,
                                          CallExpr::const_arg_iterator argBegin,
                                            CallExpr::const_arg_iterator argEnd,
                                            bool zeroInitialize) {
  QualType elementType;
  llvm::Value *numElements =
    emitArrayLength(arrayType, elementType, arrayBegin);

  EmitCXXAggrConstructorCall(ctor, numElements, arrayBegin,
                             argBegin, argEnd, zeroInitialize);
}

/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
/// constructor for each of several members of an array.
///
/// \param ctor the constructor to call for each element
/// \param numElements the number of elements in the array;
///   may be zero
/// \param arrayBegin a T*, where T is the type constructed by ctor
/// \param zeroInitialize true if each element should be
///   zero-initialized before it is constructed
void
CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
                                            llvm::Value *numElements,
                                            llvm::Value *arrayBegin,
                                          CallExpr::const_arg_iterator argBegin,
                                            CallExpr::const_arg_iterator argEnd,
                                            bool zeroInitialize) {

  // It's legal for numElements to be zero.  This can happen both
  // dynamically, because x can be zero in 'new A[x]', and statically,
  // because of GCC extensions that permit zero-length arrays.  There
  // are probably legitimate places where we could assume that this
  // doesn't happen, but it's not clear that it's worth it.
  llvm::BranchInst *zeroCheckBranch = 0;

  // Optimize for a constant count.
  llvm::ConstantInt *constantCount
    = dyn_cast<llvm::ConstantInt>(numElements);
  if (constantCount) {
    // Just skip out if the constant count is zero.
    if (constantCount->isZero()) return;

    // Otherwise, emit the check.
  } else {
    llvm::BasicBlock *loopBB = createBasicBlock("new.ctorloop");
    llvm::Value *iszero = Builder.CreateIsNull(numElements, "isempty");
    // Both successors are loopBB for now; the zero successor is patched to
    // point at the continuation block once that block exists (see the
    // setSuccessor call below).
    zeroCheckBranch = Builder.CreateCondBr(iszero, loopBB, loopBB);
    EmitBlock(loopBB);
  }
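  // Illustrative sketch (not in the original source) of the emitted loop:
  //   arrayctor.loop: %cur  = phi [ begin, entry ], [ %next, loop ]
  //                   <construct the element at %cur>
  //                   %next = gep %cur, 1
  //                   %done = icmp eq %next, end
  //                   br i1 %done, label %arrayctor.cont, label %arrayctor.loop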

  // Find the end of the array.
  llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(arrayBegin, numElements,
                                                    "arrayctor.end");

  // Enter the loop, setting up a phi for the current location to initialize.
  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = createBasicBlock("arrayctor.loop");
  EmitBlock(loopBB);
  llvm::PHINode *cur = Builder.CreatePHI(arrayBegin->getType(), 2,
                                         "arrayctor.cur");
  cur->addIncoming(arrayBegin, entryBB);

  // Inside the loop body, emit the constructor call on the array element.

  QualType type = getContext().getTypeDeclType(ctor->getParent());

  // Zero initialize the storage, if requested.
  if (zeroInitialize)
    EmitNullInitialization(cur, type);

  // C++ [class.temporary]p4:
  // There are two contexts in which temporaries are destroyed at a different
  // point than the end of the full-expression. The first context is when a
  // default constructor is called to initialize an element of an array.
  // If the constructor has one or more default arguments, the destruction of
  // every temporary created in a default argument expression is sequenced
  // before the construction of the next array element, if any.

  {
    RunCleanupsScope Scope(*this);

    // Evaluate the constructor and its arguments in a regular
    // partial-destroy cleanup.
    if (getLangOpts().Exceptions &&
        !ctor->getParent()->hasTrivialDestructor()) {
      Destroyer *destroyer = destroyCXXObject;
      pushRegularPartialArrayCleanup(arrayBegin, cur, type, *destroyer);
    }

    EmitCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/ false,
                           /*Delegating=*/false, cur, argBegin, argEnd);
  }

  // Go to the next element.
  llvm::Value *next =
    Builder.CreateInBoundsGEP(cur, llvm::ConstantInt::get(SizeTy, 1),
                              "arrayctor.next");
  cur->addIncoming(next, Builder.GetInsertBlock());

  // Check whether that's the end of the loop.
  llvm::Value *done = Builder.CreateICmpEQ(next, arrayEnd, "arrayctor.done");
  llvm::BasicBlock *contBB = createBasicBlock("arrayctor.cont");
  Builder.CreateCondBr(done, contBB, loopBB);

  // Patch the earlier check to skip over the loop.
  if (zeroCheckBranch) zeroCheckBranch->setSuccessor(0, contBB);

  EmitBlock(contBB);
}

void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF,
                                       llvm::Value *addr,
                                       QualType type) {
  const RecordType *rtype = type->castAs<RecordType>();
  const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
  const CXXDestructorDecl *dtor = record->getDestructor();
  assert(!dtor->isTrivial());
  CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false,
                            /*Delegating=*/false, addr);
}

void
CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
                                        CXXCtorType Type, bool ForVirtualBase,
                                        bool Delegating,
                                        llvm::Value *This,
                                        CallExpr::const_arg_iterator ArgBeg,
                                        CallExpr::const_arg_iterator ArgEnd) {

  CGDebugInfo *DI = getDebugInfo();
  if (DI &&
      CGM.getCodeGenOpts().getDebugInfo() == CodeGenOptions::LimitedDebugInfo) {
    // If debug info for this class has not been emitted then this is the
    // right time to do so.

void
CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
                                        CXXCtorType Type, bool ForVirtualBase,
                                        bool Delegating,
                                        llvm::Value *This,
                                        CallExpr::const_arg_iterator ArgBeg,
                                        CallExpr::const_arg_iterator ArgEnd) {
  CGDebugInfo *DI = getDebugInfo();
  if (DI &&
      CGM.getCodeGenOpts().getDebugInfo() == CodeGenOptions::LimitedDebugInfo) {
    // If debug info for this class has not been emitted then this is the
    // right time to do so.
    const CXXRecordDecl *Parent = D->getParent();
    DI->getOrCreateRecordType(CGM.getContext().getTypeDeclType(Parent),
                              Parent->getLocation());
  }

  // If this is a trivial constructor, just emit what's needed.
  if (D->isTrivial()) {
    if (ArgBeg == ArgEnd) {
      // Trivial default constructor, no codegen required.
      assert(D->isDefaultConstructor() &&
             "trivial 0-arg ctor not a default ctor");
      return;
    }

    assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
    assert(D->isCopyOrMoveConstructor() &&
           "trivial 1-arg ctor not a copy/move ctor");

    const Expr *E = (*ArgBeg);
    QualType Ty = E->getType();
    llvm::Value *Src = EmitLValue(E).getAddress();
    EmitAggregateCopy(This, Src, Ty);
    return;
  }

  // Non-trivial constructors are handled in an ABI-specific manner.
  llvm::Value *Callee = CGM.getCXXABI().EmitConstructorCall(*this, D, Type,
                            ForVirtualBase, Delegating, This, ArgBeg, ArgEnd);
  if (CGM.getCXXABI().HasThisReturn(CurGD) &&
      CGM.getCXXABI().HasThisReturn(GlobalDecl(D, Type)))
    CalleeWithThisReturn = Callee;
}

void
CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
                                        llvm::Value *This, llvm::Value *Src,
                                        CallExpr::const_arg_iterator ArgBeg,
                                        CallExpr::const_arg_iterator ArgEnd) {
  if (D->isTrivial()) {
    assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
    assert(D->isCopyOrMoveConstructor() &&
           "trivial 1-arg ctor not a copy/move ctor");
    EmitAggregateCopy(This, Src, (*ArgBeg)->getType());
    return;
  }
  llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, clang::Ctor_Complete);
  assert(D->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  const FunctionProtoType *FPT = D->getType()->getAs<FunctionProtoType>();

  CallArgList Args;

  // Push the this ptr.
  Args.add(RValue::get(This), D->getThisType(getContext()));

  // Push the src ptr.
  QualType QT = *(FPT->arg_type_begin());
  llvm::Type *t = CGM.getTypes().ConvertType(QT);
  Src = Builder.CreateBitCast(Src, t);
  Args.add(RValue::get(Src), QT);

  // Skip over first argument (Src).
  ++ArgBeg;
  CallExpr::const_arg_iterator Arg = ArgBeg;
  for (FunctionProtoType::arg_type_iterator I = FPT->arg_type_begin()+1,
       E = FPT->arg_type_end(); I != E; ++I, ++Arg) {
    assert(Arg != ArgEnd && "Running over edge of argument list!");
    EmitCallArg(Args, *Arg, *I);
  }
  // Either we've emitted all the call args, or we have a call to a
  // variadic function.
  assert((Arg == ArgEnd || FPT->isVariadic()) &&
         "Extra arguments in non-variadic function!");
  // If we still have any arguments, emit them using the type of the argument.
  for (; Arg != ArgEnd; ++Arg) {
    QualType ArgType = Arg->getType();
    EmitCallArg(Args, *Arg, ArgType);
  }

  EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, RequiredArgs::All),
           Callee, ReturnValueSlot(), Args, D);
}
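
// EmitDelegateCXXConstructorCall below forwards the current constructor's
// own parameters (plus a VTT, if one is required) to another variant of the
// same constructor. A sketch of the main use: for a class with no virtual
// bases, e.g.
//
//   struct S { S(int i); };
//
// the complete-object constructor of S can be emitted as a simple call to
// the base-object constructor with the same arguments.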

void
CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                                CXXCtorType CtorType,
                                                const FunctionArgList &Args) {
  CallArgList DelegateArgs;

  FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
  assert(I != E && "no parameters to constructor");

  // this
  DelegateArgs.add(RValue::get(LoadCXXThis()), (*I)->getType());
  ++I;

  // vtt
  if (llvm::Value *VTT = GetVTTParameter(GlobalDecl(Ctor, CtorType),
                                         /*ForVirtualBase=*/false,
                                         /*Delegating=*/true)) {
    QualType VoidPP = getContext().getPointerType(getContext().VoidPtrTy);
    DelegateArgs.add(RValue::get(VTT), VoidPP);

    if (CodeGenVTables::needsVTTParameter(CurGD)) {
      assert(I != E && "cannot skip vtt parameter, already done with args");
      assert((*I)->getType() == VoidPP && "skipping parameter not of vtt type");
      ++I;
    }
  }

  // Explicit arguments.
  for (; I != E; ++I) {
    const VarDecl *param = *I;
    EmitDelegateCallArg(DelegateArgs, param);
  }

  llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(Ctor, CtorType);
  EmitCall(CGM.getTypes().arrangeCXXConstructorDeclaration(Ctor, CtorType),
           Callee, ReturnValueSlot(), DelegateArgs, Ctor);
  if (CGM.getCXXABI().HasThisReturn(CurGD) &&
      CGM.getCXXABI().HasThisReturn(GlobalDecl(Ctor, CtorType)))
    CalleeWithThisReturn = Callee;
}

namespace {
  struct CallDelegatingCtorDtor : EHScopeStack::Cleanup {
    const CXXDestructorDecl *Dtor;
    llvm::Value *Addr;
    CXXDtorType Type;

    CallDelegatingCtorDtor(const CXXDestructorDecl *D, llvm::Value *Addr,
                           CXXDtorType Type)
      : Dtor(D), Addr(Addr), Type(Type) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      CGF.EmitCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false,
                                /*Delegating=*/true, Addr);
    }
  };
}
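
// Example for EmitDelegatingCXXConstructorCall below (a sketch): given the
// C++11 delegating constructor
//
//   struct S {
//     S();
//     S(int) : S() {}   // delegates to S()
//   };
//
// the body of S(int) first invokes S() on the complete object. Once S()
// has finished, 'this' is a fully-constructed S, so if the remainder of
// S(int) throws, the destructor must run; that is what the
// CallDelegatingCtorDtor cleanup above arranges.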

void
CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                                  const FunctionArgList &Args) {
  assert(Ctor->isDelegatingConstructor());

  llvm::Value *ThisPtr = LoadCXXThis();

  QualType Ty = getContext().getTagDeclType(Ctor->getParent());
  CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
  AggValueSlot AggSlot =
    AggValueSlot::forAddr(ThisPtr, Alignment, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased);

  EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot);

  const CXXRecordDecl *ClassDecl = Ctor->getParent();
  if (CGM.getLangOpts().Exceptions && !ClassDecl->hasTrivialDestructor()) {
    CXXDtorType Type =
      CurGD.getCtorType() == Ctor_Complete ? Dtor_Complete : Dtor_Base;

    EHStack.pushCleanup<CallDelegatingCtorDtor>(EHCleanup,
                                                ClassDecl->getDestructor(),
                                                ThisPtr, Type);
  }
}

void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
                                            CXXDtorType Type,
                                            bool ForVirtualBase,
                                            bool Delegating,
                                            llvm::Value *This) {
  llvm::Value *VTT = GetVTTParameter(GlobalDecl(DD, Type),
                                     ForVirtualBase, Delegating);
  llvm::Value *Callee = 0;
  if (getLangOpts().AppleKext)
    Callee = BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());

  if (!Callee)
    Callee = CGM.GetAddrOfCXXDestructor(DD, Type);

  // FIXME: Provide a source location here.
  EmitCXXMemberCall(DD, SourceLocation(), Callee, ReturnValueSlot(), This,
                    VTT, getContext().getPointerType(getContext().VoidPtrTy),
                    0, 0);
  if (CGM.getCXXABI().HasThisReturn(CurGD) &&
      CGM.getCXXABI().HasThisReturn(GlobalDecl(DD, Type)))
    CalleeWithThisReturn = Callee;
}

namespace {
  struct CallLocalDtor : EHScopeStack::Cleanup {
    const CXXDestructorDecl *Dtor;
    llvm::Value *Addr;

    CallLocalDtor(const CXXDestructorDecl *D, llvm::Value *Addr)
      : Dtor(D), Addr(Addr) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                                /*ForVirtualBase=*/false,
                                /*Delegating=*/false, Addr);
    }
  };
}

void CodeGenFunction::PushDestructorCleanup(const CXXDestructorDecl *D,
                                            llvm::Value *Addr) {
  EHStack.pushCleanup<CallLocalDtor>(NormalAndEHCleanup, D, Addr);
}

void CodeGenFunction::PushDestructorCleanup(QualType T, llvm::Value *Addr) {
  CXXRecordDecl *ClassDecl = T->getAsCXXRecordDecl();
  if (!ClassDecl) return;
  if (ClassDecl->hasTrivialDestructor()) return;

  const CXXDestructorDecl *D = ClassDecl->getDestructor();
  assert(D && D->isUsed() && "destructor not marked as used!");
  PushDestructorCleanup(D, Addr);
}

llvm::Value *
CodeGenFunction::GetVirtualBaseClassOffset(llvm::Value *This,
                                           const CXXRecordDecl *ClassDecl,
                                           const CXXRecordDecl *BaseClassDecl) {
  llvm::Value *VTablePtr = GetVTablePtr(This, Int8PtrTy);
  CharUnits VBaseOffsetOffset =
    CGM.getVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
                                                      BaseClassDecl);

  llvm::Value *VBaseOffsetPtr =
    Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
                               "vbase.offset.ptr");
  llvm::Type *PtrDiffTy =
    ConvertType(getContext().getPointerDiffType());

  VBaseOffsetPtr = Builder.CreateBitCast(VBaseOffsetPtr,
                                         PtrDiffTy->getPointerTo());

  llvm::Value *VBaseOffset = Builder.CreateLoad(VBaseOffsetPtr, "vbase.offset");

  return VBaseOffset;
}
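
// A sketch of what GetVirtualBaseClassOffset above reads (Itanium C++ ABI;
// illustrative, not normative): for
//
//   struct A { virtual void f(); };
//   struct B : virtual A { virtual void g(); };
//
// B's vtable stores, at a fixed negative offset from its address point, the
// byte offset from a B object to its A subobject. The "virtual base offset
// offset" is where that slot lives relative to the vtable pointer; loading
// the slot yields the actual A-subobject offset for the dynamic type.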

void
CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
                                         const CXXRecordDecl *NearestVBase,
                                         CharUnits OffsetFromNearestVBase,
                                         llvm::Constant *VTable,
                                         const CXXRecordDecl *VTableClass) {
  const CXXRecordDecl *RD = Base.getBase();

  // Compute the address point.
  llvm::Value *VTableAddressPoint;

  // Check if we need to use a vtable from the VTT.
  if (CodeGenVTables::needsVTTParameter(CurGD) &&
      (RD->getNumVBases() || NearestVBase)) {
    // Get the secondary vpointer index.
    uint64_t VirtualPointerIndex =
      CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);

    // Load the VTT.
    llvm::Value *VTT = LoadCXXVTT();
    if (VirtualPointerIndex)
      VTT = Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);

    // And load the address point from the VTT.
    VTableAddressPoint = Builder.CreateLoad(VTT);
  } else {
    uint64_t AddressPoint =
      CGM.getVTableContext().getVTableLayout(VTableClass).getAddressPoint(Base);
    VTableAddressPoint =
      Builder.CreateConstInBoundsGEP2_64(VTable, 0, AddressPoint);
  }

  // Compute where to store the address point.
  llvm::Value *VirtualOffset = 0;
  CharUnits NonVirtualOffset = CharUnits::Zero();

  if (CodeGenVTables::needsVTTParameter(CurGD) && NearestVBase) {
    // We need to use the virtual base offset offset because the virtual base
    // might have a different offset in the most derived class.
    VirtualOffset = GetVirtualBaseClassOffset(LoadCXXThis(), VTableClass,
                                              NearestVBase);
    NonVirtualOffset = OffsetFromNearestVBase;
  } else {
    // We can just use the base offset in the complete class.
    NonVirtualOffset = Base.getBaseOffset();
  }

  // Apply the offsets.
  llvm::Value *VTableField = LoadCXXThis();

  if (!NonVirtualOffset.isZero() || VirtualOffset)
    VTableField = ApplyNonVirtualAndVirtualOffset(*this, VTableField,
                                                  NonVirtualOffset,
                                                  VirtualOffset);

  // Finally, store the address point.
  llvm::Type *AddressPointPtrTy =
    VTableAddressPoint->getType()->getPointerTo();
  VTableField = Builder.CreateBitCast(VTableField, AddressPointPtrTy);
  llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField);
  CGM.DecorateInstruction(Store, CGM.getTBAAInfoForVTablePtr());
}
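
// Example of when the VTT path in InitializeVTablePointer above is taken
// (a sketch):
//
//   struct A { virtual void f(); };
//   struct B : virtual A { B(); };
//   struct C : B { C(); };
//
// While C() constructs its B base subobject, B's base-object constructor
// must not install B's ordinary vtable: the offset to the virtual A base
// is different inside a C. Instead it loads the address point of a
// construction vtable from the VTT that C() passed in.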

void
CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
                                          const CXXRecordDecl *NearestVBase,
                                          CharUnits OffsetFromNearestVBase,
                                          bool BaseIsNonVirtualPrimaryBase,
                                          llvm::Constant *VTable,
                                          const CXXRecordDecl *VTableClass,
                                          VisitedVirtualBasesSetTy& VBases) {
  // If this base is a non-virtual primary base the address point has already
  // been set.
  if (!BaseIsNonVirtualPrimaryBase) {
    // Initialize the vtable pointer for this base.
    InitializeVTablePointer(Base, NearestVBase, OffsetFromNearestVBase,
                            VTable, VTableClass);
  }

  const CXXRecordDecl *RD = Base.getBase();

  // Traverse bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    CXXRecordDecl *BaseDecl
      = cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // Ignore classes without a vtable.
    if (!BaseDecl->isDynamicClass())
      continue;

    CharUnits BaseOffset;
    CharUnits BaseOffsetFromNearestVBase;
    bool BaseDeclIsNonVirtualPrimaryBase;

    if (I->isVirtual()) {
      // Check if we've visited this virtual base before.
      if (!VBases.insert(BaseDecl))
        continue;

      const ASTRecordLayout &Layout =
        getContext().getASTRecordLayout(VTableClass);

      BaseOffset = Layout.getVBaseClassOffset(BaseDecl);
      BaseOffsetFromNearestVBase = CharUnits::Zero();
      BaseDeclIsNonVirtualPrimaryBase = false;
    } else {
      const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

      BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
      BaseOffsetFromNearestVBase =
        OffsetFromNearestVBase + Layout.getBaseClassOffset(BaseDecl);
      BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl;
    }

    InitializeVTablePointers(BaseSubobject(BaseDecl, BaseOffset),
                             I->isVirtual() ? BaseDecl : NearestVBase,
                             BaseOffsetFromNearestVBase,
                             BaseDeclIsNonVirtualPrimaryBase,
                             VTable, VTableClass, VBases);
  }
}

void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
  // Ignore classes without a vtable.
  if (!RD->isDynamicClass())
    return;

  // Get the VTable.
  llvm::Constant *VTable = CGM.getVTables().GetAddrOfVTable(RD);

  // Initialize the vtable pointers for this class and all of its bases.
  VisitedVirtualBasesSetTy VBases;
  InitializeVTablePointers(BaseSubobject(RD, CharUnits::Zero()),
                           /*NearestVBase=*/0,
                           /*OffsetFromNearestVBase=*/CharUnits::Zero(),
                           /*BaseIsNonVirtualPrimaryBase=*/false,
                           VTable, RD, VBases);
}

llvm::Value *CodeGenFunction::GetVTablePtr(llvm::Value *This,
                                           llvm::Type *Ty) {
  llvm::Value *VTablePtrSrc = Builder.CreateBitCast(This, Ty->getPointerTo());
  llvm::Instruction *VTable = Builder.CreateLoad(VTablePtrSrc, "vtable");
  CGM.DecorateInstruction(VTable, CGM.getTBAAInfoForVTablePtr());
  return VTable;
}

static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
  const Expr *E = Base;

  while (true) {
    E = E->IgnoreParens();
    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
      if (CE->getCastKind() == CK_DerivedToBase ||
          CE->getCastKind() == CK_UncheckedDerivedToBase ||
          CE->getCastKind() == CK_NoOp) {
        E = CE->getSubExpr();
        continue;
      }
    }

    break;
  }

  QualType DerivedType = E->getType();
  if (const PointerType *PTy = DerivedType->getAs<PointerType>())
    DerivedType = PTy->getPointeeType();

  return cast<CXXRecordDecl>(DerivedType->castAs<RecordType>()->getDecl());
}

// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
// quite what we want.
static const Expr *skipNoOpCastsAndParens(const Expr *E) {
  while (true) {
    if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
      E = PE->getSubExpr();
      continue;
    }

    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
      if (CE->getCastKind() == CK_NoOp) {
        E = CE->getSubExpr();
        continue;
      }
    }
    if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
      if (UO->getOpcode() == UO_Extension) {
        E = UO->getSubExpr();
        continue;
      }
    }
    return E;
  }
}
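
// For example (illustrative only): in
//
//   struct A { virtual void f(); };
//   void g(A a) { (__extension__ (a)).f(); }
//
// skipNoOpCastsAndParens strips the parentheses and the __extension__
// operator so that the devirtualization check below sees the DeclRefExpr
// for 'a', a variable of record type whose dynamic type is known statically.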

/// canDevirtualizeMemberFunctionCall - Checks whether the given virtual member
/// function call on the given expr can be devirtualized.
static bool canDevirtualizeMemberFunctionCall(const Expr *Base,
                                              const CXXMethodDecl *MD) {
  // If the most derived class is marked final, we know that no subclass can
  // override this member function and so we can devirtualize it. For example:
  //
  //   struct A { virtual void f(); };
  //   struct B final : A { };
  //
  //   void f(B *b) {
  //     b->f();
  //   }
  //
  const CXXRecordDecl *MostDerivedClassDecl = getMostDerivedClassDecl(Base);
  if (MostDerivedClassDecl->hasAttr<FinalAttr>())
    return true;

  // If the member function is marked 'final', we know that it can't be
  // overridden and can therefore devirtualize it.
  if (MD->hasAttr<FinalAttr>())
    return true;

  // Similarly, if the method's class is marked 'final', it can't be derived
  // from, so no override can exist and we can devirtualize the call.
  if (MD->getParent()->hasAttr<FinalAttr>())
    return true;

  Base = skipNoOpCastsAndParens(Base);
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
      // If the variable itself has record (non-pointer, non-reference) type,
      // its dynamic type is its static type, so we can devirtualize the call.
      return VD->getType()->isRecordType();
    }

    return false;
  }

  // We can always devirtualize calls on temporary object expressions.
  if (isa<CXXConstructExpr>(Base))
    return true;

  // And calls on bound temporaries.
  if (isa<CXXBindTemporaryExpr>(Base))
    return true;

  // Check if this is a call expr that returns a record type.
  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
    return CE->getCallReturnType()->isRecordType();

  // We can't devirtualize the call.
  return false;
}

static bool UseVirtualCall(ASTContext &Context,
                           const CXXOperatorCallExpr *CE,
                           const CXXMethodDecl *MD) {
  if (!MD->isVirtual())
    return false;

  // When building with -fapple-kext, all calls must go through the vtable,
  // since the kernel linker can do runtime patching of vtables.
  if (Context.getLangOpts().AppleKext)
    return true;

  return !canDevirtualizeMemberFunctionCall(CE->getArg(0), MD);
}

llvm::Value *
CodeGenFunction::EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
                                             const CXXMethodDecl *MD,
                                             llvm::Value *This) {
  llvm::FunctionType *fnType =
    CGM.getTypes().GetFunctionType(
                             CGM.getTypes().arrangeCXXMethodDeclaration(MD));

  if (UseVirtualCall(getContext(), E, MD))
    return BuildVirtualCall(MD, This, fnType);

  return CGM.GetAddrOfFunction(MD, fnType);
}
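
// The lambda forwarding helpers below implement conversions such as
// (a sketch):
//
//   auto l = [](int x) { return x + 1; };
//   int (*fp)(int) = l;   // conversion goes through a static invoker
//
// The generated invoker has no lambda object of its own: the delegating
// invoke body passes an undef 'this' (a captureless lambda has no state),
// while the block-invoke variant fetches the lambda copy captured by the
// block. Both simply forward their parameters to the lambda's operator(),
// which is the call EmitForwardingCallToLambda emits.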

void CodeGenFunction::EmitForwardingCallToLambda(const CXXRecordDecl *lambda,
                                                 CallArgList &callArgs) {
  // Look up the call operator.
  DeclarationName operatorName
    = getContext().DeclarationNames.getCXXOperatorName(OO_Call);
  CXXMethodDecl *callOperator =
    cast<CXXMethodDecl>(lambda->lookup(operatorName).front());

  // Get the address of the call operator.
  const CGFunctionInfo &calleeFnInfo =
    CGM.getTypes().arrangeCXXMethodDeclaration(callOperator);
  llvm::Value *callee =
    CGM.GetAddrOfFunction(GlobalDecl(callOperator),
                          CGM.getTypes().GetFunctionType(calleeFnInfo));

  // Prepare the return slot.
  const FunctionProtoType *FPT =
    callOperator->getType()->castAs<FunctionProtoType>();
  QualType resultType = FPT->getResultType();
  ReturnValueSlot returnSlot;
  if (!resultType->isVoidType() &&
      calleeFnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
      !hasScalarEvaluationKind(calleeFnInfo.getReturnType()))
    returnSlot = ReturnValueSlot(ReturnValue, resultType.isVolatileQualified());

  // We don't need to separately arrange the call arguments because
  // the call can't be variadic anyway --- it's impossible to forward
  // variadic arguments.

  // Now emit our call.
  RValue RV = EmitCall(calleeFnInfo, callee, returnSlot,
                       callArgs, callOperator);

  // If necessary, copy the returned value into the slot.
  if (!resultType->isVoidType() && returnSlot.isNull())
    EmitReturnOfRValue(RV, resultType);
  else
    EmitBranchThroughCleanup(ReturnBlock);
}

void CodeGenFunction::EmitLambdaBlockInvokeBody() {
  const BlockDecl *BD = BlockInfo->getBlockDecl();
  const VarDecl *variable = BD->capture_begin()->getVariable();
  const CXXRecordDecl *Lambda = variable->getType()->getAsCXXRecordDecl();

  // Start building arguments for the forwarding call.
  CallArgList CallArgs;

  QualType ThisType =
    getContext().getPointerType(getContext().getRecordType(Lambda));
  llvm::Value *ThisPtr = GetAddrOfBlockDecl(variable, false);
  CallArgs.add(RValue::get(ThisPtr), ThisType);

  // Add the rest of the parameters.
  for (BlockDecl::param_const_iterator I = BD->param_begin(),
       E = BD->param_end(); I != E; ++I) {
    ParmVarDecl *param = *I;
    EmitDelegateCallArg(CallArgs, param);
  }

  EmitForwardingCallToLambda(Lambda, CallArgs);
}

void CodeGenFunction::EmitLambdaToBlockPointerBody(FunctionArgList &Args) {
  if (cast<CXXMethodDecl>(CurCodeDecl)->isVariadic()) {
    // FIXME: Making this work correctly is nasty because it requires either
    // cloning the body of the call operator or making the call operator
    // forward.
    CGM.ErrorUnsupported(CurCodeDecl, "lambda conversion to variadic function");
    return;
  }

  EmitFunctionBody(Args);
}

void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
  const CXXRecordDecl *Lambda = MD->getParent();

  // Start building arguments for the forwarding call.
  CallArgList CallArgs;

  QualType ThisType =
    getContext().getPointerType(getContext().getRecordType(Lambda));
  llvm::Value *ThisPtr = llvm::UndefValue::get(getTypes().ConvertType(ThisType));
  CallArgs.add(RValue::get(ThisPtr), ThisType);

  // Add the rest of the parameters.
  for (FunctionDecl::param_const_iterator I = MD->param_begin(),
       E = MD->param_end(); I != E; ++I) {
    ParmVarDecl *param = *I;
    EmitDelegateCallArg(CallArgs, param);
  }

  EmitForwardingCallToLambda(Lambda, CallArgs);
}

void CodeGenFunction::EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD) {
  if (MD->isVariadic()) {
    // FIXME: Making this work correctly is nasty because it requires either
    // cloning the body of the call operator or making the call operator
    // forward.
    CGM.ErrorUnsupported(MD, "lambda conversion to variadic function");
    return;
  }

  EmitLambdaDelegatingInvokeBody(MD);
}