//===--- CGClass.cpp - Emit LLVM Code for C++ classes ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with C++ code generation of classes
//
//===----------------------------------------------------------------------===//

#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"

using namespace clang;
using namespace CodeGen;

static CharUnits
ComputeNonVirtualBaseClassOffset(ASTContext &Context,
                                 const CXXRecordDecl *DerivedClass,
                                 CastExpr::path_const_iterator Start,
                                 CastExpr::path_const_iterator End) {
  CharUnits Offset = CharUnits::Zero();

  const CXXRecordDecl *RD = DerivedClass;

  for (CastExpr::path_const_iterator I = Start; I != End; ++I) {
    const CXXBaseSpecifier *Base = *I;
    assert(!Base->isVirtual() && "Should not see virtual bases here!");

    // Get the layout.
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());

    // Add the offset.
    Offset += Layout.getBaseClassOffset(BaseDecl);

    RD = BaseDecl;
  }

  return Offset;
}

llvm::Constant *
CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
                                   CastExpr::path_const_iterator PathBegin,
                                   CastExpr::path_const_iterator PathEnd) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  CharUnits Offset =
    ComputeNonVirtualBaseClassOffset(getContext(), ClassDecl,
                                     PathBegin, PathEnd);
  if (Offset.isZero())
    return 0;

  llvm::Type *PtrDiffTy =
    Types.ConvertType(getContext().getPointerDiffType());

  return llvm::ConstantInt::get(PtrDiffTy, Offset.getQuantity());
}
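
// For illustration only (hypothetical types, not from this file): given
//
//   struct A { int a; };
//   struct B { int b; };
//   struct C : A, B { int c; };
//
// the derived-to-base path C -> B contributes B's offset within C's layout
// (sizeof(A) under a typical ABI), while the path C -> A contributes zero,
// in which case GetNonVirtualBaseClassOffset returns null and callers can
// skip the pointer adjustment entirely.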

/// Gets the address of a direct base class within a complete object.
/// This should only be used for (1) non-virtual bases or (2) virtual bases
/// when the type is known to be complete (e.g. in complete destructors).
///
/// The object pointed to by 'This' is assumed to be non-null.
llvm::Value *
CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
                                                  const CXXRecordDecl *Derived,
                                                  const CXXRecordDecl *Base,
                                                  bool BaseIsVirtual) {
  // 'this' must be a pointer (in some address space) to Derived.
  assert(This->getType()->isPointerTy() &&
         cast<llvm::PointerType>(This->getType())->getElementType()
           == ConvertType(Derived));

  // Compute the offset of the virtual base.
  CharUnits Offset;
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived);
  if (BaseIsVirtual)
    Offset = Layout.getVBaseClassOffset(Base);
  else
    Offset = Layout.getBaseClassOffset(Base);

  // Shift and cast down to the base type.
  // TODO: for complete types, this should be possible with a GEP.
  llvm::Value *V = This;
  if (Offset.isPositive()) {
    V = Builder.CreateBitCast(V, Int8PtrTy);
    V = Builder.CreateConstInBoundsGEP1_64(V, Offset.getQuantity());
  }
  V = Builder.CreateBitCast(V, ConvertType(Base)->getPointerTo());

  return V;
}

static llvm::Value *
ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ptr,
                                CharUnits nonVirtualOffset,
                                llvm::Value *virtualOffset) {
  // Assert that we have something to do.
  assert(!nonVirtualOffset.isZero() || virtualOffset != 0);

  // Compute the offset from the static and dynamic components.
  llvm::Value *baseOffset;
  if (!nonVirtualOffset.isZero()) {
    baseOffset = llvm::ConstantInt::get(CGF.PtrDiffTy,
                                        nonVirtualOffset.getQuantity());
    if (virtualOffset) {
      baseOffset = CGF.Builder.CreateAdd(virtualOffset, baseOffset);
    }
  } else {
    baseOffset = virtualOffset;
  }

  // Apply the base offset.
  ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8PtrTy);
  ptr = CGF.Builder.CreateInBoundsGEP(ptr, baseOffset, "add.ptr");
  return ptr;
}

llvm::Value *
CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
                                       const CXXRecordDecl *Derived,
                                       CastExpr::path_const_iterator PathBegin,
                                       CastExpr::path_const_iterator PathEnd,
                                       bool NullCheckValue) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  CastExpr::path_const_iterator Start = PathBegin;
  const CXXRecordDecl *VBase = 0;

  // Sema has done some convenient canonicalization here: if the
  // access path involved any virtual steps, the conversion path will
  // *start* with a step down to the correct virtual base subobject,
  // and hence will not require any further steps.
  if ((*Start)->isVirtual()) {
    VBase =
      cast<CXXRecordDecl>((*Start)->getType()->getAs<RecordType>()->getDecl());
    ++Start;
  }

  // Compute the static offset of the ultimate destination within its
  // allocating subobject (the virtual base, if there is one, or else
  // the "complete" object that we see).
  CharUnits NonVirtualOffset =
    ComputeNonVirtualBaseClassOffset(getContext(), VBase ? VBase : Derived,
                                     Start, PathEnd);

  // If there's a virtual step, we can sometimes "devirtualize" it.
  // For now, that's limited to when the derived type is final.
  // TODO: "devirtualize" this for accesses to known-complete objects.
  if (VBase && Derived->hasAttr<FinalAttr>()) {
    const ASTRecordLayout &layout = getContext().getASTRecordLayout(Derived);
    CharUnits vBaseOffset = layout.getVBaseClassOffset(VBase);
    NonVirtualOffset += vBaseOffset;
    VBase = 0; // we no longer have a virtual step
  }

  // Get the base pointer type.
  llvm::Type *BasePtrTy =
    ConvertType((PathEnd[-1])->getType())->getPointerTo();

  // If the static offset is zero and we don't have a virtual step,
  // just do a bitcast; null checks are unnecessary.
  if (NonVirtualOffset.isZero() && !VBase) {
    return Builder.CreateBitCast(Value, BasePtrTy);
  }

  llvm::BasicBlock *origBB = 0;
  llvm::BasicBlock *endBB = 0;

  // Skip over the offset (and the vtable load) if we're supposed to
  // null-check the pointer.
  if (NullCheckValue) {
    origBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("cast.notnull");
    endBB = createBasicBlock("cast.end");

    llvm::Value *isNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(isNull, endBB, notNullBB);
    EmitBlock(notNullBB);
  }

  // Compute the virtual offset.
  llvm::Value *VirtualOffset = 0;
  if (VBase) {
    VirtualOffset =
      CGM.getCXXABI().GetVirtualBaseClassOffset(*this, Value, Derived, VBase);
  }

  // Apply both offsets.
  Value = ApplyNonVirtualAndVirtualOffset(*this, Value,
                                          NonVirtualOffset,
                                          VirtualOffset);

  // Cast to the destination type.
  Value = Builder.CreateBitCast(Value, BasePtrTy);

  // Build a phi if we needed a null check.
  if (NullCheckValue) {
    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    Builder.CreateBr(endBB);
    EmitBlock(endBB);

    llvm::PHINode *PHI = Builder.CreatePHI(BasePtrTy, 2, "cast.result");
    PHI->addIncoming(Value, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(BasePtrTy), origBB);
    Value = PHI;
  }

  return Value;
}
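
// A sketch of the "devirtualize" case above, with a hypothetical hierarchy:
//
//   struct A { virtual ~A(); };
//   struct B final : virtual A { };
//
// Converting a B* to an A* normally requires loading the virtual base
// offset out of the vtable, but since B is final the object cannot be a
// base subobject of anything more derived, so the offset is known
// statically from B's record layout and no vtable load is emitted.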

llvm::Value *
CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
                                          const CXXRecordDecl *Derived,
                                        CastExpr::path_const_iterator PathBegin,
                                          CastExpr::path_const_iterator PathEnd,
                                          bool NullCheckValue) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  QualType DerivedTy =
    getContext().getCanonicalType(getContext().getTagDeclType(Derived));
  llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo();

  llvm::Value *NonVirtualOffset =
    CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd);

  if (!NonVirtualOffset) {
    // No offset, we can just cast back.
    return Builder.CreateBitCast(Value, DerivedPtrTy);
  }

  llvm::BasicBlock *CastNull = 0;
  llvm::BasicBlock *CastNotNull = 0;
  llvm::BasicBlock *CastEnd = 0;

  if (NullCheckValue) {
    CastNull = createBasicBlock("cast.null");
    CastNotNull = createBasicBlock("cast.notnull");
    CastEnd = createBasicBlock("cast.end");

    llvm::Value *IsNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  // Apply the offset.
  Value = Builder.CreateBitCast(Value, Int8PtrTy);
  Value = Builder.CreateGEP(Value, Builder.CreateNeg(NonVirtualOffset),
                            "sub.ptr");

  // Just cast.
  Value = Builder.CreateBitCast(Value, DerivedPtrTy);

  if (NullCheckValue) {
    Builder.CreateBr(CastEnd);
    EmitBlock(CastNull);
    Builder.CreateBr(CastEnd);
    EmitBlock(CastEnd);

    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()),
                     CastNull);
    Value = PHI;
  }

  return Value;
}

llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD,
                                              bool ForVirtualBase,
                                              bool Delegating) {
  if (!CGM.getCXXABI().NeedsVTTParameter(GD)) {
    // This constructor/destructor does not need a VTT parameter.
    return 0;
  }

  const CXXRecordDecl *RD = cast<CXXMethodDecl>(CurCodeDecl)->getParent();
  const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent();

  llvm::Value *VTT;

  uint64_t SubVTTIndex;

  if (Delegating) {
    // If this is a delegating constructor call, just load the VTT.
    return LoadCXXVTT();
  } else if (RD == Base) {
    // If the record matches the base, this is the complete ctor/dtor
    // variant calling the base variant in a class with virtual bases.
    assert(!CGM.getCXXABI().NeedsVTTParameter(CurGD) &&
           "doing no-op VTT offset in base dtor/ctor?");
    assert(!ForVirtualBase && "Can't have same class as virtual base!");
    SubVTTIndex = 0;
  } else {
    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
    CharUnits BaseOffset = ForVirtualBase ?
      Layout.getVBaseClassOffset(Base) :
      Layout.getBaseClassOffset(Base);

    SubVTTIndex =
      CGM.getVTables().getSubVTTIndex(RD, BaseSubobject(Base, BaseOffset));
    assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!");
  }

  if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
    // A VTT parameter was passed to the constructor, use it.
    VTT = LoadCXXVTT();
    VTT = Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex);
  } else {
    // We're the complete constructor, so get the VTT by name.
    VTT = CGM.getVTables().GetAddrOfVTT(RD);
    VTT = Builder.CreateConstInBoundsGEP2_64(VTT, 0, SubVTTIndex);
  }

  return VTT;
}
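
// Rough picture of when a VTT comes into play (hypothetical hierarchy):
//
//   struct A { virtual void f(); };
//   struct B : virtual A { B(); };
//   struct C : virtual A { C(); };
//   struct D : B, C { D(); };
//
// While D's complete constructor runs the base constructors of B and C,
// the object must use the B-in-D and C-in-D construction vtables rather
// than B's or C's own. The VTT is the flat array of those vtable
// addresses, and the sub-VTT indices computed above select the slice that
// each base constructor receives.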

namespace {
  /// Call the destructor for a direct base class.
  struct CallBaseDtor : EHScopeStack::Cleanup {
    const CXXRecordDecl *BaseClass;
    bool BaseIsVirtual;
    CallBaseDtor(const CXXRecordDecl *Base, bool BaseIsVirtual)
      : BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const CXXRecordDecl *DerivedClass =
        cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent();

      const CXXDestructorDecl *D = BaseClass->getDestructor();
      llvm::Value *Addr =
        CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThis(),
                                                  DerivedClass, BaseClass,
                                                  BaseIsVirtual);
      CGF.EmitCXXDestructorCall(D, Dtor_Base, BaseIsVirtual,
                                /*Delegating=*/false, Addr);
    }
  };

  /// A visitor which checks whether an initializer uses 'this' in a
  /// way which requires the vtable to be properly set.
  struct DynamicThisUseChecker : EvaluatedExprVisitor<DynamicThisUseChecker> {
    typedef EvaluatedExprVisitor<DynamicThisUseChecker> super;

    bool UsesThis;

    DynamicThisUseChecker(ASTContext &C) : super(C), UsesThis(false) {}

    // Black-list all explicit and implicit references to 'this'.
    //
    // Do we need to worry about external references to 'this' derived
    // from arbitrary code? If so, then anything which runs arbitrary
    // external code might potentially access the vtable.
    void VisitCXXThisExpr(CXXThisExpr *E) { UsesThis = true; }
  };
}

static bool BaseInitializerUsesThis(ASTContext &C, const Expr *Init) {
  DynamicThisUseChecker Checker(C);
  Checker.Visit(const_cast<Expr*>(Init));
  return Checker.UsesThis;
}

static void EmitBaseInitializer(CodeGenFunction &CGF,
                                const CXXRecordDecl *ClassDecl,
                                CXXCtorInitializer *BaseInit,
                                CXXCtorType CtorType) {
  assert(BaseInit->isBaseInitializer() &&
         "Must have base initializer!");

  llvm::Value *ThisPtr = CGF.LoadCXXThis();

  const Type *BaseType = BaseInit->getBaseClass();
  CXXRecordDecl *BaseClassDecl =
    cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());

  bool isBaseVirtual = BaseInit->isBaseVirtual();

  // The base constructor doesn't construct virtual bases.
  if (CtorType == Ctor_Base && isBaseVirtual)
    return;

  // If the initializer for the base (other than the constructor
  // itself) accesses 'this' in any way, we need to initialize the
  // vtables.
  if (BaseInitializerUsesThis(CGF.getContext(), BaseInit->getInit()))
    CGF.InitializeVTablePointers(ClassDecl);

  // We can pretend to be a complete class because it only matters for
  // virtual bases, and we only do virtual bases for complete ctors.
  llvm::Value *V =
    CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl,
                                              BaseClassDecl,
                                              isBaseVirtual);
  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(BaseType);
  AggValueSlot AggSlot =
    AggValueSlot::forAddr(V, Alignment, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased);

  CGF.EmitAggExpr(BaseInit->getInit(), AggSlot);

  if (CGF.CGM.getLangOpts().Exceptions &&
      !BaseClassDecl->hasTrivialDestructor())
    CGF.EHStack.pushCleanup<CallBaseDtor>(EHCleanup, BaseClassDecl,
                                          isBaseVirtual);
}

static void EmitAggMemberInitializer(CodeGenFunction &CGF,
                                     LValue LHS,
                                     Expr *Init,
                                     llvm::Value *ArrayIndexVar,
                                     QualType T,
                                     ArrayRef<VarDecl *> ArrayIndexes,
                                     unsigned Index) {
  if (Index == ArrayIndexes.size()) {
    LValue LV = LHS;

    if (ArrayIndexVar) {
      // If we have an array index variable, load it and use it as an offset.
      // Then, increment the value.
      llvm::Value *Dest = LHS.getAddress();
      llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar);
      Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress");
      llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1);
      Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc");
      CGF.Builder.CreateStore(Next, ArrayIndexVar);

      // Update the LValue.
      LV.setAddress(Dest);
      CharUnits Align = CGF.getContext().getTypeAlignInChars(T);
      LV.setAlignment(std::min(Align, LV.getAlignment()));
    }

    switch (CGF.getEvaluationKind(T)) {
    case TEK_Scalar:
      CGF.EmitScalarInit(Init, /*decl*/ 0, LV, false);
      break;
    case TEK_Complex:
      CGF.EmitComplexExprIntoLValue(Init, LV, /*isInit*/ true);
      break;
    case TEK_Aggregate: {
      AggValueSlot Slot =
        AggValueSlot::forLValue(LV,
                                AggValueSlot::IsDestructed,
                                AggValueSlot::DoesNotNeedGCBarriers,
                                AggValueSlot::IsNotAliased);

      CGF.EmitAggExpr(Init, Slot);
      break;
    }
    }

    return;
  }

  const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(T);
  assert(Array && "Array initialization without the array type?");
  llvm::Value *IndexVar
    = CGF.GetAddrOfLocalVar(ArrayIndexes[Index]);
  assert(IndexVar && "Array index variable not loaded");

  // Initialize this index variable to zero.
  llvm::Value* Zero
    = llvm::Constant::getNullValue(
        CGF.ConvertType(CGF.getContext().getSizeType()));
  CGF.Builder.CreateStore(Zero, IndexVar);

  // Start the loop with a block that tests the condition.
  llvm::BasicBlock *CondBlock = CGF.createBasicBlock("for.cond");
  llvm::BasicBlock *AfterFor = CGF.createBasicBlock("for.end");

  CGF.EmitBlock(CondBlock);

  llvm::BasicBlock *ForBody = CGF.createBasicBlock("for.body");
  // Generate: if (loop-index < number-of-elements) fall to the loop body,
  // otherwise, go to the block after the for-loop.
  uint64_t NumElements = Array->getSize().getZExtValue();
  llvm::Value *Counter = CGF.Builder.CreateLoad(IndexVar);
  llvm::Value *NumElementsPtr =
    llvm::ConstantInt::get(Counter->getType(), NumElements);
  llvm::Value *IsLess = CGF.Builder.CreateICmpULT(Counter, NumElementsPtr,
                                                  "isless");

  // If the condition is true, execute the body.
  CGF.Builder.CreateCondBr(IsLess, ForBody, AfterFor);

  CGF.EmitBlock(ForBody);
  llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc");

  // Inside the loop body recurse to emit the inner loop or, eventually, the
  // constructor call.
  EmitAggMemberInitializer(CGF, LHS, Init, ArrayIndexVar,
                           Array->getElementType(), ArrayIndexes, Index + 1);

  CGF.EmitBlock(ContinueBlock);

  // Emit the increment of the loop counter.
  llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1);
  Counter = CGF.Builder.CreateLoad(IndexVar);
  NextVal = CGF.Builder.CreateAdd(Counter, NextVal, "inc");
  CGF.Builder.CreateStore(NextVal, IndexVar);

  // Finally, branch back up to the condition for the next iteration.
  CGF.EmitBranch(CondBlock);

  // Emit the fall-through block.
  CGF.EmitBlock(AfterFor, true);
}
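
// The control flow emitted above for a single array dimension looks roughly
// like this (illustrative pseudo-IR only; each nested dimension repeats the
// pattern with its own index variable):
//
//     store i64 0, i64* %index
//   for.cond:
//     %i = load i64* %index
//     %isless = icmp ult i64 %i, <number-of-elements>
//     br i1 %isless, label %for.body, label %for.end
//   for.body:
//     ; recurse: the inner dimension's loop, or the element initialization
//   for.inc:
//     store i64 (%i + 1), i64* %index
//     br label %for.cond
//   for.end: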

static void EmitMemberInitializer(CodeGenFunction &CGF,
                                  const CXXRecordDecl *ClassDecl,
                                  CXXCtorInitializer *MemberInit,
                                  const CXXConstructorDecl *Constructor,
                                  FunctionArgList &Args) {
  assert(MemberInit->isAnyMemberInitializer() &&
         "Must have member initializer!");
  assert(MemberInit->getInit() && "Must have initializer!");

  // non-static data member initializers.
  FieldDecl *Field = MemberInit->getAnyMember();
  QualType FieldType = Field->getType();

  llvm::Value *ThisPtr = CGF.LoadCXXThis();
  QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
  LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);

  if (MemberInit->isIndirectMemberInitializer()) {
    // If we are initializing an anonymous union field, drill down to
    // the field.
    IndirectFieldDecl *IndirectField = MemberInit->getIndirectMember();
    IndirectFieldDecl::chain_iterator I = IndirectField->chain_begin(),
      IEnd = IndirectField->chain_end();
    for ( ; I != IEnd; ++I)
      LHS = CGF.EmitLValueForFieldInitialization(LHS, cast<FieldDecl>(*I));
    FieldType = MemberInit->getIndirectMember()->getAnonField()->getType();
  } else {
    LHS = CGF.EmitLValueForFieldInitialization(LHS, Field);
  }

  // Special case: if we are in a copy or move constructor, and we are copying
  // an array of PODs or classes with trivial copy constructors, ignore the
  // AST and perform the copy we know is equivalent.
  // FIXME: This is hacky at best... if we had a bit more explicit information
  // in the AST, we could generalize it more easily.
  const ConstantArrayType *Array
    = CGF.getContext().getAsConstantArrayType(FieldType);
  if (Array && Constructor->isDefaulted() &&
      Constructor->isCopyOrMoveConstructor()) {
    QualType BaseElementTy = CGF.getContext().getBaseElementType(Array);
    CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());
    if (BaseElementTy.isPODType(CGF.getContext()) ||
        (CE && CE->getConstructor()->isTrivial())) {
      // Find the source pointer. We know it's the last argument because
      // we know we're in an implicit copy constructor.
      unsigned SrcArgIndex = Args.size() - 1;
      llvm::Value *SrcPtr
        = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex]));
      LValue ThisRHSLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
      LValue Src = CGF.EmitLValueForFieldInitialization(ThisRHSLV, Field);

      // Copy the aggregate.
      CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType,
                            LHS.isVolatileQualified());
      return;
    }
  }

  ArrayRef<VarDecl *> ArrayIndexes;
  if (MemberInit->getNumArrayIndices())
    ArrayIndexes = MemberInit->getArrayIndexes();
  CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit(), ArrayIndexes);
}

void CodeGenFunction::EmitInitializerForField(FieldDecl *Field,
                                              LValue LHS, Expr *Init,
                                             ArrayRef<VarDecl *> ArrayIndexes) {
  QualType FieldType = Field->getType();
  switch (getEvaluationKind(FieldType)) {
  case TEK_Scalar:
    if (LHS.isSimple()) {
      EmitExprAsInit(Init, Field, LHS, false);
    } else {
      RValue RHS = RValue::get(EmitScalarExpr(Init));
      EmitStoreThroughLValue(RHS, LHS);
    }
    break;
  case TEK_Complex:
    EmitComplexExprIntoLValue(Init, LHS, /*isInit*/ true);
    break;
  case TEK_Aggregate: {
    llvm::Value *ArrayIndexVar = 0;
    if (ArrayIndexes.size()) {
      llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

      // The LHS is a pointer to the first object we'll be constructing, as
      // a flat array.
      QualType BaseElementTy = getContext().getBaseElementType(FieldType);
      llvm::Type *BasePtr = ConvertType(BaseElementTy);
      BasePtr = llvm::PointerType::getUnqual(BasePtr);
      llvm::Value *BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(),
                                                       BasePtr);
      LHS = MakeAddrLValue(BaseAddrPtr, BaseElementTy);

      // Create an array index that will be used to walk over all of the
      // objects we're constructing.
      ArrayIndexVar = CreateTempAlloca(SizeTy, "object.index");
      llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
      Builder.CreateStore(Zero, ArrayIndexVar);

      // Emit the block variables for the array indices, if any.
      for (unsigned I = 0, N = ArrayIndexes.size(); I != N; ++I)
        EmitAutoVarDecl(*ArrayIndexes[I]);
    }

    EmitAggMemberInitializer(*this, LHS, Init, ArrayIndexVar, FieldType,
                             ArrayIndexes, 0);
  }
  }

  // Ensure that we destroy this object if an exception is thrown
  // later in the constructor.
  QualType::DestructionKind dtorKind = FieldType.isDestructedType();
  if (needsEHCleanup(dtorKind))
    pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
}

/// Checks whether the given constructor is a valid subject for the
/// complete-to-base constructor delegation optimization, i.e.
/// emitting the complete constructor as a simple call to the base
/// constructor.
static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor) {

  // Currently we disable the optimization for classes with virtual
  // bases because (1) the addresses of parameter variables need to be
  // consistent across all initializers but (2) the delegate function
  // call necessarily creates a second copy of the parameter variable.
  //
  // The limiting example (purely theoretical AFAIK):
  //   struct A { A(int &c) { c++; } };
  //   struct B : virtual A {
  //     B(int count) : A(count) { printf("%d\n", count); }
  //   };
  // ...although even this example could in principle be emitted as a
  // delegation since the address of the parameter doesn't escape.
  if (Ctor->getParent()->getNumVBases()) {
    // TODO: white-list trivial vbase initializers. This case wouldn't
    // be subject to the restrictions below.

    // TODO: white-list cases where:
    //  - there are no non-reference parameters to the constructor
    //  - the initializers don't access any non-reference parameters
    //  - the initializers don't take the address of non-reference
    //    parameters
    //  - etc.
    // If we ever add any of the above cases, remember that:
    //  - function-try-blocks will always blacklist this optimization
    //  - we need to perform the constructor prologue and cleanup in
    //    EmitConstructorBody.

    return false;
  }

  // We also disable the optimization for variadic functions because
  // it's impossible to "re-pass" varargs.
  if (Ctor->getType()->getAs<FunctionProtoType>()->isVariadic())
    return false;

  // FIXME: Decide if we can do a delegation of a delegating constructor.
  if (Ctor->isDelegatingConstructor())
    return false;

  return true;
}

/// EmitConstructorBody - Emits the body of the current constructor.
void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
  const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(CurGD.getDecl());
  CXXCtorType CtorType = CurGD.getCtorType();

  assert((CGM.getTarget().getCXXABI().hasConstructorVariants() ||
          CtorType == Ctor_Complete) &&
         "can only generate complete ctor for this ABI");

  // Before we go any further, try the complete->base constructor
  // delegation optimization.
  if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor) &&
      CGM.getTarget().getCXXABI().hasConstructorVariants()) {
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitLocation(Builder, Ctor->getLocEnd());
    EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args, Ctor->getLocEnd());
    return;
  }

  Stmt *Body = Ctor->getBody();

  // Enter the function-try-block before the constructor prologue if
  // applicable.
  bool IsTryBody = (Body && isa<CXXTryStmt>(Body));
  if (IsTryBody)
    EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);

  RunCleanupsScope RunCleanups(*this);

  // TODO: in restricted cases, we can emit the vbase initializers of
  // a complete ctor and then delegate to the base ctor.

  // Emit the constructor prologue, i.e. the base and member
  // initializers.
  EmitCtorPrologue(Ctor, CtorType, Args);

  // Emit the body of the statement.
  if (IsTryBody)
    EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
  else if (Body)
    EmitStmt(Body);

  // Emit any cleanup blocks associated with the member or base
  // initializers, which includes (along the exceptional path) the
  // destructors for those members and bases that were fully
  // constructed.
  RunCleanups.ForceCleanup();

  if (IsTryBody)
    ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
}
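
// Concretely (hypothetical example): for
//
//   struct S { S(int x); int m; };
//
// S has no virtual bases, so IsConstructorDelegationValid returns true and
// the complete-object constructor (C1 in the Itanium mangling) is emitted
// as nothing but a call to the base-object constructor (C2); in practice
// the two symbols can then end up as aliases of one another.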

namespace {
  /// RAII object to indicate that codegen is copying the value representation
  /// instead of the object representation. Useful when copying a struct or
  /// class which has uninitialized members and we're only performing
  /// lvalue-to-rvalue conversion on the object but not its members.
  class CopyingValueRepresentation {
  public:
    explicit CopyingValueRepresentation(CodeGenFunction &CGF)
        : CGF(CGF), SO(*CGF.SanOpts), OldSanOpts(CGF.SanOpts) {
      SO.Bool = false;
      SO.Enum = false;
      CGF.SanOpts = &SO;
    }
    ~CopyingValueRepresentation() {
      CGF.SanOpts = OldSanOpts;
    }
  private:
    CodeGenFunction &CGF;
    SanitizerOptions SO;
    const SanitizerOptions *OldSanOpts;
  };
}

namespace {
  class FieldMemcpyizer {
  public:
    FieldMemcpyizer(CodeGenFunction &CGF, const CXXRecordDecl *ClassDecl,
                    const VarDecl *SrcRec)
      : CGF(CGF), ClassDecl(ClassDecl), SrcRec(SrcRec),
        RecLayout(CGF.getContext().getASTRecordLayout(ClassDecl)),
        FirstField(0), LastField(0), FirstFieldOffset(0), LastFieldOffset(0),
        LastAddedFieldIndex(0) { }

    static bool isMemcpyableField(FieldDecl *F) {
      Qualifiers Qual = F->getType().getQualifiers();
      if (Qual.hasVolatile() || Qual.hasObjCLifetime())
        return false;
      return true;
    }

    void addMemcpyableField(FieldDecl *F) {
      if (FirstField == 0)
        addInitialField(F);
      else
        addNextField(F);
    }

    CharUnits getMemcpySize() const {
      unsigned LastFieldSize =
        LastField->isBitField() ?
          LastField->getBitWidthValue(CGF.getContext()) :
          CGF.getContext().getTypeSize(LastField->getType());
      uint64_t MemcpySizeBits =
        LastFieldOffset + LastFieldSize - FirstFieldOffset +
        CGF.getContext().getCharWidth() - 1;
      CharUnits MemcpySize =
        CGF.getContext().toCharUnitsFromBits(MemcpySizeBits);
      return MemcpySize;
    }

    void emitMemcpy() {
      // Give the subclass a chance to bail out if it feels the memcpy isn't
      // worth it (e.g. hasn't aggregated enough data).
      if (FirstField == 0) {
        return;
      }

      CharUnits Alignment;

      if (FirstField->isBitField()) {
        const CGRecordLayout &RL =
          CGF.getTypes().getCGRecordLayout(FirstField->getParent());
        const CGBitFieldInfo &BFInfo = RL.getBitFieldInfo(FirstField);
        Alignment = CharUnits::fromQuantity(BFInfo.StorageAlignment);
      } else {
        Alignment = CGF.getContext().getDeclAlign(FirstField);
      }

      assert((CGF.getContext().toCharUnitsFromBits(FirstFieldOffset) %
              Alignment) == 0 && "Bad field alignment.");

      CharUnits MemcpySize = getMemcpySize();
      QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
      llvm::Value *ThisPtr = CGF.LoadCXXThis();
      LValue DestLV = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
      LValue Dest = CGF.EmitLValueForFieldInitialization(DestLV, FirstField);
      llvm::Value *SrcPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(SrcRec));
      LValue SrcLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
      LValue Src = CGF.EmitLValueForFieldInitialization(SrcLV, FirstField);

      emitMemcpyIR(Dest.isBitField() ? Dest.getBitFieldAddr() : Dest.getAddress(),
                   Src.isBitField() ? Src.getBitFieldAddr() : Src.getAddress(),
                   MemcpySize, Alignment);
      reset();
    }

    void reset() {
      FirstField = 0;
    }

  protected:
    CodeGenFunction &CGF;
    const CXXRecordDecl *ClassDecl;

  private:

    void emitMemcpyIR(llvm::Value *DestPtr, llvm::Value *SrcPtr,
                      CharUnits Size, CharUnits Alignment) {
      llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
      llvm::Type *DBP =
        llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), DPT->getAddressSpace());
      DestPtr = CGF.Builder.CreateBitCast(DestPtr, DBP);

      llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
      llvm::Type *SBP =
        llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), SPT->getAddressSpace());
      SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, SBP);

      CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, Size.getQuantity(),
                               Alignment.getQuantity());
    }

    void addInitialField(FieldDecl *F) {
      FirstField = F;
      LastField = F;
      FirstFieldOffset = RecLayout.getFieldOffset(F->getFieldIndex());
      LastFieldOffset = FirstFieldOffset;
      LastAddedFieldIndex = F->getFieldIndex();
      return;
    }

    void addNextField(FieldDecl *F) {
      // For the most part, the following invariant will hold:
      //   F->getFieldIndex() == LastAddedFieldIndex + 1
      // The one exception is that Sema won't add a copy-initializer for an
      // unnamed bitfield, which will show up here as a gap in the sequence.
      assert(F->getFieldIndex() >= LastAddedFieldIndex + 1 &&
             "Cannot aggregate fields out of order.");
      LastAddedFieldIndex = F->getFieldIndex();

      // The 'first' and 'last' fields are chosen by offset, rather than field
      // index. This allows the code to support bitfields, as well as regular
      // fields.
      uint64_t FOffset = RecLayout.getFieldOffset(F->getFieldIndex());
      if (FOffset < FirstFieldOffset) {
        FirstField = F;
        FirstFieldOffset = FOffset;
      } else if (FOffset > LastFieldOffset) {
        LastField = F;
        LastFieldOffset = FOffset;
      }
    }

    const VarDecl *SrcRec;
    const ASTRecordLayout &RecLayout;
    FieldDecl *FirstField;
    FieldDecl *LastField;
    uint64_t FirstFieldOffset, LastFieldOffset;
    unsigned LastAddedFieldIndex;
  };
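
  // What this machinery buys us, on a hypothetical class:
  //
  //   struct P { int a; int b; float c; };
  //
  // The implicit copy operations would otherwise be emitted as three
  // separate member-wise copies; the memcpyizers below notice that a, b
  // and c are adjacent, memcpyable fields and coalesce them into a single
  // memcpy covering the byte range from a through c.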

  class ConstructorMemcpyizer : public FieldMemcpyizer {
  private:

    /// Get source argument for copy constructor. Returns null if not a copy
    /// constructor.
    static const VarDecl* getTrivialCopySource(const CXXConstructorDecl *CD,
                                               FunctionArgList &Args) {
      if (CD->isCopyOrMoveConstructor() && CD->isDefaulted())
        return Args[Args.size() - 1];
      return 0;
    }

    // Returns true if a CXXCtorInitializer represents a member initialization
    // that can be rolled into a memcpy.
    bool isMemberInitMemcpyable(CXXCtorInitializer *MemberInit) const {
      if (!MemcpyableCtor)
        return false;
      FieldDecl *Field = MemberInit->getMember();
      assert(Field != 0 && "No field for member init.");
      QualType FieldType = Field->getType();
      CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());

      // Bail out on non-POD, not-trivially-constructible members.
      if (!(CE && CE->getConstructor()->isTrivial()) &&
          !(FieldType.isTriviallyCopyableType(CGF.getContext()) ||
            FieldType->isReferenceType()))
        return false;

      // Bail out on volatile fields.
      if (!isMemcpyableField(Field))
        return false;

      // Otherwise we're good.
      return true;
    }

  public:
    ConstructorMemcpyizer(CodeGenFunction &CGF, const CXXConstructorDecl *CD,
                          FunctionArgList &Args)
      : FieldMemcpyizer(CGF, CD->getParent(), getTrivialCopySource(CD, Args)),
        ConstructorDecl(CD),
        MemcpyableCtor(CD->isDefaulted() &&
                       CD->isCopyOrMoveConstructor() &&
                       CGF.getLangOpts().getGC() == LangOptions::NonGC),
        Args(Args) { }

    void addMemberInitializer(CXXCtorInitializer *MemberInit) {
      if (isMemberInitMemcpyable(MemberInit)) {
        AggregatedInits.push_back(MemberInit);
        addMemcpyableField(MemberInit->getMember());
      } else {
        emitAggregatedInits();
        EmitMemberInitializer(CGF, ConstructorDecl->getParent(), MemberInit,
                              ConstructorDecl, Args);
      }
    }

    void emitAggregatedInits() {
      if (AggregatedInits.size() <= 1) {
        // This memcpy is too small to be worthwhile. Fall back on default
        // codegen.
        if (!AggregatedInits.empty()) {
          CopyingValueRepresentation CVR(CGF);
          EmitMemberInitializer(CGF, ConstructorDecl->getParent(),
                                AggregatedInits[0], ConstructorDecl, Args);
        }
        reset();
        return;
      }

      pushEHDestructors();
      emitMemcpy();
      AggregatedInits.clear();
    }

    void pushEHDestructors() {
      llvm::Value *ThisPtr = CGF.LoadCXXThis();
      QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
      LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);

      for (unsigned i = 0; i < AggregatedInits.size(); ++i) {
        QualType FieldType = AggregatedInits[i]->getMember()->getType();
        QualType::DestructionKind dtorKind = FieldType.isDestructedType();
        if (CGF.needsEHCleanup(dtorKind))
          CGF.pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
      }
    }

    void finish() {
      emitAggregatedInits();
    }

  private:
    const CXXConstructorDecl *ConstructorDecl;
    bool MemcpyableCtor;
    FunctionArgList &Args;
    SmallVector<CXXCtorInitializer*, 16> AggregatedInits;
  };

  class AssignmentMemcpyizer : public FieldMemcpyizer {
  private:

    // Returns the memcpyable field copied by the given statement, if one
    // exists. Otherwise returns null.
    FieldDecl *getMemcpyableField(Stmt *S) {
      if (!AssignmentsMemcpyable)
        return 0;
      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(S)) {
        // Recognise trivial assignments.
        if (BO->getOpcode() != BO_Assign)
          return 0;
        MemberExpr *ME = dyn_cast<MemberExpr>(BO->getLHS());
        if (!ME)
          return 0;
        FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
        if (!Field || !isMemcpyableField(Field))
          return 0;
        Stmt *RHS = BO->getRHS();
        if (ImplicitCastExpr *EC = dyn_cast<ImplicitCastExpr>(RHS))
          RHS = EC->getSubExpr();
        if (!RHS)
          return 0;
        MemberExpr *ME2 = dyn_cast<MemberExpr>(RHS);
        // Guard against a non-MemberExpr RHS before dereferencing ME2.
        if (!ME2 || dyn_cast<FieldDecl>(ME2->getMemberDecl()) != Field)
          return 0;
        return Field;
      } else if (CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(S)) {
        CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MCE->getCalleeDecl());
        if (!(MD && (MD->isCopyAssignmentOperator() ||
                     MD->isMoveAssignmentOperator()) &&
              MD->isTrivial()))
          return 0;
        MemberExpr *IOA = dyn_cast<MemberExpr>(MCE->getImplicitObjectArgument());
        if (!IOA)
          return 0;
        FieldDecl *Field = dyn_cast<FieldDecl>(IOA->getMemberDecl());
        if (!Field || !isMemcpyableField(Field))
          return 0;
        MemberExpr *Arg0 = dyn_cast<MemberExpr>(MCE->getArg(0));
        if (!Arg0 || Field != dyn_cast<FieldDecl>(Arg0->getMemberDecl()))
          return 0;
        return Field;
      } else if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
        FunctionDecl *FD = dyn_cast<FunctionDecl>(CE->getCalleeDecl());
        if (!FD || FD->getBuiltinID() != Builtin::BI__builtin_memcpy)
          return 0;
        Expr *DstPtr = CE->getArg(0);
        if (ImplicitCastExpr *DC = dyn_cast<ImplicitCastExpr>(DstPtr))
          DstPtr = DC->getSubExpr();
        UnaryOperator *DUO = dyn_cast<UnaryOperator>(DstPtr);
        if (!DUO || DUO->getOpcode() != UO_AddrOf)
          return 0;
        MemberExpr *ME = dyn_cast<MemberExpr>(DUO->getSubExpr());
        if (!ME)
          return 0;
        FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
        if (!Field || !isMemcpyableField(Field))
          return 0;
        Expr *SrcPtr = CE->getArg(1);
        if (ImplicitCastExpr *SC = dyn_cast<ImplicitCastExpr>(SrcPtr))
          SrcPtr = SC->getSubExpr();
        UnaryOperator *SUO = dyn_cast<UnaryOperator>(SrcPtr);
        if (!SUO || SUO->getOpcode() != UO_AddrOf)
          return 0;
        MemberExpr *ME2 = dyn_cast<MemberExpr>(SUO->getSubExpr());
        if (!ME2 || Field != dyn_cast<FieldDecl>(ME2->getMemberDecl()))
          return 0;
        return Field;
      }

      return 0;
    }

    bool AssignmentsMemcpyable;
    SmallVector<Stmt*, 16> AggregatedStmts;

  public:

    AssignmentMemcpyizer(CodeGenFunction &CGF, const CXXMethodDecl *AD,
                         FunctionArgList &Args)
      : FieldMemcpyizer(CGF, AD->getParent(), Args[Args.size() - 1]),
        AssignmentsMemcpyable(CGF.getLangOpts().getGC() == LangOptions::NonGC) {
      assert(Args.size() == 2);
    }

    void emitAssignment(Stmt *S) {
      FieldDecl *F = getMemcpyableField(S);
      if (F) {
        addMemcpyableField(F);
        AggregatedStmts.push_back(S);
      } else {
        emitAggregatedStmts();
        CGF.EmitStmt(S);
      }
    }

    void emitAggregatedStmts() {
      if (AggregatedStmts.size() <= 1) {
        if (!AggregatedStmts.empty()) {
          CopyingValueRepresentation CVR(CGF);
          CGF.EmitStmt(AggregatedStmts[0]);
        }
        reset();
      }

      emitMemcpy();
      AggregatedStmts.clear();
    }

    void finish() {
      emitAggregatedStmts();
    }
  };

}

/// EmitCtorPrologue - This routine generates necessary code to initialize
/// base classes and non-static data members belonging to this constructor.
void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
                                       CXXCtorType CtorType,
                                       FunctionArgList &Args) {
  if (CD->isDelegatingConstructor())
    return EmitDelegatingCXXConstructorCall(CD, Args);

  const CXXRecordDecl *ClassDecl = CD->getParent();

  CXXConstructorDecl::init_const_iterator B = CD->init_begin(),
                                          E = CD->init_end();

  llvm::BasicBlock *BaseCtorContinueBB = 0;
  if (ClassDecl->getNumVBases() &&
      !CGM.getTarget().getCXXABI().hasConstructorVariants()) {
    // The ABIs that don't have constructor variants need to put a branch
    // before the virtual base initialization code.
    BaseCtorContinueBB =
      CGM.getCXXABI().EmitCtorCompleteObjectHandler(*this, ClassDecl);
    assert(BaseCtorContinueBB);
  }

  // Virtual base initializers first.
  for (; B != E && (*B)->isBaseInitializer() && (*B)->isBaseVirtual(); B++) {
    EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
  }

  if (BaseCtorContinueBB) {
    // Complete object handler should continue to the remaining initializers.
    Builder.CreateBr(BaseCtorContinueBB);
    EmitBlock(BaseCtorContinueBB);
  }

  // Then, non-virtual base initializers.
  for (; B != E && (*B)->isBaseInitializer(); B++) {
    assert(!(*B)->isBaseVirtual());
    EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
  }

  InitializeVTablePointers(ClassDecl);

  // And finally, initialize class members.
  FieldConstructionScope FCS(*this, CXXThisValue);
  ConstructorMemcpyizer CM(*this, CD, Args);
  for (; B != E; B++) {
    CXXCtorInitializer *Member = (*B);
    assert(!Member->isBaseInitializer());
    assert(Member->isAnyMemberInitializer() &&
           "Delegating initializer on non-delegating constructor");
    CM.addMemberInitializer(Member);
  }
  CM.finish();
}
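
// The resulting initialization order for a constructor such as
// (hypothetical)
//
//   struct D : virtual V, B {
//     M m;
//     D() : m(0), B(), V() {}
//   };
//
// is always V (in the complete variant only), then B, then the vtable
// pointers, then m -- i.e. the C++-mandated declaration order, regardless
// of the order the mem-initializers were written in (Sema has already
// sorted the initializer list accordingly).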

static bool
FieldHasTrivialDestructorBody(ASTContext &Context, const FieldDecl *Field);

static bool
HasTrivialDestructorBody(ASTContext &Context,
                         const CXXRecordDecl *BaseClassDecl,
                         const CXXRecordDecl *MostDerivedClassDecl)
{
  // If the destructor is trivial we don't have to check anything else.
  if (BaseClassDecl->hasTrivialDestructor())
    return true;

  if (!BaseClassDecl->getDestructor()->hasTrivialBody())
    return false;

  // Check fields.
  for (CXXRecordDecl::field_iterator I = BaseClassDecl->field_begin(),
       E = BaseClassDecl->field_end(); I != E; ++I) {
    const FieldDecl *Field = *I;

    if (!FieldHasTrivialDestructorBody(Context, Field))
      return false;
  }

  // Check non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I =
         BaseClassDecl->bases_begin(), E = BaseClassDecl->bases_end();
       I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *NonVirtualBase =
      cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
    if (!HasTrivialDestructorBody(Context, NonVirtualBase,
                                  MostDerivedClassDecl))
      return false;
  }

  if (BaseClassDecl == MostDerivedClassDecl) {
    // Check virtual bases.
    for (CXXRecordDecl::base_class_const_iterator I =
           BaseClassDecl->vbases_begin(), E = BaseClassDecl->vbases_end();
         I != E; ++I) {
      const CXXRecordDecl *VirtualBase =
        cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
      if (!HasTrivialDestructorBody(Context, VirtualBase,
                                    MostDerivedClassDecl))
        return false;
    }
  }

  return true;
}

static bool
FieldHasTrivialDestructorBody(ASTContext &Context,
                              const FieldDecl *Field)
{
  QualType FieldBaseElementType = Context.getBaseElementType(Field->getType());

  const RecordType *RT = FieldBaseElementType->getAs<RecordType>();
  if (!RT)
    return true;

  CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
  return HasTrivialDestructorBody(Context, FieldClassDecl, FieldClassDecl);
}

/// CanSkipVTablePointerInitialization - Check whether we need to initialize
/// any vtable pointers before calling this destructor.
static bool CanSkipVTablePointerInitialization(ASTContext &Context,
                                               const CXXDestructorDecl *Dtor) {
  if (!Dtor->hasTrivialBody())
    return false;

  // Check the fields.
  const CXXRecordDecl *ClassDecl = Dtor->getParent();
  for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
       E = ClassDecl->field_end(); I != E; ++I) {
    const FieldDecl *Field = *I;

    if (!FieldHasTrivialDestructorBody(Context, Field))
      return false;
  }

  return true;
}
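
// For example (hypothetical), if ~T() is implicit and every field is an
// int, nothing in the destructor can make a virtual call, so the stores
// that would re-point the vtable pointers at T's vtable are skipped; a
// user-written body, or a field whose destructor does real work, forces
// them to be emitted since that code may invoke virtual functions on a
// partially-destroyed object.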

/// EmitDestructorBody - Emits the body of the current destructor.
void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
  const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CurGD.getDecl());
  CXXDtorType DtorType = CurGD.getDtorType();

  // The call to operator delete in a deleting destructor happens
  // outside of the function-try-block, which means it's always
  // possible to delegate the destructor body to the complete
  // destructor. Do so.
  if (DtorType == Dtor_Deleting) {
    EnterDtorCleanups(Dtor, Dtor_Deleting);
    EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
                          /*Delegating=*/false, LoadCXXThis());
    PopCleanupBlock();
    return;
  }

  Stmt *Body = Dtor->getBody();

  // If the body is a function-try-block, enter the try before
  // anything else.
  bool isTryBody = (Body && isa<CXXTryStmt>(Body));
  if (isTryBody)
    EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);

  // Enter the epilogue cleanups.
  RunCleanupsScope DtorEpilogue(*this);

  // If this is the complete variant, just invoke the base variant;
  // the epilogue will destruct the virtual bases. But we can't do
  // this optimization if the body is a function-try-block, because
  // we'd introduce *two* handler blocks. In the Microsoft ABI, we
  // always delegate because we might not have a definition in this TU.
  switch (DtorType) {
  case Dtor_Deleting: llvm_unreachable("already handled deleting case");

  case Dtor_Complete:
    assert((Body || getTarget().getCXXABI().isMicrosoft()) &&
           "can't emit a dtor without a body for non-Microsoft ABIs");

    // Enter the cleanup scopes for virtual bases.
    EnterDtorCleanups(Dtor, Dtor_Complete);

    if (!isTryBody) {
      EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
                            /*Delegating=*/false, LoadCXXThis());
      break;
    }
    // Fallthrough: act like we're in the base variant.

  case Dtor_Base:
    assert(Body);

    // Enter the cleanup scopes for fields and non-virtual bases.
    EnterDtorCleanups(Dtor, Dtor_Base);

    // Initialize the vtable pointers before entering the body.
    if (!CanSkipVTablePointerInitialization(getContext(), Dtor))
      InitializeVTablePointers(Dtor->getParent());

    if (isTryBody)
      EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
    else if (Body)
      EmitStmt(Body);
    else {
      assert(Dtor->isImplicit() && "bodyless dtor not implicit");
      // nothing to do besides what's in the epilogue
    }
    // -fapple-kext must inline any call to this dtor into
    // the caller's body.
    if (getLangOpts().AppleKext)
      CurFn->addFnAttr(llvm::Attribute::AlwaysInline);
    break;
  }

  // Jump out through the epilogue cleanups.
  DtorEpilogue.ForceCleanup();

  // Exit the try if applicable.
  if (isTryBody)
    ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
}

void CodeGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &Args) {
  const CXXMethodDecl *AssignOp = cast<CXXMethodDecl>(CurGD.getDecl());
  const Stmt *RootS = AssignOp->getBody();
  assert(isa<CompoundStmt>(RootS) &&
         "Body of an implicit assignment operator should be compound stmt.");
  const CompoundStmt *RootCS = cast<CompoundStmt>(RootS);

  LexicalScope Scope(*this, RootCS->getSourceRange());

  AssignmentMemcpyizer AM(*this, AssignOp, Args);
  for (CompoundStmt::const_body_iterator I = RootCS->body_begin(),
                                         E = RootCS->body_end();
       I != E; ++I) {
    AM.emitAssignment(*I);
  }
  AM.finish();
}

namespace {
  /// Call the operator delete associated with the current destructor.
  struct CallDtorDelete : EHScopeStack::Cleanup {
    CallDtorDelete() {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
      const CXXRecordDecl *ClassDecl = Dtor->getParent();
      CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
                         CGF.getContext().getTagDeclType(ClassDecl));
    }
  };

  struct CallDtorDeleteConditional : EHScopeStack::Cleanup {
    llvm::Value *ShouldDeleteCondition;
  public:
    CallDtorDeleteConditional(llvm::Value *ShouldDeleteCondition)
      : ShouldDeleteCondition(ShouldDeleteCondition) {
      assert(ShouldDeleteCondition != NULL);
    }

    void Emit(CodeGenFunction &CGF, Flags flags) {
      llvm::BasicBlock *callDeleteBB = CGF.createBasicBlock("dtor.call_delete");
      llvm::BasicBlock *continueBB = CGF.createBasicBlock("dtor.continue");
      llvm::Value *ShouldCallDelete
        = CGF.Builder.CreateIsNull(ShouldDeleteCondition);
      CGF.Builder.CreateCondBr(ShouldCallDelete, continueBB, callDeleteBB);

      CGF.EmitBlock(callDeleteBB);
      const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
      const CXXRecordDecl *ClassDecl = Dtor->getParent();
      CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
                         CGF.getContext().getTagDeclType(ClassDecl));
      CGF.Builder.CreateBr(continueBB);

      CGF.EmitBlock(continueBB);
    }
  };

  class DestroyField : public EHScopeStack::Cleanup {
    const FieldDecl *field;
    CodeGenFunction::Destroyer *destroyer;
    bool useEHCleanupForArray;

  public:
    DestroyField(const FieldDecl *field, CodeGenFunction::Destroyer *destroyer,
                 bool useEHCleanupForArray)
      : field(field), destroyer(destroyer),
        useEHCleanupForArray(useEHCleanupForArray) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      // Find the address of the field.
      llvm::Value *thisValue = CGF.LoadCXXThis();
      QualType RecordTy = CGF.getContext().getTagDeclType(field->getParent());
      LValue ThisLV = CGF.MakeAddrLValue(thisValue, RecordTy);
      LValue LV = CGF.EmitLValueForField(ThisLV, field);
      assert(LV.isSimple());

      CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer,
                      flags.isForNormalCleanup() && useEHCleanupForArray);
    }
  };
}

/// \brief Emit all code that comes at the end of class's
/// destructor. This is to call destructors on members and base classes
/// in reverse order of their construction.
void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
                                        CXXDtorType DtorType) {
  assert(!DD->isTrivial() &&
         "Should not emit dtor epilogue for trivial dtor!");

  // The deleting-destructor phase just needs to call the appropriate
  // operator delete that Sema picked up.
  if (DtorType == Dtor_Deleting) {
    assert(DD->getOperatorDelete() &&
           "operator delete missing - EnterDtorCleanups");
    if (CXXStructorImplicitParamValue) {
      // If there is an implicit param to the deleting dtor, it's a boolean
      // telling whether we should call delete at the end of the dtor.
      EHStack.pushCleanup<CallDtorDeleteConditional>(
          NormalAndEHCleanup, CXXStructorImplicitParamValue);
    } else {
      EHStack.pushCleanup<CallDtorDelete>(NormalAndEHCleanup);
    }
    return;
  }

  const CXXRecordDecl *ClassDecl = DD->getParent();

  // Unions have no bases and do not call field destructors.
  if (ClassDecl->isUnion())
    return;

  // The complete-destructor phase just destructs all the virtual bases.
  if (DtorType == Dtor_Complete) {

    // We push them in the forward order so that they'll be popped in
    // the reverse order.
    for (CXXRecordDecl::base_class_const_iterator I =
           ClassDecl->vbases_begin(), E = ClassDecl->vbases_end();
         I != E; ++I) {
      const CXXBaseSpecifier &Base = *I;
      CXXRecordDecl *BaseClassDecl
        = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());

      // Ignore trivial destructors.
      if (BaseClassDecl->hasTrivialDestructor())
        continue;

      EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
                                        BaseClassDecl,
                                        /*BaseIsVirtual*/ true);
    }

    return;
  }

  assert(DtorType == Dtor_Base);

  // Destroy non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I =
         ClassDecl->bases_begin(), E = ClassDecl->bases_end(); I != E; ++I) {
    const CXXBaseSpecifier &Base = *I;

    // Ignore virtual bases.
    if (Base.isVirtual())
      continue;

    CXXRecordDecl *BaseClassDecl = Base.getType()->getAsCXXRecordDecl();

    // Ignore trivial destructors.
    if (BaseClassDecl->hasTrivialDestructor())
      continue;

    EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
                                      BaseClassDecl,
                                      /*BaseIsVirtual*/ false);
  }

  // Destroy direct fields.
  for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
       E = ClassDecl->field_end(); I != E; ++I) {
    const FieldDecl *field = *I;
    QualType type = field->getType();
    QualType::DestructionKind dtorKind = type.isDestructedType();
    if (!dtorKind) continue;

    // Anonymous union members do not have their destructors called.
    const RecordType *RT = type->getAsUnionType();
    if (RT && RT->getDecl()->isAnonymousStructOrUnion()) continue;

    CleanupKind cleanupKind = getCleanupKind(dtorKind);
    EHStack.pushCleanup<DestroyField>(cleanupKind, field,
                                      getDestroyer(dtorKind),
                                      cleanupKind & EHCleanup);
  }
}
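
// Net effect, on a hypothetical class
//
//   struct T : B1, B2 { M1 m1; M2 m2; ~T(); };
//
// the base-variant epilogue destroys m2, then m1, then B2, then B1 (the
// exact reverse of construction order, since the cleanups are pushed in
// forward order and popped LIFO), and the complete variant destroys the
// virtual bases, if any, after the base variant's work is done.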

/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
/// constructor for each of several members of an array.
///
/// \param ctor the constructor to call for each element
/// \param arrayType the type of the array to initialize
/// \param arrayBegin an arrayType*
/// \param zeroInitialize true if each element should be
///   zero-initialized before it is constructed
void
CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
                                            const ConstantArrayType *arrayType,
                                            llvm::Value *arrayBegin,
                                          CallExpr::const_arg_iterator argBegin,
                                            CallExpr::const_arg_iterator argEnd,
                                            bool zeroInitialize) {
  QualType elementType;
  llvm::Value *numElements =
    emitArrayLength(arrayType, elementType, arrayBegin);

  EmitCXXAggrConstructorCall(ctor, numElements, arrayBegin,
                             argBegin, argEnd, zeroInitialize);
}

/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
/// constructor for each of several members of an array.
///
/// \param ctor the constructor to call for each element
/// \param numElements the number of elements in the array;
///   may be zero
/// \param arrayBegin a T*, where T is the type constructed by ctor
/// \param zeroInitialize true if each element should be
///   zero-initialized before it is constructed
void
CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
                                            llvm::Value *numElements,
                                            llvm::Value *arrayBegin,
                                          CallExpr::const_arg_iterator argBegin,
                                            CallExpr::const_arg_iterator argEnd,
                                            bool zeroInitialize) {

  // It's legal for numElements to be zero. This can happen both
  // dynamically, because x can be zero in 'new A[x]', and statically,
  // because of GCC extensions that permit zero-length arrays. There
  // are probably legitimate places where we could assume that this
  // doesn't happen, but it's not clear that it's worth it.
  llvm::BranchInst *zeroCheckBranch = 0;

  // Optimize for a constant count.
  llvm::ConstantInt *constantCount
    = dyn_cast<llvm::ConstantInt>(numElements);
  if (constantCount) {
    // Just skip out if the constant count is zero.
    if (constantCount->isZero()) return;

  // Otherwise, emit the check.
  } else {
    llvm::BasicBlock *loopBB = createBasicBlock("new.ctorloop");
    llvm::Value *iszero = Builder.CreateIsNull(numElements, "isempty");
    // Both successors initially point at the loop; the zero-count edge is
    // patched with setSuccessor once the continuation block exists below.
    zeroCheckBranch = Builder.CreateCondBr(iszero, loopBB, loopBB);
    EmitBlock(loopBB);
  }

  // Find the end of the array.
  llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(arrayBegin, numElements,
                                                    "arrayctor.end");

  // Enter the loop, setting up a phi for the current location to initialize.
  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = createBasicBlock("arrayctor.loop");
  EmitBlock(loopBB);
  llvm::PHINode *cur = Builder.CreatePHI(arrayBegin->getType(), 2,
                                         "arrayctor.cur");
  cur->addIncoming(arrayBegin, entryBB);

  // Inside the loop body, emit the constructor call on the array element.

  QualType type = getContext().getTypeDeclType(ctor->getParent());

  // Zero initialize the storage, if requested.
  if (zeroInitialize)
    EmitNullInitialization(cur, type);

  // C++ [class.temporary]p4:
  // There are two contexts in which temporaries are destroyed at a different
  // point than the end of the full-expression. The first context is when a
  // default constructor is called to initialize an element of an array.
  // If the constructor has one or more default arguments, the destruction of
  // every temporary created in a default argument expression is sequenced
  // before the construction of the next array element, if any.

  {
    RunCleanupsScope Scope(*this);

    // Evaluate the constructor and its arguments in a regular
    // partial-destroy cleanup.
    if (getLangOpts().Exceptions &&
        !ctor->getParent()->hasTrivialDestructor()) {
      Destroyer *destroyer = destroyCXXObject;
      pushRegularPartialArrayCleanup(arrayBegin, cur, type, *destroyer);
    }

    EmitCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/ false,
                           /*Delegating=*/false, cur, argBegin, argEnd);
  }

  // Go to the next element.
  llvm::Value *next =
    Builder.CreateInBoundsGEP(cur, llvm::ConstantInt::get(SizeTy, 1),
                              "arrayctor.next");
  cur->addIncoming(next, Builder.GetInsertBlock());

  // Check whether that's the end of the loop.
  llvm::Value *done = Builder.CreateICmpEQ(next, arrayEnd, "arrayctor.done");
  llvm::BasicBlock *contBB = createBasicBlock("arrayctor.cont");
  Builder.CreateCondBr(done, contBB, loopBB);

  // Patch the earlier check to skip over the loop.
  if (zeroCheckBranch) zeroCheckBranch->setSuccessor(0, contBB);

  EmitBlock(contBB);
}
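
// Note that if an element constructor throws partway through the loop, the
// partial-destroy cleanup pushed above destroys only the elements in
// [arrayBegin, cur) -- those already fully constructed -- in reverse order
// before the exception propagates.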
void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF,
                                       llvm::Value *addr,
                                       QualType type) {
  const RecordType *rtype = type->castAs<RecordType>();
  const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
  const CXXDestructorDecl *dtor = record->getDestructor();
  assert(!dtor->isTrivial());
  CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false,
                            /*Delegating=*/false, addr);
}

void
CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
                                        CXXCtorType Type, bool ForVirtualBase,
                                        bool Delegating,
                                        llvm::Value *This,
                                        CallExpr::const_arg_iterator ArgBeg,
                                        CallExpr::const_arg_iterator ArgEnd) {
  // If this is a trivial constructor, just emit what's needed.
  if (D->isTrivial()) {
    if (ArgBeg == ArgEnd) {
      // Trivial default constructor, no codegen required.
      assert(D->isDefaultConstructor() &&
             "trivial 0-arg ctor not a default ctor");
      return;
    }

    assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
    assert(D->isCopyOrMoveConstructor() &&
           "trivial 1-arg ctor not a copy/move ctor");

    const Expr *E = (*ArgBeg);
    QualType Ty = E->getType();
    llvm::Value *Src = EmitLValue(E).getAddress();
    EmitAggregateCopy(This, Src, Ty);
    return;
  }

  // C++11 [class.mfct.non-static]p2:
  // If a non-static member function of a class X is called for an object that
  // is not of type X, or of a type derived from X, the behavior is undefined.
  // FIXME: Provide a source location here.
  EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, SourceLocation(), This,
                getContext().getRecordType(D->getParent()));

  CallArgList Args;

  // Push the this ptr.
  Args.add(RValue::get(This), D->getThisType(getContext()));

  // Add the rest of the user-supplied arguments.
  const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();
  EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);

  // Insert any ABI-specific implicit constructor arguments.
  unsigned ExtraArgs = CGM.getCXXABI().addImplicitConstructorArgs(
      *this, D, Type, ForVirtualBase, Delegating, Args);

  // Emit the call.
  llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, Type);
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs);
  const CGFunctionInfo &Info =
      CGM.getTypes().arrangeCXXMethodCall(Args, FPT, Required);
  EmitCall(Info, Callee, ReturnValueSlot(), Args, D);
}

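// For a trivial copy, e.g. (a sketch):
//
//   struct P { int x, y; };
//   void f(const P &a) { P b(a); }
//
// no constructor call is emitted at all: the fast path above lowers the
// construction of 'b' to EmitAggregateCopy, which typically becomes a
// memcpy of sizeof(P) bytes.
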
void
CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
                                        llvm::Value *This, llvm::Value *Src,
                                        CallExpr::const_arg_iterator ArgBeg,
                                        CallExpr::const_arg_iterator ArgEnd) {
  if (D->isTrivial()) {
    assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
    assert(D->isCopyOrMoveConstructor() &&
           "trivial 1-arg ctor not a copy/move ctor");
    EmitAggregateCopy(This, Src, (*ArgBeg)->getType());
    return;
  }
  llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, clang::Ctor_Complete);
  assert(D->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();

  CallArgList Args;

  // Push the this ptr.
  Args.add(RValue::get(This), D->getThisType(getContext()));

  // Push the src ptr.
  QualType QT = *(FPT->arg_type_begin());
  llvm::Type *t = CGM.getTypes().ConvertType(QT);
  Src = Builder.CreateBitCast(Src, t);
  Args.add(RValue::get(Src), QT);

  // Skip over first argument (Src).
  EmitCallArgs(Args, FPT->isVariadic(), FPT->arg_type_begin() + 1,
               FPT->arg_type_end(), ArgBeg + 1, ArgEnd);

  EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, RequiredArgs::All),
           Callee, ReturnValueSlot(), Args, D);
}

void
CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                                CXXCtorType CtorType,
                                                const FunctionArgList &Args,
                                                SourceLocation Loc) {
  CallArgList DelegateArgs;

  FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
  assert(I != E && "no parameters to constructor");

  // this
  DelegateArgs.add(RValue::get(LoadCXXThis()), (*I)->getType());
  ++I;

  // vtt
  if (llvm::Value *VTT = GetVTTParameter(GlobalDecl(Ctor, CtorType),
                                         /*ForVirtualBase=*/false,
                                         /*Delegating=*/true)) {
    QualType VoidPP = getContext().getPointerType(getContext().VoidPtrTy);
    DelegateArgs.add(RValue::get(VTT), VoidPP);

    if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
      assert(I != E && "cannot skip vtt parameter, already done with args");
      assert((*I)->getType() == VoidPP && "skipping parameter not of vtt type");
      ++I;
    }
  }

  // Explicit arguments.
  for (; I != E; ++I) {
    const VarDecl *param = *I;
    // FIXME: per-argument source location
    EmitDelegateCallArg(DelegateArgs, param, Loc);
  }

  llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(Ctor, CtorType);
  EmitCall(CGM.getTypes().arrangeCXXConstructorDeclaration(Ctor, CtorType),
           Callee, ReturnValueSlot(), DelegateArgs, Ctor);
}

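// The result is a thin forwarding call; roughly (a sketch, using Itanium
// C1/C2 mangling to stand for the complete- and base-object variants):
//
//   define void @C1(%struct.X* %this, i32 %a) {
//     %vtt = ...                                ; only if the ABI needs one
//     call void @C2(%struct.X* %this, i8** %vtt, i32 %a)
//     ret void
//   }
//
// That is, one constructor variant reuses another's body instead of
// duplicating it, with 'this', an optional VTT, and all explicit
// parameters forwarded unchanged.
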
namespace {
  struct CallDelegatingCtorDtor : EHScopeStack::Cleanup {
    const CXXDestructorDecl *Dtor;
    llvm::Value *Addr;
    CXXDtorType Type;

    CallDelegatingCtorDtor(const CXXDestructorDecl *D, llvm::Value *Addr,
                           CXXDtorType Type)
      : Dtor(D), Addr(Addr), Type(Type) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      CGF.EmitCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false,
                                /*Delegating=*/true, Addr);
    }
  };
}

void
CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                                  const FunctionArgList &Args) {
  assert(Ctor->isDelegatingConstructor());

  llvm::Value *ThisPtr = LoadCXXThis();

  QualType Ty = getContext().getTagDeclType(Ctor->getParent());
  CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
  AggValueSlot AggSlot =
    AggValueSlot::forAddr(ThisPtr, Alignment, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased);

  EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot);

  const CXXRecordDecl *ClassDecl = Ctor->getParent();
  if (CGM.getLangOpts().Exceptions && !ClassDecl->hasTrivialDestructor()) {
    CXXDtorType Type =
      CurGD.getCtorType() == Ctor_Complete ? Dtor_Complete : Dtor_Base;

    EHStack.pushCleanup<CallDelegatingCtorDtor>(EHCleanup,
                                                ClassDecl->getDestructor(),
                                                ThisPtr, Type);
  }
}

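// For a C++11 delegating constructor such as (a sketch):
//
//   struct S {
//     S();
//     S(int) : S() { mayThrow(); }
//   };
//
// the target-constructor call S() is the sole initializer and is emitted
// through EmitAggExpr above. Since the object is fully constructed once
// that call returns, CallDelegatingCtorDtor is pushed so that S is
// destroyed if the delegating constructor's own body later throws.
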
void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
                                            CXXDtorType Type,
                                            bool ForVirtualBase,
                                            bool Delegating,
                                            llvm::Value *This) {
  CGM.getCXXABI().EmitDestructorCall(*this, DD, Type, ForVirtualBase,
                                     Delegating, This);
}

namespace {
  struct CallLocalDtor : EHScopeStack::Cleanup {
    const CXXDestructorDecl *Dtor;
    llvm::Value *Addr;

    CallLocalDtor(const CXXDestructorDecl *D, llvm::Value *Addr)
      : Dtor(D), Addr(Addr) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                                /*ForVirtualBase=*/false,
                                /*Delegating=*/false, Addr);
    }
  };
}

void CodeGenFunction::PushDestructorCleanup(const CXXDestructorDecl *D,
                                            llvm::Value *Addr) {
  EHStack.pushCleanup<CallLocalDtor>(NormalAndEHCleanup, D, Addr);
}

void CodeGenFunction::PushDestructorCleanup(QualType T, llvm::Value *Addr) {
  CXXRecordDecl *ClassDecl = T->getAsCXXRecordDecl();
  if (!ClassDecl) return;
  if (ClassDecl->hasTrivialDestructor()) return;

  const CXXDestructorDecl *D = ClassDecl->getDestructor();
  assert(D && D->isUsed() && "destructor not marked as used!");
  PushDestructorCleanup(D, Addr);
}

void
CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
                                         const CXXRecordDecl *NearestVBase,
                                         CharUnits OffsetFromNearestVBase,
                                         const CXXRecordDecl *VTableClass) {
  // Compute the address point.
  bool NeedsVirtualOffset;
  llvm::Value *VTableAddressPoint =
      CGM.getCXXABI().getVTableAddressPointInStructor(
          *this, VTableClass, Base, NearestVBase, NeedsVirtualOffset);
  if (!VTableAddressPoint)
    return;

  // Compute where to store the address point.
  llvm::Value *VirtualOffset = 0;
  CharUnits NonVirtualOffset = CharUnits::Zero();

  if (NeedsVirtualOffset) {
    // We need to use the virtual base offset offset because the virtual base
    // might have a different offset in the most derived class.
    VirtualOffset = CGM.getCXXABI().GetVirtualBaseClassOffset(*this,
                                                              LoadCXXThis(),
                                                              VTableClass,
                                                              NearestVBase);
    NonVirtualOffset = OffsetFromNearestVBase;
  } else {
    // We can just use the base offset in the complete class.
    NonVirtualOffset = Base.getBaseOffset();
  }

  // Apply the offsets.
  llvm::Value *VTableField = LoadCXXThis();

  if (!NonVirtualOffset.isZero() || VirtualOffset)
    VTableField = ApplyNonVirtualAndVirtualOffset(*this, VTableField,
                                                  NonVirtualOffset,
                                                  VirtualOffset);

  // Finally, store the address point.
  llvm::Type *AddressPointPtrTy =
    VTableAddressPoint->getType()->getPointerTo();
  VTableField = Builder.CreateBitCast(VTableField, AddressPointPtrTy);
  llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField);
  CGM.DecorateInstruction(Store, CGM.getTBAAInfoForVTablePtr());
}

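// The net effect per subobject is a single vptr store, roughly (a sketch
// for a base subobject at a non-zero static offset within class D):
//
//   %0 = bitcast %struct.D* %this to i8*
//   %add.ptr = getelementptr inbounds i8* %0, i64 <offset>
//   %1 = bitcast i8* %add.ptr to i8***
//   store i8** <address point into D's vtable>, i8*** %1
//
// When the subobject lives in a virtual base, <offset> additionally folds
// in a value loaded at run time through the vbase-offset-offset slot.
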
void
CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
                                          const CXXRecordDecl *NearestVBase,
                                          CharUnits OffsetFromNearestVBase,
                                          bool BaseIsNonVirtualPrimaryBase,
                                          const CXXRecordDecl *VTableClass,
                                          VisitedVirtualBasesSetTy& VBases) {
  // If this base is a non-virtual primary base the address point has already
  // been set.
  if (!BaseIsNonVirtualPrimaryBase) {
    // Initialize the vtable pointer for this base.
    InitializeVTablePointer(Base, NearestVBase, OffsetFromNearestVBase,
                            VTableClass);
  }

  const CXXRecordDecl *RD = Base.getBase();

  // Traverse bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    CXXRecordDecl *BaseDecl
      = cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // Ignore classes without a vtable.
    if (!BaseDecl->isDynamicClass())
      continue;

    CharUnits BaseOffset;
    CharUnits BaseOffsetFromNearestVBase;
    bool BaseDeclIsNonVirtualPrimaryBase;

    if (I->isVirtual()) {
      // Check if we've visited this virtual base before.
      if (!VBases.insert(BaseDecl))
        continue;

      const ASTRecordLayout &Layout =
        getContext().getASTRecordLayout(VTableClass);

      BaseOffset = Layout.getVBaseClassOffset(BaseDecl);
      BaseOffsetFromNearestVBase = CharUnits::Zero();
      BaseDeclIsNonVirtualPrimaryBase = false;
    } else {
      const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

      BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
      BaseOffsetFromNearestVBase =
        OffsetFromNearestVBase + Layout.getBaseClassOffset(BaseDecl);
      BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl;
    }

    InitializeVTablePointers(BaseSubobject(BaseDecl, BaseOffset),
                             I->isVirtual() ? BaseDecl : NearestVBase,
                             BaseOffsetFromNearestVBase,
                             BaseDeclIsNonVirtualPrimaryBase,
                             VTableClass, VBases);
  }
}

void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
  // Ignore classes without a vtable.
  if (!RD->isDynamicClass())
    return;

  // Initialize the vtable pointers for this class and all of its bases.
  VisitedVirtualBasesSetTy VBases;
  InitializeVTablePointers(BaseSubobject(RD, CharUnits::Zero()),
                           /*NearestVBase=*/0,
                           /*OffsetFromNearestVBase=*/CharUnits::Zero(),
                           /*BaseIsNonVirtualPrimaryBase=*/false, RD, VBases);

  if (RD->getNumVBases())
    CGM.getCXXABI().initializeHiddenVirtualInheritanceMembers(*this, RD);
}

llvm::Value *CodeGenFunction::GetVTablePtr(llvm::Value *This,
                                           llvm::Type *Ty) {
  llvm::Value *VTablePtrSrc = Builder.CreateBitCast(This, Ty->getPointerTo());
  llvm::Instruction *VTable = Builder.CreateLoad(VTablePtrSrc, "vtable");
  CGM.DecorateInstruction(VTable, CGM.getTBAAInfoForVTablePtr());
  return VTable;
}

// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
// quite what we want.
static const Expr *skipNoOpCastsAndParens(const Expr *E) {
  while (true) {
    if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
      E = PE->getSubExpr();
      continue;
    }

    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
      if (CE->getCastKind() == CK_NoOp) {
        E = CE->getSubExpr();
        continue;
      }
    }
    if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
      if (UO->getOpcode() == UO_Extension) {
        E = UO->getSubExpr();
        continue;
      }
    }
    return E;
  }
}

bool
CodeGenFunction::CanDevirtualizeMemberFunctionCall(const Expr *Base,
                                                   const CXXMethodDecl *MD) {
  // When building with -fapple-kext, all calls must go through the vtable
  // since the kernel linker can do runtime patching of vtables.
  if (getLangOpts().AppleKext)
    return false;

  // If the most derived class is marked final, we know that no subclass can
  // override this member function and so we can devirtualize it. For example:
  //
  //   struct A { virtual void f(); };
  //   struct B final : A { };
  //
  //   void f(B *b) {
  //     b->f();
  //   }
  //
  const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType();
  if (MostDerivedClassDecl->hasAttr<FinalAttr>())
    return true;

  // If the member function is marked 'final', we know that it can't be
  // overridden and can therefore devirtualize it.
  if (MD->hasAttr<FinalAttr>())
    return true;

  // Similarly, if the class itself is marked 'final' it can't be overridden
  // and we can therefore devirtualize the member function call.
  if (MD->getParent()->hasAttr<FinalAttr>())
    return true;

  Base = skipNoOpCastsAndParens(Base);
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
      // This is a record decl. We know the type and can devirtualize it.
      return VD->getType()->isRecordType();
    }

    return false;
  }

  // We can devirtualize calls on an object accessed by a class member access
  // expression, since by C++11 [basic.life]p6 we know that it can't refer to
  // a derived class object constructed in the same location.
  if (const MemberExpr *ME = dyn_cast<MemberExpr>(Base))
    if (const ValueDecl *VD = dyn_cast<ValueDecl>(ME->getMemberDecl()))
      return VD->getType()->isRecordType();

  // We can always devirtualize calls on temporary object expressions.
  if (isa<CXXConstructExpr>(Base))
    return true;

  // And calls on bound temporaries.
  if (isa<CXXBindTemporaryExpr>(Base))
    return true;

  // Check if this is a call expr that returns a record type.
  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
    return CE->getCallReturnType()->isRecordType();

  // We can't devirtualize the call.
  return false;
}

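// An example of the member-access case above (a sketch):
//
//   struct A { virtual void f(); };
//   struct D : A { void f() override; };
//   struct Holder { A a; };
//
//   void g(Holder &h) { h.a.f(); }  // emitted as a direct call to A::f
//
// 'h.a' denotes a complete A subobject, so its dynamic type is known
// statically and the vtable load can be skipped.
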
llvm::Value *
CodeGenFunction::EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
                                             const CXXMethodDecl *MD,
                                             llvm::Value *This) {
  llvm::FunctionType *fnType =
    CGM.getTypes().GetFunctionType(
                             CGM.getTypes().arrangeCXXMethodDeclaration(MD));

  if (MD->isVirtual() && !CanDevirtualizeMemberFunctionCall(E->getArg(0), MD))
    return CGM.getCXXABI().getVirtualFunctionPointer(*this, MD, This, fnType);

  return CGM.GetAddrOfFunction(MD, fnType);
}

void CodeGenFunction::EmitForwardingCallToLambda(
                                      const CXXMethodDecl *callOperator,
                                      CallArgList &callArgs) {
  // Get the address of the call operator.
  const CGFunctionInfo &calleeFnInfo =
    CGM.getTypes().arrangeCXXMethodDeclaration(callOperator);
  llvm::Value *callee =
    CGM.GetAddrOfFunction(GlobalDecl(callOperator),
                          CGM.getTypes().GetFunctionType(calleeFnInfo));

  // Prepare the return slot.
  const FunctionProtoType *FPT =
    callOperator->getType()->castAs<FunctionProtoType>();
  QualType resultType = FPT->getResultType();
  ReturnValueSlot returnSlot;
  if (!resultType->isVoidType() &&
      calleeFnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
      !hasScalarEvaluationKind(calleeFnInfo.getReturnType()))
    returnSlot = ReturnValueSlot(ReturnValue, resultType.isVolatileQualified());

  // We don't need to separately arrange the call arguments because
  // the call can't be variadic anyway --- it's impossible to forward
  // variadic arguments.

  // Now emit our call.
  RValue RV = EmitCall(calleeFnInfo, callee, returnSlot,
                       callArgs, callOperator);

  // If necessary, copy the returned value into the slot.
  if (!resultType->isVoidType() && returnSlot.isNull())
    EmitReturnOfRValue(RV, resultType);
  else
    EmitBranchThroughCleanup(ReturnBlock);
}

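// For example (a sketch): given
//
//   auto l = [](int x) { return x + 1; };
//   int (*fp)(int) = l;
//
// the pointer-to-function conversion is emitted as a static invoker that
// forwards its arguments to the lambda's operator() through the code
// above. Scalar results come back as an RValue and are returned with
// EmitReturnOfRValue; indirect (sret) results are instead written
// straight into the caller's return slot.
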
void CodeGenFunction::EmitLambdaBlockInvokeBody() {
  const BlockDecl *BD = BlockInfo->getBlockDecl();
  const VarDecl *variable = BD->capture_begin()->getVariable();
  const CXXRecordDecl *Lambda = variable->getType()->getAsCXXRecordDecl();

  // Start building arguments for the forwarding call.
  CallArgList CallArgs;

  QualType ThisType =
    getContext().getPointerType(getContext().getRecordType(Lambda));
  llvm::Value *ThisPtr = GetAddrOfBlockDecl(variable, false);
  CallArgs.add(RValue::get(ThisPtr), ThisType);

  // Add the rest of the parameters.
  for (BlockDecl::param_const_iterator I = BD->param_begin(),
       E = BD->param_end(); I != E; ++I) {
    ParmVarDecl *param = *I;
    EmitDelegateCallArg(CallArgs, param, param->getLocStart());
  }
  assert(!Lambda->isGenericLambda() &&
         "generic lambda interconversion to block not implemented");
  EmitForwardingCallToLambda(Lambda->getLambdaCallOperator(), CallArgs);
}

void CodeGenFunction::EmitLambdaToBlockPointerBody(FunctionArgList &Args) {
  if (cast<CXXMethodDecl>(CurCodeDecl)->isVariadic()) {
    // FIXME: Making this work correctly is nasty because it requires either
    // cloning the body of the call operator or making the call operator
    // forward.
    CGM.ErrorUnsupported(CurCodeDecl, "lambda conversion to variadic function");
    return;
  }

  EmitFunctionBody(Args, cast<FunctionDecl>(CurGD.getDecl())->getBody());
}

void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
  const CXXRecordDecl *Lambda = MD->getParent();

  // Start building arguments for the forwarding call.
  CallArgList CallArgs;

  QualType ThisType =
    getContext().getPointerType(getContext().getRecordType(Lambda));
  llvm::Value *ThisPtr = llvm::UndefValue::get(getTypes().ConvertType(ThisType));
  CallArgs.add(RValue::get(ThisPtr), ThisType);

  // Add the rest of the parameters.
  for (FunctionDecl::param_const_iterator I = MD->param_begin(),
       E = MD->param_end(); I != E; ++I) {
    ParmVarDecl *param = *I;
    EmitDelegateCallArg(CallArgs, param, param->getLocStart());
  }
  const CXXMethodDecl *CallOp = Lambda->getLambdaCallOperator();
  // For a generic lambda, find the corresponding call operator specialization
  // to which the call to the static-invoker shall be forwarded.
  if (Lambda->isGenericLambda()) {
    assert(MD->isFunctionTemplateSpecialization());
    const TemplateArgumentList *TAL = MD->getTemplateSpecializationArgs();
    FunctionTemplateDecl *CallOpTemplate =
      CallOp->getDescribedFunctionTemplate();
    void *InsertPos = 0;
    FunctionDecl *CorrespondingCallOpSpecialization =
      CallOpTemplate->findSpecialization(TAL->data(), TAL->size(), InsertPos);
    assert(CorrespondingCallOpSpecialization);
    CallOp = cast<CXXMethodDecl>(CorrespondingCallOpSpecialization);
  }
  EmitForwardingCallToLambda(CallOp, CallArgs);
}

void CodeGenFunction::EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD) {
  if (MD->isVariadic()) {
    // FIXME: Making this work correctly is nasty because it requires either
    // cloning the body of the call operator or making the call operator
    // forward.
    CGM.ErrorUnsupported(MD, "lambda conversion to variadic function");
    return;
  }

  EmitLambdaDelegatingInvokeBody(MD);
}
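
// Note that the delegating invoke body passes an undef 'this': the static
// invoker only exists for captureless lambdas, whose call operator never
// inspects its 'this' pointer. For a generic lambda (a sketch):
//
//   auto l = [](auto x) { return x; };
//   int (*fp)(int) = l;
//
// the invoker being emitted is itself a function template specialization,
// and the code above uses its template arguments (<int>) to locate the
// matching operator() specialization before forwarding the call.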