//===--- CGClass.cpp - Emit LLVM Code for C++ classes --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with C++ code generation of classes
//
//===----------------------------------------------------------------------===//

#include "CGBlocks.h"
#include "CGDebugInfo.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Frontend/CodeGenOptions.h"

using namespace clang;
using namespace CodeGen;

static CharUnits
ComputeNonVirtualBaseClassOffset(ASTContext &Context,
                                 const CXXRecordDecl *DerivedClass,
                                 CastExpr::path_const_iterator Start,
                                 CastExpr::path_const_iterator End) {
  CharUnits Offset = CharUnits::Zero();

  const CXXRecordDecl *RD = DerivedClass;

  for (CastExpr::path_const_iterator I = Start; I != End; ++I) {
    const CXXBaseSpecifier *Base = *I;
    assert(!Base->isVirtual() && "Should not see virtual bases here!");

    // Get the layout.
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());

    // Add the offset.
    Offset += Layout.getBaseClassOffset(BaseDecl);

    RD = BaseDecl;
  }

  return Offset;
}

llvm::Constant *
CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
                                   CastExpr::path_const_iterator PathBegin,
                                   CastExpr::path_const_iterator PathEnd) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  CharUnits Offset =
    ComputeNonVirtualBaseClassOffset(getContext(), ClassDecl,
                                     PathBegin, PathEnd);
  if (Offset.isZero())
    return 0;

  llvm::Type *PtrDiffTy =
    Types.ConvertType(getContext().getPointerDiffType());

  return llvm::ConstantInt::get(PtrDiffTy, Offset.getQuantity());
}

/// Gets the address of a direct base class within a complete object.
/// This should only be used for (1) non-virtual bases or (2) virtual bases
/// when the type is known to be complete (e.g. in complete destructors).
///
/// The object pointed to by 'This' is assumed to be non-null.
llvm::Value *
CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
                                                   const CXXRecordDecl *Derived,
                                                   const CXXRecordDecl *Base,
                                                   bool BaseIsVirtual) {
  // 'this' must be a pointer (in some address space) to Derived.
  assert(This->getType()->isPointerTy() &&
         cast<llvm::PointerType>(This->getType())->getElementType()
           == ConvertType(Derived));

  // Compute the offset of the virtual base.
  CharUnits Offset;
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived);
  if (BaseIsVirtual)
    Offset = Layout.getVBaseClassOffset(Base);
  else
    Offset = Layout.getBaseClassOffset(Base);

  // Shift and cast down to the base type.
  // TODO: for complete types, this should be possible with a GEP.
  llvm::Value *V = This;
  if (Offset.isPositive()) {
    V = Builder.CreateBitCast(V, Int8PtrTy);
    V = Builder.CreateConstInBoundsGEP1_64(V, Offset.getQuantity());
  }
  V = Builder.CreateBitCast(V, ConvertType(Base)->getPointerTo());

  return V;
}

static llvm::Value *
ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ptr,
                                CharUnits nonVirtualOffset,
                                llvm::Value *virtualOffset) {
  // Assert that we have something to do.
  assert(!nonVirtualOffset.isZero() || virtualOffset != 0);

  // Compute the offset from the static and dynamic components.
  llvm::Value *baseOffset;
  if (!nonVirtualOffset.isZero()) {
    baseOffset = llvm::ConstantInt::get(CGF.PtrDiffTy,
                                        nonVirtualOffset.getQuantity());
    if (virtualOffset) {
      baseOffset = CGF.Builder.CreateAdd(virtualOffset, baseOffset);
    }
  } else {
    baseOffset = virtualOffset;
  }

  // Apply the base offset.
  ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8PtrTy);
  ptr = CGF.Builder.CreateInBoundsGEP(ptr, baseOffset, "add.ptr");
  return ptr;
}

llvm::Value *
CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
                                       const CXXRecordDecl *Derived,
                                       CastExpr::path_const_iterator PathBegin,
                                       CastExpr::path_const_iterator PathEnd,
                                       bool NullCheckValue) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  CastExpr::path_const_iterator Start = PathBegin;
  const CXXRecordDecl *VBase = 0;

  // Sema has done some convenient canonicalization here: if the
  // access path involved any virtual steps, the conversion path will
  // *start* with a step down to the correct virtual base subobject,
  // and hence will not require any further steps.
  if ((*Start)->isVirtual()) {
    VBase =
      cast<CXXRecordDecl>((*Start)->getType()->getAs<RecordType>()->getDecl());
    ++Start;
  }

  // Compute the static offset of the ultimate destination within its
  // allocating subobject (the virtual base, if there is one, or else
  // the "complete" object that we see).
  CharUnits NonVirtualOffset =
    ComputeNonVirtualBaseClassOffset(getContext(), VBase ? VBase : Derived,
                                     Start, PathEnd);

  // If there's a virtual step, we can sometimes "devirtualize" it.
  // For now, that's limited to when the derived type is final.
  // TODO: "devirtualize" this for accesses to known-complete objects.
  if (VBase && Derived->hasAttr<FinalAttr>()) {
    const ASTRecordLayout &layout = getContext().getASTRecordLayout(Derived);
    CharUnits vBaseOffset = layout.getVBaseClassOffset(VBase);
    NonVirtualOffset += vBaseOffset;
    VBase = 0; // we no longer have a virtual step
  }

  // Get the base pointer type.
  llvm::Type *BasePtrTy =
    ConvertType((PathEnd[-1])->getType())->getPointerTo();

  // If the static offset is zero and we don't have a virtual step,
  // just do a bitcast; null checks are unnecessary.
  if (NonVirtualOffset.isZero() && !VBase) {
    return Builder.CreateBitCast(Value, BasePtrTy);
  }

  llvm::BasicBlock *origBB = 0;
  llvm::BasicBlock *endBB = 0;

  // Skip over the offset (and the vtable load) if we're supposed to
  // null-check the pointer.
  if (NullCheckValue) {
    origBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("cast.notnull");
    endBB = createBasicBlock("cast.end");

    llvm::Value *isNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(isNull, endBB, notNullBB);
    EmitBlock(notNullBB);
  }

  // Compute the virtual offset.
  llvm::Value *VirtualOffset = 0;
  if (VBase) {
    VirtualOffset =
      CGM.getCXXABI().GetVirtualBaseClassOffset(*this, Value, Derived, VBase);
  }

  // Apply both offsets.
  Value = ApplyNonVirtualAndVirtualOffset(*this, Value,
                                          NonVirtualOffset,
                                          VirtualOffset);

  // Cast to the destination type.
  Value = Builder.CreateBitCast(Value, BasePtrTy);

  // Build a phi if we needed a null check.
  if (NullCheckValue) {
    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    Builder.CreateBr(endBB);
    EmitBlock(endBB);

    llvm::PHINode *PHI = Builder.CreatePHI(BasePtrTy, 2, "cast.result");
    PHI->addIncoming(Value, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(BasePtrTy), origBB);
    Value = PHI;
  }

  return Value;
}

llvm::Value *
CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
                                          const CXXRecordDecl *Derived,
                                        CastExpr::path_const_iterator PathBegin,
                                          CastExpr::path_const_iterator PathEnd,
                                          bool NullCheckValue) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  QualType DerivedTy =
    getContext().getCanonicalType(getContext().getTagDeclType(Derived));
  llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo();

  llvm::Value *NonVirtualOffset =
    CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd);

  if (!NonVirtualOffset) {
    // No offset, we can just cast back.
    return Builder.CreateBitCast(Value, DerivedPtrTy);
  }

  llvm::BasicBlock *CastNull = 0;
  llvm::BasicBlock *CastNotNull = 0;
  llvm::BasicBlock *CastEnd = 0;

  if (NullCheckValue) {
    CastNull = createBasicBlock("cast.null");
    CastNotNull = createBasicBlock("cast.notnull");
    CastEnd = createBasicBlock("cast.end");

    llvm::Value *IsNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  // Apply the offset.
  Value = Builder.CreateBitCast(Value, Int8PtrTy);
  Value = Builder.CreateGEP(Value, Builder.CreateNeg(NonVirtualOffset),
                            "sub.ptr");

  // Just cast.
  Value = Builder.CreateBitCast(Value, DerivedPtrTy);

  if (NullCheckValue) {
    Builder.CreateBr(CastEnd);
    EmitBlock(CastNull);
    Builder.CreateBr(CastEnd);
    EmitBlock(CastEnd);

    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()),
                     CastNull);
    Value = PHI;
  }

  return Value;
}

llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD,
                                              bool ForVirtualBase,
                                              bool Delegating) {
  if (!CGM.getCXXABI().NeedsVTTParameter(GD)) {
    // This constructor/destructor does not need a VTT parameter.
    return 0;
  }

  const CXXRecordDecl *RD = cast<CXXMethodDecl>(CurCodeDecl)->getParent();
  const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent();

  llvm::Value *VTT;

  uint64_t SubVTTIndex;

  if (Delegating) {
    // If this is a delegating constructor call, just load the VTT.
    return LoadCXXVTT();
  } else if (RD == Base) {
    // If the record matches the base, this is the complete ctor/dtor
    // variant calling the base variant in a class with virtual bases.
    assert(!CGM.getCXXABI().NeedsVTTParameter(CurGD) &&
           "doing no-op VTT offset in base dtor/ctor?");
    assert(!ForVirtualBase && "Can't have same class as virtual base!");
    SubVTTIndex = 0;
  } else {
    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
    CharUnits BaseOffset = ForVirtualBase ?
      Layout.getVBaseClassOffset(Base) :
      Layout.getBaseClassOffset(Base);

    SubVTTIndex =
      CGM.getVTables().getSubVTTIndex(RD, BaseSubobject(Base, BaseOffset));
    assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!");
  }

  if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
    // A VTT parameter was passed to the constructor, use it.
    VTT = LoadCXXVTT();
    VTT = Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex);
  } else {
    // We're the complete constructor, so get the VTT by name.
    VTT = CGM.getVTables().GetAddrOfVTT(RD);
    VTT = Builder.CreateConstInBoundsGEP2_64(VTT, 0, SubVTTIndex);
  }

  return VTT;
}

namespace {
  /// Call the destructor for a direct base class.
  struct CallBaseDtor : EHScopeStack::Cleanup {
    const CXXRecordDecl *BaseClass;
    bool BaseIsVirtual;
    CallBaseDtor(const CXXRecordDecl *Base, bool BaseIsVirtual)
      : BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const CXXRecordDecl *DerivedClass =
        cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent();

      const CXXDestructorDecl *D = BaseClass->getDestructor();
      llvm::Value *Addr =
        CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThis(),
                                                  DerivedClass, BaseClass,
                                                  BaseIsVirtual);
      CGF.EmitCXXDestructorCall(D, Dtor_Base, BaseIsVirtual,
                                /*Delegating=*/false, Addr);
    }
  };

  /// A visitor which checks whether an initializer uses 'this' in a
  /// way which requires the vtable to be properly set.
  struct DynamicThisUseChecker : EvaluatedExprVisitor<DynamicThisUseChecker> {
    typedef EvaluatedExprVisitor<DynamicThisUseChecker> super;

    bool UsesThis;

    DynamicThisUseChecker(ASTContext &C) : super(C), UsesThis(false) {}

    // Black-list all explicit and implicit references to 'this'.
    //
    // Do we need to worry about external references to 'this' derived
    // from arbitrary code? If so, then anything which runs arbitrary
    // external code might potentially access the vtable.
    void VisitCXXThisExpr(CXXThisExpr *E) { UsesThis = true; }
  };
}

static bool BaseInitializerUsesThis(ASTContext &C, const Expr *Init) {
  DynamicThisUseChecker Checker(C);
  Checker.Visit(const_cast<Expr*>(Init));
  return Checker.UsesThis;
}

static void EmitBaseInitializer(CodeGenFunction &CGF,
                                const CXXRecordDecl *ClassDecl,
                                CXXCtorInitializer *BaseInit,
                                CXXCtorType CtorType) {
  assert(BaseInit->isBaseInitializer() &&
         "Must have base initializer!");

  llvm::Value *ThisPtr = CGF.LoadCXXThis();

  const Type *BaseType = BaseInit->getBaseClass();
  CXXRecordDecl *BaseClassDecl =
    cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());

  bool isBaseVirtual = BaseInit->isBaseVirtual();

  // The base constructor doesn't construct virtual bases.
  if (CtorType == Ctor_Base && isBaseVirtual)
    return;

  // If the initializer for the base (other than the constructor
  // itself) accesses 'this' in any way, we need to initialize the
  // vtables.
  if (BaseInitializerUsesThis(CGF.getContext(), BaseInit->getInit()))
    CGF.InitializeVTablePointers(ClassDecl);

  // We can pretend to be a complete class because it only matters for
  // virtual bases, and we only do virtual bases for complete ctors.
  llvm::Value *V =
    CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl,
                                              BaseClassDecl,
                                              isBaseVirtual);
  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(BaseType);
  AggValueSlot AggSlot =
    AggValueSlot::forAddr(V, Alignment, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased);

  CGF.EmitAggExpr(BaseInit->getInit(), AggSlot);

  if (CGF.CGM.getLangOpts().Exceptions &&
      !BaseClassDecl->hasTrivialDestructor())
    CGF.EHStack.pushCleanup<CallBaseDtor>(EHCleanup, BaseClassDecl,
                                          isBaseVirtual);
}

static void EmitAggMemberInitializer(CodeGenFunction &CGF,
                                     LValue LHS,
                                     Expr *Init,
                                     llvm::Value *ArrayIndexVar,
                                     QualType T,
                                     ArrayRef<VarDecl *> ArrayIndexes,
                                     unsigned Index) {
  if (Index == ArrayIndexes.size()) {
    LValue LV = LHS;

    if (ArrayIndexVar) {
      // If we have an array index variable, load it and use it as an offset.
      // Then, increment the value.
      llvm::Value *Dest = LHS.getAddress();
      llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar);
      Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress");
      llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1);
      Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc");
      CGF.Builder.CreateStore(Next, ArrayIndexVar);

      // Update the LValue.
      LV.setAddress(Dest);
      CharUnits Align = CGF.getContext().getTypeAlignInChars(T);
      LV.setAlignment(std::min(Align, LV.getAlignment()));
    }

    switch (CGF.getEvaluationKind(T)) {
    case TEK_Scalar:
      CGF.EmitScalarInit(Init, /*decl*/ 0, LV, false);
      break;
    case TEK_Complex:
      CGF.EmitComplexExprIntoLValue(Init, LV, /*isInit*/ true);
      break;
    case TEK_Aggregate: {
      AggValueSlot Slot =
        AggValueSlot::forLValue(LV,
                                AggValueSlot::IsDestructed,
                                AggValueSlot::DoesNotNeedGCBarriers,
                                AggValueSlot::IsNotAliased);

      CGF.EmitAggExpr(Init, Slot);
      break;
    }
    }

    return;
  }

  const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(T);
  assert(Array && "Array initialization without the array type?");
  llvm::Value *IndexVar
    = CGF.GetAddrOfLocalVar(ArrayIndexes[Index]);
  assert(IndexVar && "Array index variable not loaded");

  // Initialize this index variable to zero.
  llvm::Value* Zero
    = llvm::Constant::getNullValue(
                              CGF.ConvertType(CGF.getContext().getSizeType()));
  CGF.Builder.CreateStore(Zero, IndexVar);

  // Start the loop with a block that tests the condition.
  llvm::BasicBlock *CondBlock = CGF.createBasicBlock("for.cond");
  llvm::BasicBlock *AfterFor = CGF.createBasicBlock("for.end");

  CGF.EmitBlock(CondBlock);

  llvm::BasicBlock *ForBody = CGF.createBasicBlock("for.body");
  // Generate: if (loop-index < number-of-elements) fall to the loop body,
  // otherwise, go to the block after the for-loop.
  uint64_t NumElements = Array->getSize().getZExtValue();
  llvm::Value *Counter = CGF.Builder.CreateLoad(IndexVar);
  llvm::Value *NumElementsPtr =
    llvm::ConstantInt::get(Counter->getType(), NumElements);
  llvm::Value *IsLess = CGF.Builder.CreateICmpULT(Counter, NumElementsPtr,
                                                  "isless");

  // If the condition is true, execute the body.
  CGF.Builder.CreateCondBr(IsLess, ForBody, AfterFor);

  CGF.EmitBlock(ForBody);
  llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc");

  // Inside the loop body recurse to emit the inner loop or, eventually, the
  // constructor call.
  EmitAggMemberInitializer(CGF, LHS, Init, ArrayIndexVar,
                           Array->getElementType(), ArrayIndexes, Index + 1);

  CGF.EmitBlock(ContinueBlock);

  // Emit the increment of the loop counter.
  llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1);
  Counter = CGF.Builder.CreateLoad(IndexVar);
  NextVal = CGF.Builder.CreateAdd(Counter, NextVal, "inc");
  CGF.Builder.CreateStore(NextVal, IndexVar);

  // Finally, branch back up to the condition for the next iteration.
  CGF.EmitBranch(CondBlock);

  // Emit the fall-through block.
  CGF.EmitBlock(AfterFor, true);
}

static void EmitMemberInitializer(CodeGenFunction &CGF,
                                  const CXXRecordDecl *ClassDecl,
                                  CXXCtorInitializer *MemberInit,
                                  const CXXConstructorDecl *Constructor,
                                  FunctionArgList &Args) {
  assert(MemberInit->isAnyMemberInitializer() &&
         "Must have member initializer!");
  assert(MemberInit->getInit() && "Must have initializer!");

  // non-static data member initializers.
  FieldDecl *Field = MemberInit->getAnyMember();
  QualType FieldType = Field->getType();

  llvm::Value *ThisPtr = CGF.LoadCXXThis();
  QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
  LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);

  if (MemberInit->isIndirectMemberInitializer()) {
    // If we are initializing an anonymous union field, drill down to
    // the field.
    IndirectFieldDecl *IndirectField = MemberInit->getIndirectMember();
    IndirectFieldDecl::chain_iterator I = IndirectField->chain_begin(),
      IEnd = IndirectField->chain_end();
    for ( ; I != IEnd; ++I)
      LHS = CGF.EmitLValueForFieldInitialization(LHS, cast<FieldDecl>(*I));
    FieldType = MemberInit->getIndirectMember()->getAnonField()->getType();
  } else {
    LHS = CGF.EmitLValueForFieldInitialization(LHS, Field);
  }

  // Special case: if we are in a copy or move constructor, and we are copying
  // an array of PODs or classes with trivial copy constructors, ignore the
  // AST and perform the copy we know is equivalent.
  // FIXME: This is hacky at best... if we had a bit more explicit information
  // in the AST, we could generalize it more easily.
  const ConstantArrayType *Array
    = CGF.getContext().getAsConstantArrayType(FieldType);
  if (Array && Constructor->isDefaulted() &&
      Constructor->isCopyOrMoveConstructor()) {
    QualType BaseElementTy = CGF.getContext().getBaseElementType(Array);
    CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());
    if (BaseElementTy.isPODType(CGF.getContext()) ||
        (CE && CE->getConstructor()->isTrivial())) {
      // Find the source pointer. We know it's the last argument because
      // we know we're in an implicit copy constructor.
      unsigned SrcArgIndex = Args.size() - 1;
      llvm::Value *SrcPtr
        = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex]));
      LValue ThisRHSLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
      LValue Src = CGF.EmitLValueForFieldInitialization(ThisRHSLV, Field);

      // Copy the aggregate.
      CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType,
                            LHS.isVolatileQualified());
      return;
    }
  }

  ArrayRef<VarDecl *> ArrayIndexes;
  if (MemberInit->getNumArrayIndices())
    ArrayIndexes = MemberInit->getArrayIndexes();
  CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit(), ArrayIndexes);
}

void CodeGenFunction::EmitInitializerForField(FieldDecl *Field,
                                              LValue LHS, Expr *Init,
                                             ArrayRef<VarDecl *> ArrayIndexes) {
  QualType FieldType = Field->getType();
  switch (getEvaluationKind(FieldType)) {
  case TEK_Scalar:
    if (LHS.isSimple()) {
      EmitExprAsInit(Init, Field, LHS, false);
    } else {
      RValue RHS = RValue::get(EmitScalarExpr(Init));
      EmitStoreThroughLValue(RHS, LHS);
    }
    break;
  case TEK_Complex:
    EmitComplexExprIntoLValue(Init, LHS, /*isInit*/ true);
    break;
  case TEK_Aggregate: {
    llvm::Value *ArrayIndexVar = 0;
    if (ArrayIndexes.size()) {
      llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

      // The LHS is a pointer to the first object we'll be constructing, as
      // a flat array.
      QualType BaseElementTy = getContext().getBaseElementType(FieldType);
      llvm::Type *BasePtr = ConvertType(BaseElementTy);
      BasePtr = llvm::PointerType::getUnqual(BasePtr);
      llvm::Value *BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(),
                                                       BasePtr);
      LHS = MakeAddrLValue(BaseAddrPtr, BaseElementTy);

      // Create an array index that will be used to walk over all of the
      // objects we're constructing.
      ArrayIndexVar = CreateTempAlloca(SizeTy, "object.index");
      llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
      Builder.CreateStore(Zero, ArrayIndexVar);

      // Emit the block variables for the array indices, if any.
      for (unsigned I = 0, N = ArrayIndexes.size(); I != N; ++I)
        EmitAutoVarDecl(*ArrayIndexes[I]);
    }

    EmitAggMemberInitializer(*this, LHS, Init, ArrayIndexVar, FieldType,
                             ArrayIndexes, 0);
  }
  }

  // Ensure that we destroy this object if an exception is thrown
  // later in the constructor.
  QualType::DestructionKind dtorKind = FieldType.isDestructedType();
  if (needsEHCleanup(dtorKind))
    pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
}

/// Checks whether the given constructor is a valid subject for the
/// complete-to-base constructor delegation optimization, i.e.
/// emitting the complete constructor as a simple call to the base
/// constructor.
static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor) {

  // Currently we disable the optimization for classes with virtual
  // bases because (1) the addresses of parameter variables need to be
  // consistent across all initializers but (2) the delegate function
  // call necessarily creates a second copy of the parameter variable.
  //
  // The limiting example (purely theoretical AFAIK):
  //   struct A { A(int &c) { c++; } };
  //   struct B : virtual A {
  //     B(int count) : A(count) { printf("%d\n", count); }
  //   };
  // ...although even this example could in principle be emitted as a
  // delegation since the address of the parameter doesn't escape.
  if (Ctor->getParent()->getNumVBases()) {
    // TODO: white-list trivial vbase initializers. This case wouldn't
    // be subject to the restrictions below.

    // TODO: white-list cases where:
    //  - there are no non-reference parameters to the constructor
    //  - the initializers don't access any non-reference parameters
    //  - the initializers don't take the address of non-reference
    //    parameters
    //  - etc.
    // If we ever add any of the above cases, remember that:
    //  - function-try-blocks will always blacklist this optimization
    //  - we need to perform the constructor prologue and cleanup in
    //    EmitConstructorBody.

    return false;
  }

  // We also disable the optimization for variadic functions because
  // it's impossible to "re-pass" varargs.
  if (Ctor->getType()->getAs<FunctionProtoType>()->isVariadic())
    return false;

  // FIXME: Decide if we can do a delegation of a delegating constructor.
  if (Ctor->isDelegatingConstructor())
    return false;

  return true;
}

/// EmitConstructorBody - Emits the body of the current constructor.
void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
  const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(CurGD.getDecl());
  CXXCtorType CtorType = CurGD.getCtorType();

  // Before we go any further, try the complete->base constructor
  // delegation optimization.
  if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor) &&
      CGM.getTarget().getCXXABI().hasConstructorVariants()) {
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitLocation(Builder, Ctor->getLocEnd());
    EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args, Ctor->getLocEnd());
    return;
  }

  Stmt *Body = Ctor->getBody();

  // Enter the function-try-block before the constructor prologue if
  // applicable.
  bool IsTryBody = (Body && isa<CXXTryStmt>(Body));
  if (IsTryBody)
    EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);

  RunCleanupsScope RunCleanups(*this);

  // TODO: in restricted cases, we can emit the vbase initializers of
  // a complete ctor and then delegate to the base ctor.

  // Emit the constructor prologue, i.e. the base and member
  // initializers.
  EmitCtorPrologue(Ctor, CtorType, Args);

  // Emit the body of the statement.
  if (IsTryBody)
    EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
  else if (Body)
    EmitStmt(Body);

  // Emit any cleanup blocks associated with the member or base
  // initializers, which includes (along the exceptional path) the
  // destructors for those members and bases that were fully
  // constructed.
  RunCleanups.ForceCleanup();

  if (IsTryBody)
    ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
}

namespace {
  /// RAII object to indicate that codegen is copying the value representation
  /// instead of the object representation. Useful when copying a struct or
  /// class which has uninitialized members and we're only performing
  /// lvalue-to-rvalue conversion on the object but not its members.
  class CopyingValueRepresentation {
  public:
    explicit CopyingValueRepresentation(CodeGenFunction &CGF)
      : CGF(CGF), SO(*CGF.SanOpts), OldSanOpts(CGF.SanOpts) {
      SO.Bool = false;
      SO.Enum = false;
      CGF.SanOpts = &SO;
    }
    ~CopyingValueRepresentation() {
      CGF.SanOpts = OldSanOpts;
    }
  private:
    CodeGenFunction &CGF;
    SanitizerOptions SO;
    const SanitizerOptions *OldSanOpts;
  };
}

namespace {
  class FieldMemcpyizer {
  public:
    FieldMemcpyizer(CodeGenFunction &CGF, const CXXRecordDecl *ClassDecl,
                    const VarDecl *SrcRec)
      : CGF(CGF), ClassDecl(ClassDecl), SrcRec(SrcRec),
        RecLayout(CGF.getContext().getASTRecordLayout(ClassDecl)),
        FirstField(0), LastField(0), FirstFieldOffset(0), LastFieldOffset(0),
        LastAddedFieldIndex(0) { }

    static bool isMemcpyableField(FieldDecl *F) {
      Qualifiers Qual = F->getType().getQualifiers();
      if (Qual.hasVolatile() || Qual.hasObjCLifetime())
        return false;
      return true;
    }

    void addMemcpyableField(FieldDecl *F) {
      if (FirstField == 0)
        addInitialField(F);
      else
        addNextField(F);
    }

    CharUnits getMemcpySize() const {
      unsigned LastFieldSize =
        LastField->isBitField() ?
          LastField->getBitWidthValue(CGF.getContext()) :
          CGF.getContext().getTypeSize(LastField->getType());
      uint64_t MemcpySizeBits =
        LastFieldOffset + LastFieldSize - FirstFieldOffset +
        CGF.getContext().getCharWidth() - 1;
      CharUnits MemcpySize =
        CGF.getContext().toCharUnitsFromBits(MemcpySizeBits);
      return MemcpySize;
    }

    void emitMemcpy() {
      // Give the subclass a chance to bail out if it feels the memcpy isn't
      // worth it (e.g. Hasn't aggregated enough data).
      if (FirstField == 0) {
        return;
      }

      CharUnits Alignment;

      if (FirstField->isBitField()) {
        const CGRecordLayout &RL =
          CGF.getTypes().getCGRecordLayout(FirstField->getParent());
        const CGBitFieldInfo &BFInfo = RL.getBitFieldInfo(FirstField);
        Alignment = CharUnits::fromQuantity(BFInfo.StorageAlignment);
      } else {
        Alignment = CGF.getContext().getDeclAlign(FirstField);
      }

      assert((CGF.getContext().toCharUnitsFromBits(FirstFieldOffset) %
              Alignment) == 0 && "Bad field alignment.");

      CharUnits MemcpySize = getMemcpySize();
      QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
      llvm::Value *ThisPtr = CGF.LoadCXXThis();
      LValue DestLV = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
      LValue Dest = CGF.EmitLValueForFieldInitialization(DestLV, FirstField);
      llvm::Value *SrcPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(SrcRec));
      LValue SrcLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
      LValue Src = CGF.EmitLValueForFieldInitialization(SrcLV, FirstField);

      emitMemcpyIR(Dest.isBitField() ? Dest.getBitFieldAddr() : Dest.getAddress(),
                   Src.isBitField() ? Src.getBitFieldAddr() : Src.getAddress(),
                   MemcpySize, Alignment);
      reset();
    }

    void reset() {
      FirstField = 0;
    }

  protected:
    CodeGenFunction &CGF;
    const CXXRecordDecl *ClassDecl;

  private:

    void emitMemcpyIR(llvm::Value *DestPtr, llvm::Value *SrcPtr,
                      CharUnits Size, CharUnits Alignment) {
      llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
      llvm::Type *DBP =
        llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), DPT->getAddressSpace());
      DestPtr = CGF.Builder.CreateBitCast(DestPtr, DBP);

      llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
      llvm::Type *SBP =
        llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), SPT->getAddressSpace());
      SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, SBP);

      CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, Size.getQuantity(),
                               Alignment.getQuantity());
    }

    void addInitialField(FieldDecl *F) {
      FirstField = F;
      LastField = F;
      FirstFieldOffset = RecLayout.getFieldOffset(F->getFieldIndex());
      LastFieldOffset = FirstFieldOffset;
      LastAddedFieldIndex = F->getFieldIndex();
      return;
    }

    void addNextField(FieldDecl *F) {
      // For the most part, the following invariant will hold:
      //   F->getFieldIndex() == LastAddedFieldIndex + 1
      // The one exception is that Sema won't add a copy-initializer for an
      // unnamed bitfield, which will show up here as a gap in the sequence.
      assert(F->getFieldIndex() >= LastAddedFieldIndex + 1 &&
             "Cannot aggregate fields out of order.");
      LastAddedFieldIndex = F->getFieldIndex();

      // The 'first' and 'last' fields are chosen by offset, rather than field
      // index. This allows the code to support bitfields, as well as regular
      // fields.
      uint64_t FOffset = RecLayout.getFieldOffset(F->getFieldIndex());
      if (FOffset < FirstFieldOffset) {
        FirstField = F;
        FirstFieldOffset = FOffset;
      } else if (FOffset > LastFieldOffset) {
        LastField = F;
        LastFieldOffset = FOffset;
      }
    }

    const VarDecl *SrcRec;
    const ASTRecordLayout &RecLayout;
    FieldDecl *FirstField;
    FieldDecl *LastField;
    uint64_t FirstFieldOffset, LastFieldOffset;
    unsigned LastAddedFieldIndex;
  };

  class ConstructorMemcpyizer : public FieldMemcpyizer {
  private:

    /// Get source argument for copy constructor. Returns null if not a copy
    /// constructor.
    static const VarDecl* getTrivialCopySource(const CXXConstructorDecl *CD,
                                               FunctionArgList &Args) {
      if (CD->isCopyOrMoveConstructor() && CD->isDefaulted())
        return Args[Args.size() - 1];
      return 0;
    }

    // Returns true if a CXXCtorInitializer represents a member initialization
    // that can be rolled into a memcpy.
    bool isMemberInitMemcpyable(CXXCtorInitializer *MemberInit) const {
      if (!MemcpyableCtor)
        return false;
      FieldDecl *Field = MemberInit->getMember();
      assert(Field != 0 && "No field for member init.");
      QualType FieldType = Field->getType();
      CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());

      // Bail out on non-POD, not-trivially-constructible members.
      if (!(CE && CE->getConstructor()->isTrivial()) &&
          !(FieldType.isTriviallyCopyableType(CGF.getContext()) ||
            FieldType->isReferenceType()))
        return false;

      // Bail out on volatile fields.
      if (!isMemcpyableField(Field))
        return false;

      // Otherwise we're good.
      return true;
    }

  public:
    ConstructorMemcpyizer(CodeGenFunction &CGF, const CXXConstructorDecl *CD,
                          FunctionArgList &Args)
      : FieldMemcpyizer(CGF, CD->getParent(), getTrivialCopySource(CD, Args)),
        ConstructorDecl(CD),
        MemcpyableCtor(CD->isDefaulted() &&
                       CD->isCopyOrMoveConstructor() &&
                       CGF.getLangOpts().getGC() == LangOptions::NonGC),
        Args(Args) { }

    void addMemberInitializer(CXXCtorInitializer *MemberInit) {
      if (isMemberInitMemcpyable(MemberInit)) {
        AggregatedInits.push_back(MemberInit);
        addMemcpyableField(MemberInit->getMember());
      } else {
        emitAggregatedInits();
        EmitMemberInitializer(CGF, ConstructorDecl->getParent(), MemberInit,
                              ConstructorDecl, Args);
      }
    }

    void emitAggregatedInits() {
      if (AggregatedInits.size() <= 1) {
        // This memcpy is too small to be worthwhile. Fall back on default
        // codegen.
        if (!AggregatedInits.empty()) {
          CopyingValueRepresentation CVR(CGF);
          EmitMemberInitializer(CGF, ConstructorDecl->getParent(),
                                AggregatedInits[0], ConstructorDecl, Args);
        }
        reset();
        return;
      }

      pushEHDestructors();
      emitMemcpy();
      AggregatedInits.clear();
    }

    void pushEHDestructors() {
      llvm::Value *ThisPtr = CGF.LoadCXXThis();
      QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
      LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);

      for (unsigned i = 0; i < AggregatedInits.size(); ++i) {
        QualType FieldType = AggregatedInits[i]->getMember()->getType();
        QualType::DestructionKind dtorKind = FieldType.isDestructedType();
        if (CGF.needsEHCleanup(dtorKind))
          CGF.pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
      }
    }

    void finish() {
      emitAggregatedInits();
    }

  private:
    const CXXConstructorDecl *ConstructorDecl;
    bool MemcpyableCtor;
    FunctionArgList &Args;
    SmallVector<CXXCtorInitializer*, 16> AggregatedInits;
  };

  class AssignmentMemcpyizer : public FieldMemcpyizer {
  private:

    // Returns the memcpyable field copied by the given statement, if one
    // exists. Otherwise returns null.
    FieldDecl *getMemcpyableField(Stmt *S) {
      if (!AssignmentsMemcpyable)
        return 0;
      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(S)) {
        // Recognise trivial assignments.
        if (BO->getOpcode() != BO_Assign)
          return 0;
        MemberExpr *ME = dyn_cast<MemberExpr>(BO->getLHS());
        if (!ME)
          return 0;
        FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
        if (!Field || !isMemcpyableField(Field))
          return 0;
        Stmt *RHS = BO->getRHS();
        if (ImplicitCastExpr *EC = dyn_cast<ImplicitCastExpr>(RHS))
          RHS = EC->getSubExpr();
        if (!RHS)
          return 0;
        MemberExpr *ME2 = dyn_cast<MemberExpr>(RHS);
        if (!ME2 || dyn_cast<FieldDecl>(ME2->getMemberDecl()) != Field)
          return 0;
        return Field;
      } else if (CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(S)) {
        CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MCE->getCalleeDecl());
        if (!(MD && (MD->isCopyAssignmentOperator() ||
                     MD->isMoveAssignmentOperator()) &&
              MD->isTrivial()))
          return 0;
        MemberExpr *IOA = dyn_cast<MemberExpr>(MCE->getImplicitObjectArgument());
        if (!IOA)
          return 0;
        FieldDecl *Field = dyn_cast<FieldDecl>(IOA->getMemberDecl());
        if (!Field || !isMemcpyableField(Field))
          return 0;
        MemberExpr *Arg0 = dyn_cast<MemberExpr>(MCE->getArg(0));
        if (!Arg0 || Field != dyn_cast<FieldDecl>(Arg0->getMemberDecl()))
          return 0;
        return Field;
      } else if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
        FunctionDecl *FD = dyn_cast<FunctionDecl>(CE->getCalleeDecl());
        if (!FD || FD->getBuiltinID() != Builtin::BI__builtin_memcpy)
          return 0;
        Expr *DstPtr = CE->getArg(0);
        if (ImplicitCastExpr *DC = dyn_cast<ImplicitCastExpr>(DstPtr))
          DstPtr = DC->getSubExpr();
        UnaryOperator *DUO = dyn_cast<UnaryOperator>(DstPtr);
        if (!DUO || DUO->getOpcode() != UO_AddrOf)
          return 0;
        MemberExpr *ME = dyn_cast<MemberExpr>(DUO->getSubExpr());
        if (!ME)
          return 0;
        FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
        if (!Field || !isMemcpyableField(Field))
          return 0;
        Expr *SrcPtr = CE->getArg(1);
        if (ImplicitCastExpr *SC = dyn_cast<ImplicitCastExpr>(SrcPtr))
          SrcPtr = SC->getSubExpr();
        UnaryOperator *SUO = dyn_cast<UnaryOperator>(SrcPtr);
        if (!SUO || SUO->getOpcode() != UO_AddrOf)
          return 0;
        MemberExpr *ME2 = dyn_cast<MemberExpr>(SUO->getSubExpr());
        if (!ME2 || Field != dyn_cast<FieldDecl>(ME2->getMemberDecl()))
          return 0;
        return Field;
      }

      return 0;
    }

    bool AssignmentsMemcpyable;
    SmallVector<Stmt*, 16> AggregatedStmts;

  public:

    AssignmentMemcpyizer(CodeGenFunction &CGF, const CXXMethodDecl *AD,
                         FunctionArgList &Args)
      : FieldMemcpyizer(CGF, AD->getParent(), Args[Args.size() - 1]),
        AssignmentsMemcpyable(CGF.getLangOpts().getGC() == LangOptions::NonGC) {
      assert(Args.size() == 2);
    }

    void emitAssignment(Stmt *S) {
      FieldDecl *F = getMemcpyableField(S);
      if (F) {
        addMemcpyableField(F);
        AggregatedStmts.push_back(S);
      } else {
        emitAggregatedStmts();
        CGF.EmitStmt(S);
      }
    }

    void emitAggregatedStmts() {
      if (AggregatedStmts.size() <= 1) {
        if (!AggregatedStmts.empty()) {
          CopyingValueRepresentation CVR(CGF);
          CGF.EmitStmt(AggregatedStmts[0]);
        }
        reset();
      }

      emitMemcpy();
      AggregatedStmts.clear();
    }

    void finish() {
      emitAggregatedStmts();
    }
  };

}

/// EmitCtorPrologue - This routine generates necessary code to initialize
/// base classes and non-static data members belonging to this constructor.
void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
                                       CXXCtorType CtorType,
                                       FunctionArgList &Args) {
  if (CD->isDelegatingConstructor())
    return EmitDelegatingCXXConstructorCall(CD, Args);

  const CXXRecordDecl *ClassDecl = CD->getParent();

  CXXConstructorDecl::init_const_iterator B = CD->init_begin(),
                                          E = CD->init_end();

  llvm::BasicBlock *BaseCtorContinueBB = 0;
  if (ClassDecl->getNumVBases() &&
      !CGM.getTarget().getCXXABI().hasConstructorVariants()) {
    // The ABIs that don't have constructor variants need to put a branch
    // before the virtual base initialization code.
    BaseCtorContinueBB =
      CGM.getCXXABI().EmitCtorCompleteObjectHandler(*this, ClassDecl);
    assert(BaseCtorContinueBB);
  }

  // Virtual base initializers first.
  for (; B != E && (*B)->isBaseInitializer() && (*B)->isBaseVirtual(); B++) {
    EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
  }

  if (BaseCtorContinueBB) {
    // Complete object handler should continue to the remaining initializers.
    Builder.CreateBr(BaseCtorContinueBB);
    EmitBlock(BaseCtorContinueBB);
  }

  // Then, non-virtual base initializers.
  for (; B != E && (*B)->isBaseInitializer(); B++) {
    assert(!(*B)->isBaseVirtual());
    EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
  }

  InitializeVTablePointers(ClassDecl);

  // And finally, initialize class members.
  FieldConstructionScope FCS(*this, CXXThisValue);
  ConstructorMemcpyizer CM(*this, CD, Args);
  for (; B != E; B++) {
    CXXCtorInitializer *Member = (*B);
    assert(!Member->isBaseInitializer());
    assert(Member->isAnyMemberInitializer() &&
           "Delegating initializer on non-delegating constructor");
    CM.addMemberInitializer(Member);
  }
  CM.finish();
}

static bool
FieldHasTrivialDestructorBody(ASTContext &Context, const FieldDecl *Field);

static bool
HasTrivialDestructorBody(ASTContext &Context,
                         const CXXRecordDecl *BaseClassDecl,
                         const CXXRecordDecl *MostDerivedClassDecl)
{
  // If the destructor is trivial we don't have to check anything else.
  if (BaseClassDecl->hasTrivialDestructor())
    return true;

  if (!BaseClassDecl->getDestructor()->hasTrivialBody())
    return false;

  // Check fields.
  for (CXXRecordDecl::field_iterator I = BaseClassDecl->field_begin(),
       E = BaseClassDecl->field_end(); I != E; ++I) {
    const FieldDecl *Field = *I;

    if (!FieldHasTrivialDestructorBody(Context, Field))
      return false;
  }

  // Check non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I =
       BaseClassDecl->bases_begin(), E = BaseClassDecl->bases_end();
       I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *NonVirtualBase =
      cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
    if (!HasTrivialDestructorBody(Context, NonVirtualBase,
                                  MostDerivedClassDecl))
      return false;
  }

  if (BaseClassDecl == MostDerivedClassDecl) {
    // Check virtual bases.
    for (CXXRecordDecl::base_class_const_iterator I =
         BaseClassDecl->vbases_begin(), E = BaseClassDecl->vbases_end();
         I != E; ++I) {
      const CXXRecordDecl *VirtualBase =
        cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
      if (!HasTrivialDestructorBody(Context, VirtualBase,
                                    MostDerivedClassDecl))
        return false;
    }
  }

  return true;
}

static bool
FieldHasTrivialDestructorBody(ASTContext &Context,
                              const FieldDecl *Field)
{
  QualType FieldBaseElementType = Context.getBaseElementType(Field->getType());

  const RecordType *RT = FieldBaseElementType->getAs<RecordType>();
  if (!RT)
    return true;

  CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
  return HasTrivialDestructorBody(Context, FieldClassDecl, FieldClassDecl);
}

/// CanSkipVTablePointerInitialization - Check whether we need to initialize
/// any vtable pointers before calling this destructor.
static bool CanSkipVTablePointerInitialization(ASTContext &Context,
                                               const CXXDestructorDecl *Dtor) {
  if (!Dtor->hasTrivialBody())
    return false;

  // Check the fields.
  const CXXRecordDecl *ClassDecl = Dtor->getParent();
  for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
       E = ClassDecl->field_end(); I != E; ++I) {
    const FieldDecl *Field = *I;

    if (!FieldHasTrivialDestructorBody(Context, Field))
      return false;
  }

  return true;
}

/// EmitDestructorBody - Emits the body of the current destructor.
void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
  const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CurGD.getDecl());
  CXXDtorType DtorType = CurGD.getDtorType();

  // The call to operator delete in a deleting destructor happens
  // outside of the function-try-block, which means it's always
  // possible to delegate the destructor body to the complete
  // destructor. Do so.
  if (DtorType == Dtor_Deleting) {
    EnterDtorCleanups(Dtor, Dtor_Deleting);
    EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
                          /*Delegating=*/false, LoadCXXThis());
    PopCleanupBlock();
    return;
  }

  Stmt *Body = Dtor->getBody();

  // If the body is a function-try-block, enter the try before
  // anything else.
  bool isTryBody = (Body && isa<CXXTryStmt>(Body));
  if (isTryBody)
    EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);

  // Enter the epilogue cleanups.
  RunCleanupsScope DtorEpilogue(*this);

  // If this is the complete variant, just invoke the base variant;
  // the epilogue will destruct the virtual bases. But we can't do
  // this optimization if the body is a function-try-block, because
  // we'd introduce *two* handler blocks. In the Microsoft ABI, we
  // always delegate because we might not have a definition in this TU.
  switch (DtorType) {
  case Dtor_Deleting: llvm_unreachable("already handled deleting case");

  case Dtor_Complete:
    assert((Body || getTarget().getCXXABI().isMicrosoft()) &&
           "can't emit a dtor without a body for non-Microsoft ABIs");

    // Enter the cleanup scopes for virtual bases.
    EnterDtorCleanups(Dtor, Dtor_Complete);

    if (!isTryBody) {
      EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
                            /*Delegating=*/false, LoadCXXThis());
      break;
    }
    // Fallthrough: act like we're in the base variant.

  case Dtor_Base:
    assert(Body);

    // Enter the cleanup scopes for fields and non-virtual bases.
    EnterDtorCleanups(Dtor, Dtor_Base);

    // Initialize the vtable pointers before entering the body.
    if (!CanSkipVTablePointerInitialization(getContext(), Dtor))
      InitializeVTablePointers(Dtor->getParent());

    if (isTryBody)
      EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
    else if (Body)
      EmitStmt(Body);
    else {
      assert(Dtor->isImplicit() && "bodyless dtor not implicit");
      // nothing to do besides what's in the epilogue
    }
    // -fapple-kext must inline any call to this dtor into
    // the caller's body.
    if (getLangOpts().AppleKext)
      CurFn->addFnAttr(llvm::Attribute::AlwaysInline);
    break;
  }

  // Jump out through the epilogue cleanups.
  DtorEpilogue.ForceCleanup();

  // Exit the try if applicable.
  if (isTryBody)
    ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
}

void CodeGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &Args) {
  const CXXMethodDecl *AssignOp = cast<CXXMethodDecl>(CurGD.getDecl());
  const Stmt *RootS = AssignOp->getBody();
  assert(isa<CompoundStmt>(RootS) &&
         "Body of an implicit assignment operator should be compound stmt.");
  const CompoundStmt *RootCS = cast<CompoundStmt>(RootS);

  LexicalScope Scope(*this, RootCS->getSourceRange());

  AssignmentMemcpyizer AM(*this, AssignOp, Args);
  for (CompoundStmt::const_body_iterator I = RootCS->body_begin(),
                                         E = RootCS->body_end();
       I != E; ++I) {
    AM.emitAssignment(*I);
  }
  AM.finish();
}

namespace {
  /// Call the operator delete associated with the current destructor.
  struct CallDtorDelete : EHScopeStack::Cleanup {
    CallDtorDelete() {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
      const CXXRecordDecl *ClassDecl = Dtor->getParent();
      CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
                         CGF.getContext().getTagDeclType(ClassDecl));
    }
  };

  struct CallDtorDeleteConditional : EHScopeStack::Cleanup {
    llvm::Value *ShouldDeleteCondition;
  public:
    CallDtorDeleteConditional(llvm::Value *ShouldDeleteCondition)
      : ShouldDeleteCondition(ShouldDeleteCondition) {
      assert(ShouldDeleteCondition != NULL);
    }

    void Emit(CodeGenFunction &CGF, Flags flags) {
      llvm::BasicBlock *callDeleteBB = CGF.createBasicBlock("dtor.call_delete");
      llvm::BasicBlock *continueBB = CGF.createBasicBlock("dtor.continue");
      llvm::Value *ShouldCallDelete
        = CGF.Builder.CreateIsNull(ShouldDeleteCondition);
      CGF.Builder.CreateCondBr(ShouldCallDelete, continueBB, callDeleteBB);

      CGF.EmitBlock(callDeleteBB);
      const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
      const CXXRecordDecl *ClassDecl = Dtor->getParent();
      CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
                         CGF.getContext().getTagDeclType(ClassDecl));
      CGF.Builder.CreateBr(continueBB);

      CGF.EmitBlock(continueBB);
    }
  };

  class DestroyField : public EHScopeStack::Cleanup {
    const FieldDecl *field;
    CodeGenFunction::Destroyer *destroyer;
    bool useEHCleanupForArray;

  public:
    DestroyField(const FieldDecl *field, CodeGenFunction::Destroyer *destroyer,
                 bool useEHCleanupForArray)
      : field(field), destroyer(destroyer),
        useEHCleanupForArray(useEHCleanupForArray) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      // Find the address of the field.
      llvm::Value *thisValue = CGF.LoadCXXThis();
      QualType RecordTy = CGF.getContext().getTagDeclType(field->getParent());
      LValue ThisLV = CGF.MakeAddrLValue(thisValue, RecordTy);
      LValue LV = CGF.EmitLValueForField(ThisLV, field);
      assert(LV.isSimple());

      CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer,
                      flags.isForNormalCleanup() && useEHCleanupForArray);
    }
  };
}

/// EnterDtorCleanups - Emit all code that comes at the end of a class's
/// destructor. This is to call destructors on members and base classes
/// in reverse order of their construction.
void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
                                        CXXDtorType DtorType) {
  assert(!DD->isTrivial() &&
         "Should not emit dtor epilogue for trivial dtor!");

  // The deleting-destructor phase just needs to call the appropriate
  // operator delete that Sema picked up.
  if (DtorType == Dtor_Deleting) {
    assert(DD->getOperatorDelete() &&
           "operator delete missing - EmitDtorEpilogue");
    if (CXXStructorImplicitParamValue) {
      // If there is an implicit param to the deleting dtor, it's a boolean
      // telling whether we should call delete at the end of the dtor.
      EHStack.pushCleanup<CallDtorDeleteConditional>(
          NormalAndEHCleanup, CXXStructorImplicitParamValue);
    } else {
      EHStack.pushCleanup<CallDtorDelete>(NormalAndEHCleanup);
    }
    return;
  }

  const CXXRecordDecl *ClassDecl = DD->getParent();

  // Unions have no bases and do not call field destructors.
  if (ClassDecl->isUnion())
    return;

  // The complete-destructor phase just destructs all the virtual bases.
  if (DtorType == Dtor_Complete) {

    // We push them in the forward order so that they'll be popped in
    // the reverse order.
    for (CXXRecordDecl::base_class_const_iterator I =
         ClassDecl->vbases_begin(), E = ClassDecl->vbases_end();
         I != E; ++I) {
      const CXXBaseSpecifier &Base = *I;
      CXXRecordDecl *BaseClassDecl
        = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());

      // Ignore trivial destructors.
      if (BaseClassDecl->hasTrivialDestructor())
        continue;

      EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
                                        BaseClassDecl,
                                        /*BaseIsVirtual*/ true);
    }

    return;
  }

  assert(DtorType == Dtor_Base);

  // Destroy non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I =
       ClassDecl->bases_begin(), E = ClassDecl->bases_end(); I != E; ++I) {
    const CXXBaseSpecifier &Base = *I;

    // Ignore virtual bases.
    if (Base.isVirtual())
      continue;

    CXXRecordDecl *BaseClassDecl = Base.getType()->getAsCXXRecordDecl();

    // Ignore trivial destructors.
    if (BaseClassDecl->hasTrivialDestructor())
      continue;

    EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
                                      BaseClassDecl,
                                      /*BaseIsVirtual*/ false);
  }

  // Destroy direct fields.
  SmallVector<const FieldDecl *, 16> FieldDecls;
  for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
       E = ClassDecl->field_end(); I != E; ++I) {
    const FieldDecl *field = *I;
    QualType type = field->getType();
    QualType::DestructionKind dtorKind = type.isDestructedType();
    if (!dtorKind) continue;

    // Anonymous union members do not have their destructors called.
    const RecordType *RT = type->getAsUnionType();
    if (RT && RT->getDecl()->isAnonymousStructOrUnion()) continue;

    CleanupKind cleanupKind = getCleanupKind(dtorKind);
    EHStack.pushCleanup<DestroyField>(cleanupKind, field,
                                      getDestroyer(dtorKind),
                                      cleanupKind & EHCleanup);
  }
}

/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
/// constructor for each of several members of an array.
///
/// \param ctor the constructor to call for each element
/// \param arrayType the type of the array to initialize
/// \param arrayBegin an arrayType*
/// \param zeroInitialize true if each element should be
///   zero-initialized before it is constructed
void
CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
                                            const ConstantArrayType *arrayType,
                                            llvm::Value *arrayBegin,
                                          CallExpr::const_arg_iterator argBegin,
                                            CallExpr::const_arg_iterator argEnd,
                                            bool zeroInitialize) {
  QualType elementType;
  llvm::Value *numElements =
    emitArrayLength(arrayType, elementType, arrayBegin);

  EmitCXXAggrConstructorCall(ctor, numElements, arrayBegin,
                             argBegin, argEnd, zeroInitialize);
}

/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
/// constructor for each of several members of an array.
///
/// \param ctor the constructor to call for each element
/// \param numElements the number of elements in the array;
///   may be zero
/// \param arrayBegin a T*, where T is the type constructed by ctor
/// \param zeroInitialize true if each element should be
///   zero-initialized before it is constructed
void
CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
                                            llvm::Value *numElements,
                                            llvm::Value *arrayBegin,
                                          CallExpr::const_arg_iterator argBegin,
                                            CallExpr::const_arg_iterator argEnd,
                                            bool zeroInitialize) {

  // It's legal for numElements to be zero. This can happen both
  // dynamically, because x can be zero in 'new A[x]', and statically,
  // because of GCC extensions that permit zero-length arrays. There
  // are probably legitimate places where we could assume that this
  // doesn't happen, but it's not clear that it's worth it.
  llvm::BranchInst *zeroCheckBranch = 0;

  // Optimize for a constant count.
  llvm::ConstantInt *constantCount
    = dyn_cast<llvm::ConstantInt>(numElements);
  if (constantCount) {
    // Just skip out if the constant count is zero.
    if (constantCount->isZero()) return;

  // Otherwise, emit the check.
  } else {
    llvm::BasicBlock *loopBB = createBasicBlock("new.ctorloop");
    llvm::Value *iszero = Builder.CreateIsNull(numElements, "isempty");
    // Both successors initially target the loop block; the 'true' edge is
    // patched to skip the loop once the continuation block exists (see the
    // setSuccessor call below).
    zeroCheckBranch = Builder.CreateCondBr(iszero, loopBB, loopBB);
    EmitBlock(loopBB);
  }

  // Find the end of the array.
  llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(arrayBegin, numElements,
                                                    "arrayctor.end");

  // Enter the loop, setting up a phi for the current location to initialize.
  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = createBasicBlock("arrayctor.loop");
  EmitBlock(loopBB);
  llvm::PHINode *cur = Builder.CreatePHI(arrayBegin->getType(), 2,
                                         "arrayctor.cur");
  cur->addIncoming(arrayBegin, entryBB);

  // Inside the loop body, emit the constructor call on the array element.

  QualType type = getContext().getTypeDeclType(ctor->getParent());

  // Zero initialize the storage, if requested.
  if (zeroInitialize)
    EmitNullInitialization(cur, type);

  // C++ [class.temporary]p4:
  // There are two contexts in which temporaries are destroyed at a different
  // point than the end of the full-expression. The first context is when a
  // default constructor is called to initialize an element of an array.

  {
    RunCleanupsScope Scope(*this);

    // Evaluate the constructor and its arguments in a regular
    // partial-destroy cleanup.
    if (getLangOpts().Exceptions &&
        !ctor->getParent()->hasTrivialDestructor()) {
      Destroyer *destroyer = destroyCXXObject;
      pushRegularPartialArrayCleanup(arrayBegin, cur, type, *destroyer);
    }

    EmitCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/ false,
                           /*Delegating=*/false, cur, argBegin, argEnd);
  }

  // Go to the next element.
  llvm::Value *next =
    Builder.CreateInBoundsGEP(cur, llvm::ConstantInt::get(SizeTy, 1),
                              "arrayctor.next");
  cur->addIncoming(next, Builder.GetInsertBlock());

  // Check whether that's the end of the loop.
  llvm::Value *done = Builder.CreateICmpEQ(next, arrayEnd, "arrayctor.done");
  llvm::BasicBlock *contBB = createBasicBlock("arrayctor.cont");
  Builder.CreateCondBr(done, contBB, loopBB);

  // Patch the earlier check to skip over the loop.
  if (zeroCheckBranch) zeroCheckBranch->setSuccessor(0, contBB);

  EmitBlock(contBB);
}

void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF,
                                       llvm::Value *addr,
                                       QualType type) {
  const RecordType *rtype = type->castAs<RecordType>();
  const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
  const CXXDestructorDecl *dtor = record->getDestructor();
  assert(!dtor->isTrivial());
  CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false,
                            /*Delegating=*/false, addr);
}
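
// A hypothetical illustration (type invented for exposition) of the fast
// paths taken below:
//
//   struct Point { int x, y; };
//   Point a;      // trivial default constructor: nothing is emitted
//   Point b(a);   // trivial copy constructor: lowered to an aggregate copy
//
// Only non-trivial constructors are dispatched to the ABI-specific lowering.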
void
CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
                                        CXXCtorType Type, bool ForVirtualBase,
                                        bool Delegating,
                                        llvm::Value *This,
                                        CallExpr::const_arg_iterator ArgBeg,
                                        CallExpr::const_arg_iterator ArgEnd) {
  // If this is a trivial constructor, just emit what's needed.
  if (D->isTrivial()) {
    if (ArgBeg == ArgEnd) {
      // Trivial default constructor, no codegen required.
      assert(D->isDefaultConstructor() &&
             "trivial 0-arg ctor not a default ctor");
      return;
    }

    assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
    assert(D->isCopyOrMoveConstructor() &&
           "trivial 1-arg ctor not a copy/move ctor");

    const Expr *E = (*ArgBeg);
    QualType Ty = E->getType();
    llvm::Value *Src = EmitLValue(E).getAddress();
    EmitAggregateCopy(This, Src, Ty);
    return;
  }

  // Non-trivial constructors are handled in an ABI-specific manner.
  CGM.getCXXABI().EmitConstructorCall(*this, D, Type, ForVirtualBase,
                                      Delegating, This, ArgBeg, ArgEnd);
}

void
CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
                                        llvm::Value *This, llvm::Value *Src,
                                        CallExpr::const_arg_iterator ArgBeg,
                                        CallExpr::const_arg_iterator ArgEnd) {
  if (D->isTrivial()) {
    assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
    assert(D->isCopyOrMoveConstructor() &&
           "trivial 1-arg ctor not a copy/move ctor");
    EmitAggregateCopy(This, Src, (*ArgBeg)->getType());
    return;
  }
  llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, clang::Ctor_Complete);
  assert(D->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  const FunctionProtoType *FPT = D->getType()->getAs<FunctionProtoType>();

  CallArgList Args;

  // Push the this ptr.
  Args.add(RValue::get(This), D->getThisType(getContext()));

  // Push the src ptr.
  QualType QT = *(FPT->arg_type_begin());
  llvm::Type *t = CGM.getTypes().ConvertType(QT);
  Src = Builder.CreateBitCast(Src, t);
  Args.add(RValue::get(Src), QT);

  // Skip over first argument (Src).
  ++ArgBeg;
  CallExpr::const_arg_iterator Arg = ArgBeg;
  for (FunctionProtoType::arg_type_iterator I = FPT->arg_type_begin()+1,
       E = FPT->arg_type_end(); I != E; ++I, ++Arg) {
    assert(Arg != ArgEnd && "Running over edge of argument list!");
    EmitCallArg(Args, *Arg, *I);
  }
  // Either we've emitted all the call args, or we have a call to a
  // variadic function.
  assert((Arg == ArgEnd || FPT->isVariadic()) &&
         "Extra arguments in non-variadic function!");
  // If we still have any arguments, emit them using the type of the argument.
  for (; Arg != ArgEnd; ++Arg) {
    QualType ArgType = Arg->getType();
    EmitCallArg(Args, *Arg, ArgType);
  }

  EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, RequiredArgs::All),
           Callee, ReturnValueSlot(), Args, D);
}
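
// A hypothetical sketch (class invented for exposition) of what the delegate
// call emitted below looks like: for
//
//   struct S : Base { S(int n); };
//
// the complete-object constructor of S can be emitted as a single call to
// the base-object constructor, forwarding 'this', the VTT parameter if one
// is required, and the original argument 'n' unchanged.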
void
CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                                CXXCtorType CtorType,
                                                const FunctionArgList &Args,
                                                SourceLocation Loc) {
  CallArgList DelegateArgs;

  FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
  assert(I != E && "no parameters to constructor");

  // this
  DelegateArgs.add(RValue::get(LoadCXXThis()), (*I)->getType());
  ++I;

  // vtt
  if (llvm::Value *VTT = GetVTTParameter(GlobalDecl(Ctor, CtorType),
                                         /*ForVirtualBase=*/false,
                                         /*Delegating=*/true)) {
    QualType VoidPP = getContext().getPointerType(getContext().VoidPtrTy);
    DelegateArgs.add(RValue::get(VTT), VoidPP);

    if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
      assert(I != E && "cannot skip vtt parameter, already done with args");
      assert((*I)->getType() == VoidPP && "skipping parameter not of vtt type");
      ++I;
    }
  }

  // Explicit arguments.
  for (; I != E; ++I) {
    const VarDecl *param = *I;
    // FIXME: per-argument source location
    EmitDelegateCallArg(DelegateArgs, param, Loc);
  }

  llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(Ctor, CtorType);
  EmitCall(CGM.getTypes().arrangeCXXConstructorDeclaration(Ctor, CtorType),
           Callee, ReturnValueSlot(), DelegateArgs, Ctor);
}

namespace {
  struct CallDelegatingCtorDtor : EHScopeStack::Cleanup {
    const CXXDestructorDecl *Dtor;
    llvm::Value *Addr;
    CXXDtorType Type;

    CallDelegatingCtorDtor(const CXXDestructorDecl *D, llvm::Value *Addr,
                           CXXDtorType Type)
      : Dtor(D), Addr(Addr), Type(Type) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      CGF.EmitCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false,
                                /*Delegating=*/true, Addr);
    }
  };
}

void
CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                                  const FunctionArgList &Args) {
  assert(Ctor->isDelegatingConstructor());

  llvm::Value *ThisPtr = LoadCXXThis();

  QualType Ty = getContext().getTagDeclType(Ctor->getParent());
  CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
  AggValueSlot AggSlot =
    AggValueSlot::forAddr(ThisPtr, Alignment, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased);

  EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot);

  const CXXRecordDecl *ClassDecl = Ctor->getParent();
  if (CGM.getLangOpts().Exceptions && !ClassDecl->hasTrivialDestructor()) {
    CXXDtorType Type =
      CurGD.getCtorType() == Ctor_Complete ? Dtor_Complete : Dtor_Base;

    EHStack.pushCleanup<CallDelegatingCtorDtor>(EHCleanup,
                                                ClassDecl->getDestructor(),
                                                ThisPtr, Type);
  }
}
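
// (Illustrative note, not exhaustive) 'Type' selects the destructor variant
// to call: Dtor_Complete also destroys virtual bases, while Dtor_Base
// destroys only non-virtual bases and members.  When the class needs a VTT,
// it is passed along so base-object destructors can locate the correct
// vtables while the object is being torn down.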
void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
                                            CXXDtorType Type,
                                            bool ForVirtualBase,
                                            bool Delegating,
                                            llvm::Value *This) {
  GlobalDecl GD(DD, Type);
  llvm::Value *VTT = GetVTTParameter(GD, ForVirtualBase, Delegating);
  llvm::Value *Callee = 0;
  if (getLangOpts().AppleKext)
    Callee = BuildAppleKextVirtualDestructorCall(DD, Type,
                                                 DD->getParent());

  if (!Callee)
    Callee = CGM.GetAddrOfCXXDestructor(DD, Type);

  if (DD->isVirtual())
    This = CGM.getCXXABI().adjustThisArgumentForVirtualCall(*this, GD, This);

  // FIXME: Provide a source location here.
  EmitCXXMemberCall(DD, SourceLocation(), Callee, ReturnValueSlot(), This,
                    VTT, getContext().getPointerType(getContext().VoidPtrTy),
                    0, 0);
}

namespace {
  struct CallLocalDtor : EHScopeStack::Cleanup {
    const CXXDestructorDecl *Dtor;
    llvm::Value *Addr;

    CallLocalDtor(const CXXDestructorDecl *D, llvm::Value *Addr)
      : Dtor(D), Addr(Addr) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                                /*ForVirtualBase=*/false,
                                /*Delegating=*/false, Addr);
    }
  };
}

void CodeGenFunction::PushDestructorCleanup(const CXXDestructorDecl *D,
                                            llvm::Value *Addr) {
  EHStack.pushCleanup<CallLocalDtor>(NormalAndEHCleanup, D, Addr);
}

void CodeGenFunction::PushDestructorCleanup(QualType T, llvm::Value *Addr) {
  CXXRecordDecl *ClassDecl = T->getAsCXXRecordDecl();
  if (!ClassDecl) return;
  if (ClassDecl->hasTrivialDestructor()) return;

  const CXXDestructorDecl *D = ClassDecl->getDestructor();
  assert(D && D->isUsed() && "destructor not marked as used!");
  PushDestructorCleanup(D, Addr);
}

void
CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
                                         const CXXRecordDecl *NearestVBase,
                                         CharUnits OffsetFromNearestVBase,
                                         const CXXRecordDecl *VTableClass) {
  // Compute the address point.
  bool NeedsVirtualOffset;
  llvm::Value *VTableAddressPoint =
      CGM.getCXXABI().getVTableAddressPointInStructor(
          *this, VTableClass, Base, NearestVBase, NeedsVirtualOffset);
  if (!VTableAddressPoint)
    return;

  // Compute where to store the address point.
  llvm::Value *VirtualOffset = 0;
  CharUnits NonVirtualOffset = CharUnits::Zero();

  if (NeedsVirtualOffset) {
    // We need to use the virtual base offset offset because the virtual base
    // might have a different offset in the most derived class.
    VirtualOffset = CGM.getCXXABI().GetVirtualBaseClassOffset(*this,
                                                              LoadCXXThis(),
                                                              VTableClass,
                                                              NearestVBase);
    NonVirtualOffset = OffsetFromNearestVBase;
  } else {
    // We can just use the base offset in the complete class.
    NonVirtualOffset = Base.getBaseOffset();
  }

  // Apply the offsets.
  llvm::Value *VTableField = LoadCXXThis();

  if (!NonVirtualOffset.isZero() || VirtualOffset)
    VTableField = ApplyNonVirtualAndVirtualOffset(*this, VTableField,
                                                  NonVirtualOffset,
                                                  VirtualOffset);

  // Finally, store the address point.
  llvm::Type *AddressPointPtrTy =
    VTableAddressPoint->getType()->getPointerTo();
  VTableField = Builder.CreateBitCast(VTableField, AddressPointPtrTy);
  llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField);
  CGM.DecorateInstruction(Store, CGM.getTBAAInfoForVTablePtr());
}
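
// A hypothetical illustration (types invented for exposition) of the
// recursion below: given
//
//   struct A { virtual void f(); };
//   struct B : virtual A { };
//   struct C : B { };
//
// a constructor of C must store an address point for C itself, for its
// dynamic non-virtual bases, and (in the complete-object constructor)
// exactly once for the virtual base A; the VBases set keeps a shared
// virtual base from being initialized more than once.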
void
CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
                                          const CXXRecordDecl *NearestVBase,
                                          CharUnits OffsetFromNearestVBase,
                                          bool BaseIsNonVirtualPrimaryBase,
                                          const CXXRecordDecl *VTableClass,
                                          VisitedVirtualBasesSetTy& VBases) {
  // If this base is a non-virtual primary base the address point has already
  // been set.
  if (!BaseIsNonVirtualPrimaryBase) {
    // Initialize the vtable pointer for this base.
    InitializeVTablePointer(Base, NearestVBase, OffsetFromNearestVBase,
                            VTableClass);
  }

  const CXXRecordDecl *RD = Base.getBase();

  // Traverse bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    CXXRecordDecl *BaseDecl
      = cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // Ignore classes without a vtable.
    if (!BaseDecl->isDynamicClass())
      continue;

    CharUnits BaseOffset;
    CharUnits BaseOffsetFromNearestVBase;
    bool BaseDeclIsNonVirtualPrimaryBase;

    if (I->isVirtual()) {
      // Check if we've visited this virtual base before.
      if (!VBases.insert(BaseDecl))
        continue;

      const ASTRecordLayout &Layout =
        getContext().getASTRecordLayout(VTableClass);

      BaseOffset = Layout.getVBaseClassOffset(BaseDecl);
      BaseOffsetFromNearestVBase = CharUnits::Zero();
      BaseDeclIsNonVirtualPrimaryBase = false;
    } else {
      const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

      BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
      BaseOffsetFromNearestVBase =
        OffsetFromNearestVBase + Layout.getBaseClassOffset(BaseDecl);
      BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl;
    }

    InitializeVTablePointers(BaseSubobject(BaseDecl, BaseOffset),
                             I->isVirtual() ? BaseDecl : NearestVBase,
                             BaseOffsetFromNearestVBase,
                             BaseDeclIsNonVirtualPrimaryBase,
                             VTableClass, VBases);
  }
}

void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
  // Ignore classes without a vtable.
  if (!RD->isDynamicClass())
    return;

  // Initialize the vtable pointers for this class and all of its bases.
  VisitedVirtualBasesSetTy VBases;
  InitializeVTablePointers(BaseSubobject(RD, CharUnits::Zero()),
                           /*NearestVBase=*/0,
                           /*OffsetFromNearestVBase=*/CharUnits::Zero(),
                           /*BaseIsNonVirtualPrimaryBase=*/false, RD, VBases);

  if (RD->getNumVBases())
    CGM.getCXXABI().initializeHiddenVirtualInheritanceMembers(*this, RD);
}

llvm::Value *CodeGenFunction::GetVTablePtr(llvm::Value *This,
                                           llvm::Type *Ty) {
  llvm::Value *VTablePtrSrc = Builder.CreateBitCast(This, Ty->getPointerTo());
  llvm::Instruction *VTable = Builder.CreateLoad(VTablePtrSrc, "vtable");
  CGM.DecorateInstruction(VTable, CGM.getTBAAInfoForVTablePtr());
  return VTable;
}

// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
// quite what we want.
static const Expr *skipNoOpCastsAndParens(const Expr *E) {
  while (true) {
    if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
      E = PE->getSubExpr();
      continue;
    }

    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
      if (CE->getCastKind() == CK_NoOp) {
        E = CE->getSubExpr();
        continue;
      }
    }
    if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
      if (UO->getOpcode() == UO_Extension) {
        E = UO->getSubExpr();
        continue;
      }
    }
    return E;
  }
}
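
// A hypothetical illustration (names invented for exposition) of the cases
// decided below:
//
//   struct Derived : BaseWithVirtuals { void f(); };
//
//   void g(Derived d, Derived *p) {
//     d.f();    // devirtualizable: 'd' is a complete object of known type
//     p->f();   // not devirtualizable: *p may be a more-derived object
//   }
//
// Only the call on the object itself can safely be turned into a direct call
// (unless the class or the method is marked 'final').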
bool
CodeGenFunction::CanDevirtualizeMemberFunctionCall(const Expr *Base,
                                                   const CXXMethodDecl *MD) {
  // When building with -fapple-kext, all calls must go through the vtable since
  // the kernel linker can do runtime patching of vtables.
  if (getLangOpts().AppleKext)
    return false;

  // If the most derived class is marked final, we know that no subclass can
  // override this member function and so we can devirtualize it. For example:
  //
  //   struct A { virtual void f(); };
  //   struct B final : A { };
  //
  //   void f(B *b) {
  //     b->f();
  //   }
  //
  const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType();
  if (MostDerivedClassDecl->hasAttr<FinalAttr>())
    return true;

  // If the member function is marked 'final', we know that it can't be
  // overridden and can therefore devirtualize it.
  if (MD->hasAttr<FinalAttr>())
    return true;

  // Similarly, if the class itself is marked 'final' it can't be overridden
  // and we can therefore devirtualize the member function call.
  if (MD->getParent()->hasAttr<FinalAttr>())
    return true;

  Base = skipNoOpCastsAndParens(Base);
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
      // This is a record decl. We know the type and can devirtualize it.
      return VD->getType()->isRecordType();
    }

    return false;
  }

  // We can devirtualize calls on an object accessed by a class member access
  // expression, since by C++11 [basic.life]p6 we know that it can't refer to
  // a derived class object constructed in the same location.
  if (const MemberExpr *ME = dyn_cast<MemberExpr>(Base))
    if (const ValueDecl *VD = dyn_cast<ValueDecl>(ME->getMemberDecl()))
      return VD->getType()->isRecordType();

  // We can always devirtualize calls on temporary object expressions.
  if (isa<CXXConstructExpr>(Base))
    return true;

  // And calls on bound temporaries.
  if (isa<CXXBindTemporaryExpr>(Base))
    return true;

  // Check if this is a call expr that returns a record type.
  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
    return CE->getCallReturnType()->isRecordType();

  // We can't devirtualize the call.
  return false;
}

llvm::Value *
CodeGenFunction::EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
                                             const CXXMethodDecl *MD,
                                             llvm::Value *This) {
  llvm::FunctionType *fnType =
    CGM.getTypes().GetFunctionType(
                             CGM.getTypes().arrangeCXXMethodDeclaration(MD));

  if (MD->isVirtual() && !CanDevirtualizeMemberFunctionCall(E->getArg(0), MD))
    return CGM.getCXXABI().getVirtualFunctionPointer(*this, MD, This, fnType);

  return CGM.GetAddrOfFunction(MD, fnType);
}
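
// A hypothetical illustration (names invented for exposition) of when the
// forwarding helper below is used: for
//
//   auto fn = [](int x) { return x + 1; };
//   int (*pf)(int) = fn;
//
// the pointer obtained from the conversion refers to a static invoker that
// simply forwards its arguments to the lambda's call operator; the same
// forwarding is used for the block produced by a lambda-to-block conversion.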
void CodeGenFunction::EmitForwardingCallToLambda(
                                      const CXXMethodDecl *callOperator,
                                      CallArgList &callArgs) {
  // Get the address of the call operator.
  const CGFunctionInfo &calleeFnInfo =
    CGM.getTypes().arrangeCXXMethodDeclaration(callOperator);
  llvm::Value *callee =
    CGM.GetAddrOfFunction(GlobalDecl(callOperator),
                          CGM.getTypes().GetFunctionType(calleeFnInfo));

  // Prepare the return slot.
  const FunctionProtoType *FPT =
    callOperator->getType()->castAs<FunctionProtoType>();
  QualType resultType = FPT->getResultType();
  ReturnValueSlot returnSlot;
  if (!resultType->isVoidType() &&
      calleeFnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
      !hasScalarEvaluationKind(calleeFnInfo.getReturnType()))
    returnSlot = ReturnValueSlot(ReturnValue, resultType.isVolatileQualified());

  // We don't need to separately arrange the call arguments because
  // the call can't be variadic anyway --- it's impossible to forward
  // variadic arguments.

  // Now emit our call.
  RValue RV = EmitCall(calleeFnInfo, callee, returnSlot,
                       callArgs, callOperator);

  // If necessary, copy the returned value into the slot.
  if (!resultType->isVoidType() && returnSlot.isNull())
    EmitReturnOfRValue(RV, resultType);
  else
    EmitBranchThroughCleanup(ReturnBlock);
}

void CodeGenFunction::EmitLambdaBlockInvokeBody() {
  const BlockDecl *BD = BlockInfo->getBlockDecl();
  const VarDecl *variable = BD->capture_begin()->getVariable();
  const CXXRecordDecl *Lambda = variable->getType()->getAsCXXRecordDecl();

  // Start building arguments for forwarding call
  CallArgList CallArgs;

  QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
  llvm::Value *ThisPtr = GetAddrOfBlockDecl(variable, false);
  CallArgs.add(RValue::get(ThisPtr), ThisType);

  // Add the rest of the parameters.
  for (BlockDecl::param_const_iterator I = BD->param_begin(),
       E = BD->param_end(); I != E; ++I) {
    ParmVarDecl *param = *I;
    EmitDelegateCallArg(CallArgs, param, param->getLocStart());
  }
  assert(!Lambda->isGenericLambda() &&
         "generic lambda interconversion to block not implemented");
  EmitForwardingCallToLambda(Lambda->getLambdaCallOperator(), CallArgs);
}

void CodeGenFunction::EmitLambdaToBlockPointerBody(FunctionArgList &Args) {
  if (cast<CXXMethodDecl>(CurCodeDecl)->isVariadic()) {
    // FIXME: Making this work correctly is nasty because it requires either
    // cloning the body of the call operator or making the call operator forward.
    CGM.ErrorUnsupported(CurCodeDecl, "lambda conversion to variadic function");
    return;
  }

  EmitFunctionBody(Args);
}

void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
  const CXXRecordDecl *Lambda = MD->getParent();

  // Start building arguments for forwarding call
  CallArgList CallArgs;

  QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
  llvm::Value *ThisPtr = llvm::UndefValue::get(getTypes().ConvertType(ThisType));
  CallArgs.add(RValue::get(ThisPtr), ThisType);

  // Add the rest of the parameters.
  for (FunctionDecl::param_const_iterator I = MD->param_begin(),
       E = MD->param_end(); I != E; ++I) {
    ParmVarDecl *param = *I;
    EmitDelegateCallArg(CallArgs, param, param->getLocStart());
  }
  const CXXMethodDecl *CallOp = Lambda->getLambdaCallOperator();
  // For a generic lambda, find the corresponding call operator specialization
  // to which the call to the static-invoker shall be forwarded.
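  //
  // A hypothetical illustration (names invented for exposition): for
  //
  //   auto fn = [](auto x) { return x; };
  //   int (*pf)(int) = fn;
  //
  // the static invoker behind 'pf' must forward to the operator()<int>
  // specialization rather than to the call operator template itself, which
  // is what the lookup below finds.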
  if (Lambda->isGenericLambda()) {
    assert(MD->isFunctionTemplateSpecialization());
    const TemplateArgumentList *TAL = MD->getTemplateSpecializationArgs();
    FunctionTemplateDecl *CallOpTemplate = CallOp->getDescribedFunctionTemplate();
    void *InsertPos = 0;
    FunctionDecl *CorrespondingCallOpSpecialization =
        CallOpTemplate->findSpecialization(TAL->data(), TAL->size(), InsertPos);
    assert(CorrespondingCallOpSpecialization);
    CallOp = cast<CXXMethodDecl>(CorrespondingCallOpSpecialization);
  }
  EmitForwardingCallToLambda(CallOp, CallArgs);
}

void CodeGenFunction::EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD) {
  if (MD->isVariadic()) {
    // FIXME: Making this work correctly is nasty because it requires either
    // cloning the body of the call operator or making the call operator forward.
    CGM.ErrorUnsupported(MD, "lambda conversion to variadic function");
    return;
  }

  EmitLambdaDelegatingInvokeBody(MD);
}