//===--- CGClass.cpp - Emit LLVM Code for C++ classes ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with C++ code generation of classes
//
//===----------------------------------------------------------------------===//

#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"

using namespace clang;
using namespace CodeGen;

static CharUnits
ComputeNonVirtualBaseClassOffset(ASTContext &Context,
                                 const CXXRecordDecl *DerivedClass,
                                 CastExpr::path_const_iterator Start,
                                 CastExpr::path_const_iterator End) {
  CharUnits Offset = CharUnits::Zero();

  const CXXRecordDecl *RD = DerivedClass;

  for (CastExpr::path_const_iterator I = Start; I != End; ++I) {
    const CXXBaseSpecifier *Base = *I;
    assert(!Base->isVirtual() && "Should not see virtual bases here!");

    // Get the layout.
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());

    // Add the offset.
    Offset += Layout.getBaseClassOffset(BaseDecl);

    RD = BaseDecl;
  }

  return Offset;
}

llvm::Constant *
CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
                                   CastExpr::path_const_iterator PathBegin,
                                   CastExpr::path_const_iterator PathEnd) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  CharUnits Offset =
    ComputeNonVirtualBaseClassOffset(getContext(), ClassDecl,
                                     PathBegin, PathEnd);
  if (Offset.isZero())
    return 0;

  llvm::Type *PtrDiffTy =
    Types.ConvertType(getContext().getPointerDiffType());

  return llvm::ConstantInt::get(PtrDiffTy, Offset.getQuantity());
}

/// Gets the address of a direct base class within a complete object.
/// This should only be used for (1) non-virtual bases or (2) virtual bases
/// when the type is known to be complete (e.g. in complete destructors).
///
/// The object pointed to by 'This' is assumed to be non-null.
llvm::Value *
CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
                                                   const CXXRecordDecl *Derived,
                                                   const CXXRecordDecl *Base,
                                                   bool BaseIsVirtual) {
  // 'this' must be a pointer (in some address space) to Derived.
  assert(This->getType()->isPointerTy() &&
         cast<llvm::PointerType>(This->getType())->getElementType()
           == ConvertType(Derived));

  // Compute the offset of the virtual base.
  CharUnits Offset;
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived);
  if (BaseIsVirtual)
    Offset = Layout.getVBaseClassOffset(Base);
  else
    Offset = Layout.getBaseClassOffset(Base);

  // Shift and cast down to the base type.
  // TODO: for complete types, this should be possible with a GEP.
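  // The offset is a byte count, so apply it on an i8* rather than on a
  // pointer to the derived type.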
  llvm::Value *V = This;
  if (Offset.isPositive()) {
    V = Builder.CreateBitCast(V, Int8PtrTy);
    V = Builder.CreateConstInBoundsGEP1_64(V, Offset.getQuantity());
  }
  V = Builder.CreateBitCast(V, ConvertType(Base)->getPointerTo());

  return V;
}

static llvm::Value *
ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ptr,
                                CharUnits nonVirtualOffset,
                                llvm::Value *virtualOffset) {
  // Assert that we have something to do.
  assert(!nonVirtualOffset.isZero() || virtualOffset != 0);

  // Compute the offset from the static and dynamic components.
  llvm::Value *baseOffset;
  if (!nonVirtualOffset.isZero()) {
    baseOffset = llvm::ConstantInt::get(CGF.PtrDiffTy,
                                        nonVirtualOffset.getQuantity());
    if (virtualOffset) {
      baseOffset = CGF.Builder.CreateAdd(virtualOffset, baseOffset);
    }
  } else {
    baseOffset = virtualOffset;
  }

  // Apply the base offset.
  ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8PtrTy);
  ptr = CGF.Builder.CreateInBoundsGEP(ptr, baseOffset, "add.ptr");
  return ptr;
}

llvm::Value *
CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
                                       const CXXRecordDecl *Derived,
                                       CastExpr::path_const_iterator PathBegin,
                                       CastExpr::path_const_iterator PathEnd,
                                       bool NullCheckValue) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  CastExpr::path_const_iterator Start = PathBegin;
  const CXXRecordDecl *VBase = 0;

  // Sema has done some convenient canonicalization here: if the
  // access path involved any virtual steps, the conversion path will
  // *start* with a step down to the correct virtual base subobject,
  // and hence will not require any further steps.
  if ((*Start)->isVirtual()) {
    VBase =
      cast<CXXRecordDecl>((*Start)->getType()->getAs<RecordType>()->getDecl());
    ++Start;
  }

  // Compute the static offset of the ultimate destination within its
  // allocating subobject (the virtual base, if there is one, or else
  // the "complete" object that we see).
  CharUnits NonVirtualOffset =
    ComputeNonVirtualBaseClassOffset(getContext(), VBase ? VBase : Derived,
                                     Start, PathEnd);

  // If there's a virtual step, we can sometimes "devirtualize" it.
  // For now, that's limited to when the derived type is final.
  // TODO: "devirtualize" this for accesses to known-complete objects.
  if (VBase && Derived->hasAttr<FinalAttr>()) {
    const ASTRecordLayout &layout = getContext().getASTRecordLayout(Derived);
    CharUnits vBaseOffset = layout.getVBaseClassOffset(VBase);
    NonVirtualOffset += vBaseOffset;
    VBase = 0; // we no longer have a virtual step
  }

  // Get the base pointer type.
  llvm::Type *BasePtrTy =
    ConvertType((PathEnd[-1])->getType())->getPointerTo();

  // If the static offset is zero and we don't have a virtual step,
  // just do a bitcast; null checks are unnecessary.
  if (NonVirtualOffset.isZero() && !VBase) {
    return Builder.CreateBitCast(Value, BasePtrTy);
  }

  llvm::BasicBlock *origBB = 0;
  llvm::BasicBlock *endBB = 0;

  // Skip over the offset (and the vtable load) if we're supposed to
  // null-check the pointer.
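  // A null 'Value' must map to a null result, so branch straight to the end
  // block on null and merge the two cases with a PHI below.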
  if (NullCheckValue) {
    origBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("cast.notnull");
    endBB = createBasicBlock("cast.end");

    llvm::Value *isNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(isNull, endBB, notNullBB);
    EmitBlock(notNullBB);
  }

  // Compute the virtual offset.
  llvm::Value *VirtualOffset = 0;
  if (VBase) {
    VirtualOffset =
      CGM.getCXXABI().GetVirtualBaseClassOffset(*this, Value, Derived, VBase);
  }

  // Apply both offsets.
  Value = ApplyNonVirtualAndVirtualOffset(*this, Value,
                                          NonVirtualOffset,
                                          VirtualOffset);

  // Cast to the destination type.
  Value = Builder.CreateBitCast(Value, BasePtrTy);

  // Build a phi if we needed a null check.
  if (NullCheckValue) {
    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    Builder.CreateBr(endBB);
    EmitBlock(endBB);

    llvm::PHINode *PHI = Builder.CreatePHI(BasePtrTy, 2, "cast.result");
    PHI->addIncoming(Value, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(BasePtrTy), origBB);
    Value = PHI;
  }

  return Value;
}

llvm::Value *
CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
                                          const CXXRecordDecl *Derived,
                                        CastExpr::path_const_iterator PathBegin,
                                          CastExpr::path_const_iterator PathEnd,
                                          bool NullCheckValue) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  QualType DerivedTy =
    getContext().getCanonicalType(getContext().getTagDeclType(Derived));
  llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo();

  llvm::Value *NonVirtualOffset =
    CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd);

  if (!NonVirtualOffset) {
    // No offset, we can just cast back.
    return Builder.CreateBitCast(Value, DerivedPtrTy);
  }

  llvm::BasicBlock *CastNull = 0;
  llvm::BasicBlock *CastNotNull = 0;
  llvm::BasicBlock *CastEnd = 0;

  if (NullCheckValue) {
    CastNull = createBasicBlock("cast.null");
    CastNotNull = createBasicBlock("cast.notnull");
    CastEnd = createBasicBlock("cast.end");

    llvm::Value *IsNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  // Apply the offset.
  Value = Builder.CreateBitCast(Value, Int8PtrTy);
  Value = Builder.CreateGEP(Value, Builder.CreateNeg(NonVirtualOffset),
                            "sub.ptr");

  // Just cast.
  Value = Builder.CreateBitCast(Value, DerivedPtrTy);

  if (NullCheckValue) {
    Builder.CreateBr(CastEnd);
    EmitBlock(CastNull);
    Builder.CreateBr(CastEnd);
    EmitBlock(CastEnd);

    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()),
                     CastNull);
    Value = PHI;
  }

  return Value;
}

llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD,
                                              bool ForVirtualBase,
                                              bool Delegating) {
  if (!CGM.getCXXABI().NeedsVTTParameter(GD)) {
    // This constructor/destructor does not need a VTT parameter.
    return 0;
  }

  const CXXRecordDecl *RD = cast<CXXMethodDecl>(CurCodeDecl)->getParent();
  const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent();

  llvm::Value *VTT;

  uint64_t SubVTTIndex;

  if (Delegating) {
    // If this is a delegating constructor call, just load the VTT.
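    // The delegating and target constructors belong to the same class, so
    // the VTT passed to the current function applies to the callee unchanged.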
    return LoadCXXVTT();
  } else if (RD == Base) {
    // If the record matches the base, this is the complete ctor/dtor
    // variant calling the base variant in a class with virtual bases.
    assert(!CGM.getCXXABI().NeedsVTTParameter(CurGD) &&
           "doing no-op VTT offset in base dtor/ctor?");
    assert(!ForVirtualBase && "Can't have same class as virtual base!");
    SubVTTIndex = 0;
  } else {
    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
    CharUnits BaseOffset = ForVirtualBase ?
      Layout.getVBaseClassOffset(Base) :
      Layout.getBaseClassOffset(Base);

    SubVTTIndex =
      CGM.getVTables().getSubVTTIndex(RD, BaseSubobject(Base, BaseOffset));
    assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!");
  }

  if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
    // A VTT parameter was passed to the constructor, use it.
    VTT = LoadCXXVTT();
    VTT = Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex);
  } else {
    // We're the complete constructor, so get the VTT by name.
    VTT = CGM.getVTables().GetAddrOfVTT(RD);
    VTT = Builder.CreateConstInBoundsGEP2_64(VTT, 0, SubVTTIndex);
  }

  return VTT;
}

namespace {
  /// Call the destructor for a direct base class.
  struct CallBaseDtor : EHScopeStack::Cleanup {
    const CXXRecordDecl *BaseClass;
    bool BaseIsVirtual;
    CallBaseDtor(const CXXRecordDecl *Base, bool BaseIsVirtual)
      : BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const CXXRecordDecl *DerivedClass =
        cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent();

      const CXXDestructorDecl *D = BaseClass->getDestructor();
      llvm::Value *Addr =
        CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThis(),
                                                  DerivedClass, BaseClass,
                                                  BaseIsVirtual);
      CGF.EmitCXXDestructorCall(D, Dtor_Base, BaseIsVirtual,
                                /*Delegating=*/false, Addr);
    }
  };

  /// A visitor which checks whether an initializer uses 'this' in a
  /// way which requires the vtable to be properly set.
  struct DynamicThisUseChecker : EvaluatedExprVisitor<DynamicThisUseChecker> {
    typedef EvaluatedExprVisitor<DynamicThisUseChecker> super;

    bool UsesThis;

    DynamicThisUseChecker(ASTContext &C) : super(C), UsesThis(false) {}

    // Black-list all explicit and implicit references to 'this'.
    //
    // Do we need to worry about external references to 'this' derived
    // from arbitrary code? If so, then anything which runs arbitrary
    // external code might potentially access the vtable.
    void VisitCXXThisExpr(CXXThisExpr *E) { UsesThis = true; }
  };
}

static bool BaseInitializerUsesThis(ASTContext &C, const Expr *Init) {
  DynamicThisUseChecker Checker(C);
  Checker.Visit(const_cast<Expr*>(Init));
  return Checker.UsesThis;
}

static void EmitBaseInitializer(CodeGenFunction &CGF,
                                const CXXRecordDecl *ClassDecl,
                                CXXCtorInitializer *BaseInit,
                                CXXCtorType CtorType) {
  assert(BaseInit->isBaseInitializer() &&
         "Must have base initializer!");

  llvm::Value *ThisPtr = CGF.LoadCXXThis();

  const Type *BaseType = BaseInit->getBaseClass();
  CXXRecordDecl *BaseClassDecl =
    cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());

  bool isBaseVirtual = BaseInit->isBaseVirtual();

  // The base constructor doesn't construct virtual bases.
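  // Virtual bases are initialized only by the complete-object variant.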
  if (CtorType == Ctor_Base && isBaseVirtual)
    return;

  // If the initializer for the base (other than the constructor
  // itself) accesses 'this' in any way, we need to initialize the
  // vtables.
  if (BaseInitializerUsesThis(CGF.getContext(), BaseInit->getInit()))
    CGF.InitializeVTablePointers(ClassDecl);

  // We can pretend to be a complete class because it only matters for
  // virtual bases, and we only do virtual bases for complete ctors.
  llvm::Value *V =
    CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl,
                                              BaseClassDecl,
                                              isBaseVirtual);
  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(BaseType);
  AggValueSlot AggSlot =
    AggValueSlot::forAddr(V, Alignment, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased);

  CGF.EmitAggExpr(BaseInit->getInit(), AggSlot);

  if (CGF.CGM.getLangOpts().Exceptions &&
      !BaseClassDecl->hasTrivialDestructor())
    CGF.EHStack.pushCleanup<CallBaseDtor>(EHCleanup, BaseClassDecl,
                                          isBaseVirtual);
}

static void EmitAggMemberInitializer(CodeGenFunction &CGF,
                                     LValue LHS,
                                     Expr *Init,
                                     llvm::Value *ArrayIndexVar,
                                     QualType T,
                                     ArrayRef<VarDecl *> ArrayIndexes,
                                     unsigned Index) {
  if (Index == ArrayIndexes.size()) {
    LValue LV = LHS;

    if (ArrayIndexVar) {
      // If we have an array index variable, load it and use it as an offset.
      // Then, increment the value.
      llvm::Value *Dest = LHS.getAddress();
      llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar);
      Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress");
      llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1);
      Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc");
      CGF.Builder.CreateStore(Next, ArrayIndexVar);

      // Update the LValue.
      LV.setAddress(Dest);
      CharUnits Align = CGF.getContext().getTypeAlignInChars(T);
      LV.setAlignment(std::min(Align, LV.getAlignment()));
    }

    switch (CGF.getEvaluationKind(T)) {
    case TEK_Scalar:
      CGF.EmitScalarInit(Init, /*decl*/ 0, LV, false);
      break;
    case TEK_Complex:
      CGF.EmitComplexExprIntoLValue(Init, LV, /*isInit*/ true);
      break;
    case TEK_Aggregate: {
      AggValueSlot Slot =
        AggValueSlot::forLValue(LV,
                                AggValueSlot::IsDestructed,
                                AggValueSlot::DoesNotNeedGCBarriers,
                                AggValueSlot::IsNotAliased);

      CGF.EmitAggExpr(Init, Slot);
      break;
    }
    }

    return;
  }

  const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(T);
  assert(Array && "Array initialization without the array type?");
  llvm::Value *IndexVar
    = CGF.GetAddrOfLocalVar(ArrayIndexes[Index]);
  assert(IndexVar && "Array index variable not loaded");

  // Initialize this index variable to zero.
  llvm::Value* Zero
    = llvm::Constant::getNullValue(
                              CGF.ConvertType(CGF.getContext().getSizeType()));
  CGF.Builder.CreateStore(Zero, IndexVar);

  // Start the loop with a block that tests the condition.
  llvm::BasicBlock *CondBlock = CGF.createBasicBlock("for.cond");
  llvm::BasicBlock *AfterFor = CGF.createBasicBlock("for.end");

  CGF.EmitBlock(CondBlock);

  llvm::BasicBlock *ForBody = CGF.createBasicBlock("for.body");
  // Generate: if (loop-index < number-of-elements) fall to the loop body,
  // otherwise, go to the block after the for-loop.
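  // (Each recursion level of this function emits the loop for one array
  // dimension.)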
  uint64_t NumElements = Array->getSize().getZExtValue();
  llvm::Value *Counter = CGF.Builder.CreateLoad(IndexVar);
  llvm::Value *NumElementsPtr =
    llvm::ConstantInt::get(Counter->getType(), NumElements);
  llvm::Value *IsLess = CGF.Builder.CreateICmpULT(Counter, NumElementsPtr,
                                                  "isless");

  // If the condition is true, execute the body.
  CGF.Builder.CreateCondBr(IsLess, ForBody, AfterFor);

  CGF.EmitBlock(ForBody);
  llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc");

  // Inside the loop body recurse to emit the inner loop or, eventually, the
  // constructor call.
  EmitAggMemberInitializer(CGF, LHS, Init, ArrayIndexVar,
                           Array->getElementType(), ArrayIndexes, Index + 1);

  CGF.EmitBlock(ContinueBlock);

  // Emit the increment of the loop counter.
  llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1);
  Counter = CGF.Builder.CreateLoad(IndexVar);
  NextVal = CGF.Builder.CreateAdd(Counter, NextVal, "inc");
  CGF.Builder.CreateStore(NextVal, IndexVar);

  // Finally, branch back up to the condition for the next iteration.
  CGF.EmitBranch(CondBlock);

  // Emit the fall-through block.
  CGF.EmitBlock(AfterFor, true);
}

static void EmitMemberInitializer(CodeGenFunction &CGF,
                                  const CXXRecordDecl *ClassDecl,
                                  CXXCtorInitializer *MemberInit,
                                  const CXXConstructorDecl *Constructor,
                                  FunctionArgList &Args) {
  assert(MemberInit->isAnyMemberInitializer() &&
         "Must have member initializer!");
  assert(MemberInit->getInit() && "Must have initializer!");

  // non-static data member initializers.
  FieldDecl *Field = MemberInit->getAnyMember();
  QualType FieldType = Field->getType();

  llvm::Value *ThisPtr = CGF.LoadCXXThis();
  QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
  LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);

  if (MemberInit->isIndirectMemberInitializer()) {
    // If we are initializing an anonymous union field, drill down to
    // the field.
    IndirectFieldDecl *IndirectField = MemberInit->getIndirectMember();
    IndirectFieldDecl::chain_iterator I = IndirectField->chain_begin(),
      IEnd = IndirectField->chain_end();
    for ( ; I != IEnd; ++I)
      LHS = CGF.EmitLValueForFieldInitialization(LHS, cast<FieldDecl>(*I));
    FieldType = MemberInit->getIndirectMember()->getAnonField()->getType();
  } else {
    LHS = CGF.EmitLValueForFieldInitialization(LHS, Field);
  }

  // Special case: if we are in a copy or move constructor, and we are copying
  // an array of PODs or classes with trivial copy constructors, ignore the
  // AST and perform the copy we know is equivalent.
  // FIXME: This is hacky at best... if we had a bit more explicit information
  // in the AST, we could generalize it more easily.
  const ConstantArrayType *Array
    = CGF.getContext().getAsConstantArrayType(FieldType);
  if (Array && Constructor->isDefaulted() &&
      Constructor->isCopyOrMoveConstructor()) {
    QualType BaseElementTy = CGF.getContext().getBaseElementType(Array);
    CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());
    if (BaseElementTy.isPODType(CGF.getContext()) ||
        (CE && CE->getConstructor()->isTrivial())) {
      // Find the source pointer. We know it's the last argument because
      // we know we're in an implicit copy constructor.
      unsigned SrcArgIndex = Args.size() - 1;
      llvm::Value *SrcPtr
        = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex]));
      LValue ThisRHSLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
      LValue Src = CGF.EmitLValueForFieldInitialization(ThisRHSLV, Field);

      // Copy the aggregate.
      CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType,
                            LHS.isVolatileQualified());
      return;
    }
  }

  ArrayRef<VarDecl *> ArrayIndexes;
  if (MemberInit->getNumArrayIndices())
    ArrayIndexes = MemberInit->getArrayIndexes();
  CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit(), ArrayIndexes);
}

void CodeGenFunction::EmitInitializerForField(FieldDecl *Field,
                                              LValue LHS, Expr *Init,
                                             ArrayRef<VarDecl *> ArrayIndexes) {
  QualType FieldType = Field->getType();
  switch (getEvaluationKind(FieldType)) {
  case TEK_Scalar:
    if (LHS.isSimple()) {
      EmitExprAsInit(Init, Field, LHS, false);
    } else {
      RValue RHS = RValue::get(EmitScalarExpr(Init));
      EmitStoreThroughLValue(RHS, LHS);
    }
    break;
  case TEK_Complex:
    EmitComplexExprIntoLValue(Init, LHS, /*isInit*/ true);
    break;
  case TEK_Aggregate: {
    llvm::Value *ArrayIndexVar = 0;
    if (ArrayIndexes.size()) {
      llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

      // The LHS is a pointer to the first object we'll be constructing, as
      // a flat array.
      QualType BaseElementTy = getContext().getBaseElementType(FieldType);
      llvm::Type *BasePtr = ConvertType(BaseElementTy);
      BasePtr = llvm::PointerType::getUnqual(BasePtr);
      llvm::Value *BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(),
                                                       BasePtr);
      LHS = MakeAddrLValue(BaseAddrPtr, BaseElementTy);

      // Create an array index that will be used to walk over all of the
      // objects we're constructing.
      ArrayIndexVar = CreateTempAlloca(SizeTy, "object.index");
      llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
      Builder.CreateStore(Zero, ArrayIndexVar);

      // Emit the block variables for the array indices, if any.
      for (unsigned I = 0, N = ArrayIndexes.size(); I != N; ++I)
        EmitAutoVarDecl(*ArrayIndexes[I]);
    }

    EmitAggMemberInitializer(*this, LHS, Init, ArrayIndexVar, FieldType,
                             ArrayIndexes, 0);
  }
  }

  // Ensure that we destroy this object if an exception is thrown
  // later in the constructor.
  QualType::DestructionKind dtorKind = FieldType.isDestructedType();
  if (needsEHCleanup(dtorKind))
    pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
}

/// Checks whether the given constructor is a valid subject for the
/// complete-to-base constructor delegation optimization, i.e.
/// emitting the complete constructor as a simple call to the base
/// constructor.
static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor) {

  // Currently we disable the optimization for classes with virtual
  // bases because (1) the addresses of parameter variables need to be
  // consistent across all initializers but (2) the delegate function
  // call necessarily creates a second copy of the parameter variable.
  //
  // The limiting example (purely theoretical AFAIK):
  //   struct A { A(int &c) { c++; } };
  //   struct B : virtual A {
  //     B(int count) : A(count) { printf("%d\n", count); }
  //   };
  // ...although even this example could in principle be emitted as a
  // delegation since the address of the parameter doesn't escape.
  if (Ctor->getParent()->getNumVBases()) {
    // TODO: white-list trivial vbase initializers. This case wouldn't
    // be subject to the restrictions below.

    // TODO: white-list cases where:
    //  - there are no non-reference parameters to the constructor
    //  - the initializers don't access any non-reference parameters
    //  - the initializers don't take the address of non-reference
    //    parameters
    //  - etc.
    // If we ever add any of the above cases, remember that:
    //  - function-try-blocks will always blacklist this optimization
    //  - we need to perform the constructor prologue and cleanup in
    //    EmitConstructorBody.

    return false;
  }

  // We also disable the optimization for variadic functions because
  // it's impossible to "re-pass" varargs.
  if (Ctor->getType()->getAs<FunctionProtoType>()->isVariadic())
    return false;

  // FIXME: Decide if we can do a delegation of a delegating constructor.
  if (Ctor->isDelegatingConstructor())
    return false;

  return true;
}

/// EmitConstructorBody - Emits the body of the current constructor.
void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
  const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(CurGD.getDecl());
  CXXCtorType CtorType = CurGD.getCtorType();

  assert((CGM.getTarget().getCXXABI().hasConstructorVariants() ||
          CtorType == Ctor_Complete) &&
         "can only generate complete ctor for this ABI");

  // Before we go any further, try the complete->base constructor
  // delegation optimization.
  if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor) &&
      CGM.getTarget().getCXXABI().hasConstructorVariants()) {
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitLocation(Builder, Ctor->getLocEnd());
    EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args, Ctor->getLocEnd());
    return;
  }

  Stmt *Body = Ctor->getBody();

  // Enter the function-try-block before the constructor prologue if
  // applicable.
  bool IsTryBody = (Body && isa<CXXTryStmt>(Body));
  if (IsTryBody)
    EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);

  RegionCounter Cnt = getPGORegionCounter(Body);
  Cnt.beginRegion(Builder);

  RunCleanupsScope RunCleanups(*this);

  // TODO: in restricted cases, we can emit the vbase initializers of
  // a complete ctor and then delegate to the base ctor.

  // Emit the constructor prologue, i.e. the base and member
  // initializers.
  EmitCtorPrologue(Ctor, CtorType, Args);

  // Emit the body of the statement.
  if (IsTryBody)
    EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
  else if (Body)
    EmitStmt(Body);

  // Emit any cleanup blocks associated with the member or base
  // initializers, which includes (along the exceptional path) the
  // destructors for those members and bases that were fully
  // constructed.
  RunCleanups.ForceCleanup();

  if (IsTryBody)
    ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
}

namespace {
  /// RAII object to indicate that codegen is copying the value representation
  /// instead of the object representation.
  /// Useful when copying a struct or
  /// class which has uninitialized members and we're only performing
  /// lvalue-to-rvalue conversion on the object but not its members.
  class CopyingValueRepresentation {
  public:
    explicit CopyingValueRepresentation(CodeGenFunction &CGF)
        : CGF(CGF), SO(*CGF.SanOpts), OldSanOpts(CGF.SanOpts) {
      SO.Bool = false;
      SO.Enum = false;
      CGF.SanOpts = &SO;
    }
    ~CopyingValueRepresentation() {
      CGF.SanOpts = OldSanOpts;
    }
  private:
    CodeGenFunction &CGF;
    SanitizerOptions SO;
    const SanitizerOptions *OldSanOpts;
  };
}

namespace {
  class FieldMemcpyizer {
  public:
    FieldMemcpyizer(CodeGenFunction &CGF, const CXXRecordDecl *ClassDecl,
                    const VarDecl *SrcRec)
      : CGF(CGF), ClassDecl(ClassDecl), SrcRec(SrcRec),
        RecLayout(CGF.getContext().getASTRecordLayout(ClassDecl)),
        FirstField(0), LastField(0), FirstFieldOffset(0), LastFieldOffset(0),
        LastAddedFieldIndex(0) { }

    static bool isMemcpyableField(FieldDecl *F) {
      Qualifiers Qual = F->getType().getQualifiers();
      if (Qual.hasVolatile() || Qual.hasObjCLifetime())
        return false;
      return true;
    }

    void addMemcpyableField(FieldDecl *F) {
      if (FirstField == 0)
        addInitialField(F);
      else
        addNextField(F);
    }

    CharUnits getMemcpySize() const {
      unsigned LastFieldSize =
        LastField->isBitField() ?
          LastField->getBitWidthValue(CGF.getContext()) :
          CGF.getContext().getTypeSize(LastField->getType());
      uint64_t MemcpySizeBits =
        LastFieldOffset + LastFieldSize - FirstFieldOffset +
        CGF.getContext().getCharWidth() - 1;
      CharUnits MemcpySize =
        CGF.getContext().toCharUnitsFromBits(MemcpySizeBits);
      return MemcpySize;
    }

    void emitMemcpy() {
      // Give the subclass a chance to bail out if it feels the memcpy isn't
      // worth it (e.g. hasn't aggregated enough data).
      if (FirstField == 0) {
        return;
      }

      CharUnits Alignment;

      if (FirstField->isBitField()) {
        const CGRecordLayout &RL =
          CGF.getTypes().getCGRecordLayout(FirstField->getParent());
        const CGBitFieldInfo &BFInfo = RL.getBitFieldInfo(FirstField);
        Alignment = CharUnits::fromQuantity(BFInfo.StorageAlignment);
      } else {
        Alignment = CGF.getContext().getDeclAlign(FirstField);
      }

      assert((CGF.getContext().toCharUnitsFromBits(FirstFieldOffset) %
              Alignment) == 0 && "Bad field alignment.");

      CharUnits MemcpySize = getMemcpySize();
      QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
      llvm::Value *ThisPtr = CGF.LoadCXXThis();
      LValue DestLV = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
      LValue Dest = CGF.EmitLValueForFieldInitialization(DestLV, FirstField);
      llvm::Value *SrcPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(SrcRec));
      LValue SrcLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
      LValue Src = CGF.EmitLValueForFieldInitialization(SrcLV, FirstField);

      emitMemcpyIR(Dest.isBitField() ? Dest.getBitFieldAddr() : Dest.getAddress(),
                   Src.isBitField() ? Src.getBitFieldAddr() : Src.getAddress(),
                   MemcpySize, Alignment);
      reset();
    }

    void reset() {
      FirstField = 0;
    }

  protected:
    CodeGenFunction &CGF;
    const CXXRecordDecl *ClassDecl;

  private:

    void emitMemcpyIR(llvm::Value *DestPtr, llvm::Value *SrcPtr,
                      CharUnits Size, CharUnits Alignment) {
      llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
      llvm::Type *DBP =
        llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), DPT->getAddressSpace());
      DestPtr = CGF.Builder.CreateBitCast(DestPtr, DBP);

      llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
      llvm::Type *SBP =
        llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), SPT->getAddressSpace());
      SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, SBP);

      CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, Size.getQuantity(),
                               Alignment.getQuantity());
    }

    void addInitialField(FieldDecl *F) {
      FirstField = F;
      LastField = F;
      FirstFieldOffset = RecLayout.getFieldOffset(F->getFieldIndex());
      LastFieldOffset = FirstFieldOffset;
      LastAddedFieldIndex = F->getFieldIndex();
      return;
    }

    void addNextField(FieldDecl *F) {
      // For the most part, the following invariant will hold:
      //   F->getFieldIndex() == LastAddedFieldIndex + 1
      // The one exception is that Sema won't add a copy-initializer for an
      // unnamed bitfield, which will show up here as a gap in the sequence.
      assert(F->getFieldIndex() >= LastAddedFieldIndex + 1 &&
             "Cannot aggregate fields out of order.");
      LastAddedFieldIndex = F->getFieldIndex();

      // The 'first' and 'last' fields are chosen by offset, rather than field
      // index. This allows the code to support bitfields, as well as regular
      // fields.
      uint64_t FOffset = RecLayout.getFieldOffset(F->getFieldIndex());
      if (FOffset < FirstFieldOffset) {
        FirstField = F;
        FirstFieldOffset = FOffset;
      } else if (FOffset > LastFieldOffset) {
        LastField = F;
        LastFieldOffset = FOffset;
      }
    }

    const VarDecl *SrcRec;
    const ASTRecordLayout &RecLayout;
    FieldDecl *FirstField;
    FieldDecl *LastField;
    uint64_t FirstFieldOffset, LastFieldOffset;
    unsigned LastAddedFieldIndex;
  };

  class ConstructorMemcpyizer : public FieldMemcpyizer {
  private:

    /// Get source argument for copy constructor. Returns null if not a copy
    /// constructor.
    static const VarDecl* getTrivialCopySource(const CXXConstructorDecl *CD,
                                               FunctionArgList &Args) {
      if (CD->isCopyOrMoveConstructor() && CD->isDefaulted())
        return Args[Args.size() - 1];
      return 0;
    }

    // Returns true if a CXXCtorInitializer represents a member initialization
    // that can be rolled into a memcpy.
    bool isMemberInitMemcpyable(CXXCtorInitializer *MemberInit) const {
      if (!MemcpyableCtor)
        return false;
      FieldDecl *Field = MemberInit->getMember();
      assert(Field != 0 && "No field for member init.");
      QualType FieldType = Field->getType();
      CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());

      // Bail out on non-POD, not-trivially-constructible members.
      if (!(CE && CE->getConstructor()->isTrivial()) &&
          !(FieldType.isTriviallyCopyableType(CGF.getContext()) ||
            FieldType->isReferenceType()))
        return false;

      // Bail out on volatile fields.
      if (!isMemcpyableField(Field))
        return false;

      // Otherwise we're good.
      return true;
    }

  public:
    ConstructorMemcpyizer(CodeGenFunction &CGF, const CXXConstructorDecl *CD,
                          FunctionArgList &Args)
      : FieldMemcpyizer(CGF, CD->getParent(), getTrivialCopySource(CD, Args)),
        ConstructorDecl(CD),
        MemcpyableCtor(CD->isDefaulted() &&
                       CD->isCopyOrMoveConstructor() &&
                       CGF.getLangOpts().getGC() == LangOptions::NonGC),
        Args(Args) { }

    void addMemberInitializer(CXXCtorInitializer *MemberInit) {
      if (isMemberInitMemcpyable(MemberInit)) {
        AggregatedInits.push_back(MemberInit);
        addMemcpyableField(MemberInit->getMember());
      } else {
        emitAggregatedInits();
        EmitMemberInitializer(CGF, ConstructorDecl->getParent(), MemberInit,
                              ConstructorDecl, Args);
      }
    }

    void emitAggregatedInits() {
      if (AggregatedInits.size() <= 1) {
        // This memcpy is too small to be worthwhile. Fall back on default
        // codegen.
        if (!AggregatedInits.empty()) {
          CopyingValueRepresentation CVR(CGF);
          EmitMemberInitializer(CGF, ConstructorDecl->getParent(),
                                AggregatedInits[0], ConstructorDecl, Args);
        }
        reset();
        return;
      }

      pushEHDestructors();
      emitMemcpy();
      AggregatedInits.clear();
    }

    void pushEHDestructors() {
      llvm::Value *ThisPtr = CGF.LoadCXXThis();
      QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
      LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);

      for (unsigned i = 0; i < AggregatedInits.size(); ++i) {
        QualType FieldType = AggregatedInits[i]->getMember()->getType();
        QualType::DestructionKind dtorKind = FieldType.isDestructedType();
        if (CGF.needsEHCleanup(dtorKind))
          CGF.pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
      }
    }

    void finish() {
      emitAggregatedInits();
    }

  private:
    const CXXConstructorDecl *ConstructorDecl;
    bool MemcpyableCtor;
    FunctionArgList &Args;
    SmallVector<CXXCtorInitializer*, 16> AggregatedInits;
  };

  class AssignmentMemcpyizer : public FieldMemcpyizer {
  private:

    // Returns the memcpyable field copied by the given statement, if one
    // exists. Otherwise returns null.
    FieldDecl *getMemcpyableField(Stmt *S) {
      if (!AssignmentsMemcpyable)
        return 0;
      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(S)) {
        // Recognise trivial assignments.
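        // i.e. statements of the form 'x = other.x' where both sides name
        // the same memcpyable field.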
        if (BO->getOpcode() != BO_Assign)
          return 0;
        MemberExpr *ME = dyn_cast<MemberExpr>(BO->getLHS());
        if (!ME)
          return 0;
        FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
        if (!Field || !isMemcpyableField(Field))
          return 0;
        Stmt *RHS = BO->getRHS();
        if (ImplicitCastExpr *EC = dyn_cast<ImplicitCastExpr>(RHS))
          RHS = EC->getSubExpr();
        if (!RHS)
          return 0;
        MemberExpr *ME2 = dyn_cast<MemberExpr>(RHS);
        if (!ME2 || dyn_cast<FieldDecl>(ME2->getMemberDecl()) != Field)
          return 0;
        return Field;
      } else if (CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(S)) {
        CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MCE->getCalleeDecl());
        if (!(MD && (MD->isCopyAssignmentOperator() ||
                     MD->isMoveAssignmentOperator()) &&
              MD->isTrivial()))
          return 0;
        MemberExpr *IOA = dyn_cast<MemberExpr>(MCE->getImplicitObjectArgument());
        if (!IOA)
          return 0;
        FieldDecl *Field = dyn_cast<FieldDecl>(IOA->getMemberDecl());
        if (!Field || !isMemcpyableField(Field))
          return 0;
        MemberExpr *Arg0 = dyn_cast<MemberExpr>(MCE->getArg(0));
        if (!Arg0 || Field != dyn_cast<FieldDecl>(Arg0->getMemberDecl()))
          return 0;
        return Field;
      } else if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
        FunctionDecl *FD = dyn_cast<FunctionDecl>(CE->getCalleeDecl());
        if (!FD || FD->getBuiltinID() != Builtin::BI__builtin_memcpy)
          return 0;
        Expr *DstPtr = CE->getArg(0);
        if (ImplicitCastExpr *DC = dyn_cast<ImplicitCastExpr>(DstPtr))
          DstPtr = DC->getSubExpr();
        UnaryOperator *DUO = dyn_cast<UnaryOperator>(DstPtr);
        if (!DUO || DUO->getOpcode() != UO_AddrOf)
          return 0;
        MemberExpr *ME = dyn_cast<MemberExpr>(DUO->getSubExpr());
        if (!ME)
          return 0;
        FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
        if (!Field || !isMemcpyableField(Field))
          return 0;
        Expr *SrcPtr = CE->getArg(1);
        if (ImplicitCastExpr *SC = dyn_cast<ImplicitCastExpr>(SrcPtr))
          SrcPtr = SC->getSubExpr();
        UnaryOperator *SUO = dyn_cast<UnaryOperator>(SrcPtr);
        if (!SUO || SUO->getOpcode() != UO_AddrOf)
          return 0;
        MemberExpr *ME2 = dyn_cast<MemberExpr>(SUO->getSubExpr());
        if (!ME2 || Field != dyn_cast<FieldDecl>(ME2->getMemberDecl()))
          return 0;
        return Field;
      }

      return 0;
    }

    bool AssignmentsMemcpyable;
    SmallVector<Stmt*, 16> AggregatedStmts;

  public:

    AssignmentMemcpyizer(CodeGenFunction &CGF, const CXXMethodDecl *AD,
                         FunctionArgList &Args)
      : FieldMemcpyizer(CGF, AD->getParent(), Args[Args.size() - 1]),
        AssignmentsMemcpyable(CGF.getLangOpts().getGC() == LangOptions::NonGC) {
      assert(Args.size() == 2);
    }

    void emitAssignment(Stmt *S) {
      FieldDecl *F = getMemcpyableField(S);
      if (F) {
        addMemcpyableField(F);
        AggregatedStmts.push_back(S);
      } else {
        emitAggregatedStmts();
        CGF.EmitStmt(S);
      }
    }

    void emitAggregatedStmts() {
      if (AggregatedStmts.size() <= 1) {
        if (!AggregatedStmts.empty()) {
          CopyingValueRepresentation CVR(CGF);
          CGF.EmitStmt(AggregatedStmts[0]);
        }
        reset();
      }

      emitMemcpy();
      AggregatedStmts.clear();
    }

    void finish() {
      emitAggregatedStmts();
    }
  };

}

/// EmitCtorPrologue - This routine generates necessary code to initialize
/// base classes and non-static data members belonging to this constructor.
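/// Initializers are emitted in order: virtual bases, then non-virtual bases,
/// then vtable pointer setup, then non-static data members.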
void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
                                       CXXCtorType CtorType,
                                       FunctionArgList &Args) {
  if (CD->isDelegatingConstructor())
    return EmitDelegatingCXXConstructorCall(CD, Args);

  const CXXRecordDecl *ClassDecl = CD->getParent();

  CXXConstructorDecl::init_const_iterator B = CD->init_begin(),
                                          E = CD->init_end();

  llvm::BasicBlock *BaseCtorContinueBB = 0;
  if (ClassDecl->getNumVBases() &&
      !CGM.getTarget().getCXXABI().hasConstructorVariants()) {
    // The ABIs that don't have constructor variants need to put a branch
    // before the virtual base initialization code.
    BaseCtorContinueBB =
      CGM.getCXXABI().EmitCtorCompleteObjectHandler(*this, ClassDecl);
    assert(BaseCtorContinueBB);
  }

  // Virtual base initializers first.
  for (; B != E && (*B)->isBaseInitializer() && (*B)->isBaseVirtual(); B++) {
    EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
  }

  if (BaseCtorContinueBB) {
    // Complete object handler should continue to the remaining initializers.
    Builder.CreateBr(BaseCtorContinueBB);
    EmitBlock(BaseCtorContinueBB);
  }

  // Then, non-virtual base initializers.
  for (; B != E && (*B)->isBaseInitializer(); B++) {
    assert(!(*B)->isBaseVirtual());
    EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
  }

  InitializeVTablePointers(ClassDecl);

  // And finally, initialize class members.
  FieldConstructionScope FCS(*this, CXXThisValue);
  ConstructorMemcpyizer CM(*this, CD, Args);
  for (; B != E; B++) {
    CXXCtorInitializer *Member = (*B);
    assert(!Member->isBaseInitializer());
    assert(Member->isAnyMemberInitializer() &&
           "Delegating initializer on non-delegating constructor");
    CM.addMemberInitializer(Member);
  }
  CM.finish();
}

static bool
FieldHasTrivialDestructorBody(ASTContext &Context, const FieldDecl *Field);

static bool
HasTrivialDestructorBody(ASTContext &Context,
                         const CXXRecordDecl *BaseClassDecl,
                         const CXXRecordDecl *MostDerivedClassDecl)
{
  // If the destructor is trivial we don't have to check anything else.
  if (BaseClassDecl->hasTrivialDestructor())
    return true;

  if (!BaseClassDecl->getDestructor()->hasTrivialBody())
    return false;

  // Check fields.
  for (CXXRecordDecl::field_iterator I = BaseClassDecl->field_begin(),
       E = BaseClassDecl->field_end(); I != E; ++I) {
    const FieldDecl *Field = *I;

    if (!FieldHasTrivialDestructorBody(Context, Field))
      return false;
  }

  // Check non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I =
       BaseClassDecl->bases_begin(), E = BaseClassDecl->bases_end();
       I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *NonVirtualBase =
      cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
    if (!HasTrivialDestructorBody(Context, NonVirtualBase,
                                  MostDerivedClassDecl))
      return false;
  }

  if (BaseClassDecl == MostDerivedClassDecl) {
    // Check virtual bases.
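    // Virtual bases are destroyed only by the most derived class's
    // destructor, so they are only relevant when checking that class.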
    for (CXXRecordDecl::base_class_const_iterator I =
         BaseClassDecl->vbases_begin(), E = BaseClassDecl->vbases_end();
         I != E; ++I) {
      const CXXRecordDecl *VirtualBase =
        cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
      if (!HasTrivialDestructorBody(Context, VirtualBase,
                                    MostDerivedClassDecl))
        return false;
    }
  }

  return true;
}

static bool
FieldHasTrivialDestructorBody(ASTContext &Context,
                              const FieldDecl *Field)
{
  QualType FieldBaseElementType = Context.getBaseElementType(Field->getType());

  const RecordType *RT = FieldBaseElementType->getAs<RecordType>();
  if (!RT)
    return true;

  CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
  return HasTrivialDestructorBody(Context, FieldClassDecl, FieldClassDecl);
}

/// CanSkipVTablePointerInitialization - Check whether we need to initialize
/// any vtable pointers before calling this destructor.
static bool CanSkipVTablePointerInitialization(ASTContext &Context,
                                               const CXXDestructorDecl *Dtor) {
  if (!Dtor->hasTrivialBody())
    return false;

  // Check the fields.
  const CXXRecordDecl *ClassDecl = Dtor->getParent();
  for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
       E = ClassDecl->field_end(); I != E; ++I) {
    const FieldDecl *Field = *I;

    if (!FieldHasTrivialDestructorBody(Context, Field))
      return false;
  }

  return true;
}

/// EmitDestructorBody - Emits the body of the current destructor.
void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
  const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CurGD.getDecl());
  CXXDtorType DtorType = CurGD.getDtorType();

  // The call to operator delete in a deleting destructor happens
  // outside of the function-try-block, which means it's always
  // possible to delegate the destructor body to the complete
  // destructor. Do so.
  if (DtorType == Dtor_Deleting) {
    EnterDtorCleanups(Dtor, Dtor_Deleting);
    EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
                          /*Delegating=*/false, LoadCXXThis());
    PopCleanupBlock();
    return;
  }

  Stmt *Body = Dtor->getBody();

  // If the body is a function-try-block, enter the try before
  // anything else.
  bool isTryBody = (Body && isa<CXXTryStmt>(Body));
  if (isTryBody)
    EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);

  // Enter the epilogue cleanups.
  RunCleanupsScope DtorEpilogue(*this);

  // If this is the complete variant, just invoke the base variant;
  // the epilogue will destruct the virtual bases. But we can't do
  // this optimization if the body is a function-try-block, because
  // we'd introduce *two* handler blocks. In the Microsoft ABI, we
  // always delegate because we might not have a definition in this TU.
  switch (DtorType) {
  case Dtor_Deleting: llvm_unreachable("already handled deleting case");

  case Dtor_Complete:
    assert((Body || getTarget().getCXXABI().isMicrosoft()) &&
           "can't emit a dtor without a body for non-Microsoft ABIs");

    // Enter the cleanup scopes for virtual bases.
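    // Fields and non-virtual bases are destroyed by the base variant,
    // reached below either by call or by fallthrough.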
    EnterDtorCleanups(Dtor, Dtor_Complete);

    if (!isTryBody) {
      EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
                            /*Delegating=*/false, LoadCXXThis());
      break;
    }
    // Fallthrough: act like we're in the base variant.

  case Dtor_Base:
    assert(Body);

    RegionCounter Cnt = getPGORegionCounter(Body);
    Cnt.beginRegion(Builder);

    // Enter the cleanup scopes for fields and non-virtual bases.
    EnterDtorCleanups(Dtor, Dtor_Base);

    // Initialize the vtable pointers before entering the body.
    if (!CanSkipVTablePointerInitialization(getContext(), Dtor))
      InitializeVTablePointers(Dtor->getParent());

    if (isTryBody)
      EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
    else if (Body)
      EmitStmt(Body);
    else {
      assert(Dtor->isImplicit() && "bodyless dtor not implicit");
      // nothing to do besides what's in the epilogue
    }
    // -fapple-kext must inline any call to this dtor into
    // the caller's body.
    if (getLangOpts().AppleKext)
      CurFn->addFnAttr(llvm::Attribute::AlwaysInline);
    break;
  }

  // Jump out through the epilogue cleanups.
  DtorEpilogue.ForceCleanup();

  // Exit the try if applicable.
  if (isTryBody)
    ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
}

void CodeGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &Args) {
  const CXXMethodDecl *AssignOp = cast<CXXMethodDecl>(CurGD.getDecl());
  const Stmt *RootS = AssignOp->getBody();
  assert(isa<CompoundStmt>(RootS) &&
         "Body of an implicit assignment operator should be compound stmt.");
  const CompoundStmt *RootCS = cast<CompoundStmt>(RootS);

  LexicalScope Scope(*this, RootCS->getSourceRange());

  AssignmentMemcpyizer AM(*this, AssignOp, Args);
  for (CompoundStmt::const_body_iterator I = RootCS->body_begin(),
                                         E = RootCS->body_end();
       I != E; ++I) {
    AM.emitAssignment(*I);
  }
  AM.finish();
}

namespace {
  /// Call the operator delete associated with the current destructor.
  struct CallDtorDelete : EHScopeStack::Cleanup {
    CallDtorDelete() {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
      const CXXRecordDecl *ClassDecl = Dtor->getParent();
      CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
                         CGF.getContext().getTagDeclType(ClassDecl));
    }
  };

  struct CallDtorDeleteConditional : EHScopeStack::Cleanup {
    llvm::Value *ShouldDeleteCondition;
  public:
    CallDtorDeleteConditional(llvm::Value *ShouldDeleteCondition)
      : ShouldDeleteCondition(ShouldDeleteCondition) {
      assert(ShouldDeleteCondition != NULL);
    }

    void Emit(CodeGenFunction &CGF, Flags flags) {
      llvm::BasicBlock *callDeleteBB = CGF.createBasicBlock("dtor.call_delete");
      llvm::BasicBlock *continueBB = CGF.createBasicBlock("dtor.continue");
      llvm::Value *ShouldCallDelete
        = CGF.Builder.CreateIsNull(ShouldDeleteCondition);
      CGF.Builder.CreateCondBr(ShouldCallDelete, continueBB, callDeleteBB);

      CGF.EmitBlock(callDeleteBB);
      const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
      const CXXRecordDecl *ClassDecl = Dtor->getParent();
      CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
                         CGF.getContext().getTagDeclType(ClassDecl));
      CGF.Builder.CreateBr(continueBB);

      CGF.EmitBlock(continueBB);
    }
  };

  class DestroyField : public EHScopeStack::Cleanup {
    const FieldDecl *field;
    CodeGenFunction::Destroyer *destroyer;
    bool useEHCleanupForArray;

  public:
    DestroyField(const FieldDecl *field, CodeGenFunction::Destroyer *destroyer,
                 bool useEHCleanupForArray)
      : field(field), destroyer(destroyer),
        useEHCleanupForArray(useEHCleanupForArray) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      // Find the address of the field.
      llvm::Value *thisValue = CGF.LoadCXXThis();
      QualType RecordTy = CGF.getContext().getTagDeclType(field->getParent());
      LValue ThisLV = CGF.MakeAddrLValue(thisValue, RecordTy);
      LValue LV = CGF.EmitLValueForField(ThisLV, field);
      assert(LV.isSimple());

      CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer,
                      flags.isForNormalCleanup() && useEHCleanupForArray);
    }
  };
}

/// \brief Emit all code that comes at the end of class's
/// destructor. This is to call destructors on members and base classes
/// in reverse order of their construction.
void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
                                        CXXDtorType DtorType) {
  assert(!DD->isTrivial() &&
         "Should not emit dtor epilogue for trivial dtor!");

  // The deleting-destructor phase just needs to call the appropriate
  // operator delete that Sema picked up.
  if (DtorType == Dtor_Deleting) {
    assert(DD->getOperatorDelete() &&
           "operator delete missing - EnterDtorCleanups");
    if (CXXStructorImplicitParamValue) {
      // If there is an implicit param to the deleting dtor, it's a boolean
      // telling whether we should call delete at the end of the dtor.
      EHStack.pushCleanup<CallDtorDeleteConditional>(
          NormalAndEHCleanup, CXXStructorImplicitParamValue);
    } else {
      EHStack.pushCleanup<CallDtorDelete>(NormalAndEHCleanup);
    }
    return;
  }

  const CXXRecordDecl *ClassDecl = DD->getParent();

  // Unions have no bases and do not call field destructors.
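  // (C++ never implicitly destroys the members of a union.)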
  if (ClassDecl->isUnion())
    return;

  // The complete-destructor phase just destructs all the virtual bases.
  if (DtorType == Dtor_Complete) {

    // We push them in the forward order so that they'll be popped in
    // the reverse order.
    for (CXXRecordDecl::base_class_const_iterator I =
           ClassDecl->vbases_begin(), E = ClassDecl->vbases_end();
         I != E; ++I) {
      const CXXBaseSpecifier &Base = *I;
      CXXRecordDecl *BaseClassDecl
        = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());

      // Ignore trivial destructors.
      if (BaseClassDecl->hasTrivialDestructor())
        continue;

      EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
                                        BaseClassDecl,
                                        /*BaseIsVirtual*/ true);
    }

    return;
  }

  assert(DtorType == Dtor_Base);

  // Destroy non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I =
        ClassDecl->bases_begin(), E = ClassDecl->bases_end(); I != E; ++I) {
    const CXXBaseSpecifier &Base = *I;

    // Ignore virtual bases.
    if (Base.isVirtual())
      continue;

    CXXRecordDecl *BaseClassDecl = Base.getType()->getAsCXXRecordDecl();

    // Ignore trivial destructors.
    if (BaseClassDecl->hasTrivialDestructor())
      continue;

    EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
                                      BaseClassDecl,
                                      /*BaseIsVirtual*/ false);
  }

  // Destroy direct fields.
  for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
       E = ClassDecl->field_end(); I != E; ++I) {
    const FieldDecl *field = *I;
    QualType type = field->getType();
    QualType::DestructionKind dtorKind = type.isDestructedType();
    if (!dtorKind) continue;

    // Anonymous union members do not have their destructors called.
    const RecordType *RT = type->getAsUnionType();
    if (RT && RT->getDecl()->isAnonymousStructOrUnion()) continue;

    CleanupKind cleanupKind = getCleanupKind(dtorKind);
    EHStack.pushCleanup<DestroyField>(cleanupKind, field,
                                      getDestroyer(dtorKind),
                                      cleanupKind & EHCleanup);
  }
}

/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
/// constructor for each of several members of an array.
///
/// \param ctor the constructor to call for each element
/// \param arrayType the type of the array to initialize
/// \param arrayBegin an arrayType*
/// \param zeroInitialize true if each element should be
///   zero-initialized before it is constructed
void
CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
                                            const ConstantArrayType *arrayType,
                                            llvm::Value *arrayBegin,
                                          CallExpr::const_arg_iterator argBegin,
                                            CallExpr::const_arg_iterator argEnd,
                                            bool zeroInitialize) {
  QualType elementType;
  llvm::Value *numElements =
    emitArrayLength(arrayType, elementType, arrayBegin);

  EmitCXXAggrConstructorCall(ctor, numElements, arrayBegin,
                             argBegin, argEnd, zeroInitialize);
}

/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
/// constructor for each of several members of an array.
///
/// \param ctor the constructor to call for each element
/// \param numElements the number of elements in the array;
///   may be zero
/// \param arrayBegin a T*, where T is the type constructed by ctor
/// \param zeroInitialize true if each element should be
///   zero-initialized before it is constructed
void
CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
                                            llvm::Value *numElements,
                                            llvm::Value *arrayBegin,
                                          CallExpr::const_arg_iterator argBegin,
                                            CallExpr::const_arg_iterator argEnd,
                                            bool zeroInitialize) {

  // It's legal for numElements to be zero. This can happen both
  // dynamically, because x can be zero in 'new A[x]', and statically,
  // because of GCC extensions that permit zero-length arrays. There
  // are probably legitimate places where we could assume that this
  // doesn't happen, but it's not clear that it's worth it.
  llvm::BranchInst *zeroCheckBranch = 0;

  // Optimize for a constant count.
  llvm::ConstantInt *constantCount
    = dyn_cast<llvm::ConstantInt>(numElements);
  if (constantCount) {
    // Just skip out if the constant count is zero.
    if (constantCount->isZero()) return;

  // Otherwise, emit the check.
  } else {
    llvm::BasicBlock *loopBB = createBasicBlock("new.ctorloop");
    llvm::Value *iszero = Builder.CreateIsNull(numElements, "isempty");
    // Both successors start out pointing at the loop; the "is zero" edge is
    // patched to skip to the continuation block (see setSuccessor below)
    // once that block exists.
    zeroCheckBranch = Builder.CreateCondBr(iszero, loopBB, loopBB);
    EmitBlock(loopBB);
  }

  // Find the end of the array.
  llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(arrayBegin, numElements,
                                                    "arrayctor.end");

  // Enter the loop, setting up a phi for the current location to initialize.
  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = createBasicBlock("arrayctor.loop");
  EmitBlock(loopBB);
  llvm::PHINode *cur = Builder.CreatePHI(arrayBegin->getType(), 2,
                                         "arrayctor.cur");
  cur->addIncoming(arrayBegin, entryBB);

  // Inside the loop body, emit the constructor call on the array element.

  QualType type = getContext().getTypeDeclType(ctor->getParent());

  // Zero initialize the storage, if requested.
  if (zeroInitialize)
    EmitNullInitialization(cur, type);

  // C++ [class.temporary]p4:
  // There are two contexts in which temporaries are destroyed at a different
  // point than the end of the full-expression. The first context is when a
  // default constructor is called to initialize an element of an array.
  // If the constructor has one or more default arguments, the destruction of
  // every temporary created in a default argument expression is sequenced
  // before the construction of the next array element, if any.

  {
    RunCleanupsScope Scope(*this);

    // Evaluate the constructor and its arguments in a regular
    // partial-destroy cleanup.
    if (getLangOpts().Exceptions &&
        !ctor->getParent()->hasTrivialDestructor()) {
      Destroyer *destroyer = destroyCXXObject;
      pushRegularPartialArrayCleanup(arrayBegin, cur, type, *destroyer);
    }

    EmitCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/ false,
                           /*Delegating=*/false, cur, argBegin, argEnd);
  }

  // Go to the next element.
void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF,
                                       llvm::Value *addr,
                                       QualType type) {
  const RecordType *rtype = type->castAs<RecordType>();
  const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
  const CXXDestructorDecl *dtor = record->getDestructor();
  assert(!dtor->isTrivial());
  CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false,
                            /*Delegating=*/false, addr);
}

void
CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
                                        CXXCtorType Type, bool ForVirtualBase,
                                        bool Delegating,
                                        llvm::Value *This,
                                        CallExpr::const_arg_iterator ArgBeg,
                                        CallExpr::const_arg_iterator ArgEnd) {
  // If this is a trivial constructor, just emit what's needed.
  if (D->isTrivial()) {
    if (ArgBeg == ArgEnd) {
      // Trivial default constructor, no codegen required.
      assert(D->isDefaultConstructor() &&
             "trivial 0-arg ctor not a default ctor");
      return;
    }

    assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
    assert(D->isCopyOrMoveConstructor() &&
           "trivial 1-arg ctor not a copy/move ctor");

    const Expr *E = (*ArgBeg);
    QualType Ty = E->getType();
    llvm::Value *Src = EmitLValue(E).getAddress();
    EmitAggregateCopy(This, Src, Ty);
    return;
  }

  // C++11 [class.mfct.non-static]p2:
  //   If a non-static member function of a class X is called for an object that
  //   is not of type X, or of a type derived from X, the behavior is undefined.
  // FIXME: Provide a source location here.
  EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, SourceLocation(), This,
                getContext().getRecordType(D->getParent()));

  CallArgList Args;

  // Push the this ptr.
  Args.add(RValue::get(This), D->getThisType(getContext()));

  // Add the rest of the user-supplied arguments.
  const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();
  EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);

  // Insert any ABI-specific implicit constructor arguments.
  unsigned ExtraArgs = CGM.getCXXABI().addImplicitConstructorArgs(
      *this, D, Type, ForVirtualBase, Delegating, Args);

  // Emit the call.
  llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, Type);
  const CGFunctionInfo &Info =
      CGM.getTypes().arrangeCXXConstructorCall(Args, D, Type, ExtraArgs);
  EmitCall(Info, Callee, ReturnValueSlot(), Args, D);
}
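
// For illustration: given 'struct P { int x, y; };  P a;  P b(a);', the
// trivial-constructor path above emits no call at all -- the copy
// construction of 'b' is lowered directly to an aggregate copy
// (EmitAggregateCopy, typically a memcpy) of 'a' into 'b'.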
void
CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
                                        llvm::Value *This, llvm::Value *Src,
                                        CallExpr::const_arg_iterator ArgBeg,
                                        CallExpr::const_arg_iterator ArgEnd) {
  if (D->isTrivial()) {
    assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
    assert(D->isCopyOrMoveConstructor() &&
           "trivial 1-arg ctor not a copy/move ctor");
    EmitAggregateCopy(This, Src, (*ArgBeg)->getType());
    return;
  }
  llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, clang::Ctor_Complete);
  assert(D->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();

  CallArgList Args;

  // Push the this ptr.
  Args.add(RValue::get(This), D->getThisType(getContext()));

  // Push the src ptr.
  QualType QT = *(FPT->param_type_begin());
  llvm::Type *t = CGM.getTypes().ConvertType(QT);
  Src = Builder.CreateBitCast(Src, t);
  Args.add(RValue::get(Src), QT);

  // Skip over first argument (Src).
  EmitCallArgs(Args, FPT->isVariadic(), FPT->param_type_begin() + 1,
               FPT->param_type_end(), ArgBeg + 1, ArgEnd);

  EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, RequiredArgs::All),
           Callee, ReturnValueSlot(), Args, D);
}

void
CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                                CXXCtorType CtorType,
                                                const FunctionArgList &Args,
                                                SourceLocation Loc) {
  CallArgList DelegateArgs;

  FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
  assert(I != E && "no parameters to constructor");

  // this
  DelegateArgs.add(RValue::get(LoadCXXThis()), (*I)->getType());
  ++I;

  // vtt
  if (llvm::Value *VTT = GetVTTParameter(GlobalDecl(Ctor, CtorType),
                                         /*ForVirtualBase=*/false,
                                         /*Delegating=*/true)) {
    QualType VoidPP = getContext().getPointerType(getContext().VoidPtrTy);
    DelegateArgs.add(RValue::get(VTT), VoidPP);

    if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
      assert(I != E && "cannot skip vtt parameter, already done with args");
      assert((*I)->getType() == VoidPP && "skipping parameter not of vtt type");
      ++I;
    }
  }

  // Explicit arguments.
  for (; I != E; ++I) {
    const VarDecl *param = *I;
    // FIXME: per-argument source location
    EmitDelegateCallArg(DelegateArgs, param, Loc);
  }

  llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(Ctor, CtorType);
  EmitCall(CGM.getTypes().arrangeCXXConstructorDeclaration(Ctor, CtorType),
           Callee, ReturnValueSlot(), DelegateArgs, Ctor);
}
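
// For illustration: in the Itanium C++ ABI, the delegate-call machinery
// above is what lets a complete-object constructor (C1) be emitted as a
// thin wrapper that reloads its own parameters and forwards them -- 'this',
// the VTT if the class needs one, then the explicit arguments -- to the
// base-object constructor (C2) of the same class.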
namespace {
  struct CallDelegatingCtorDtor : EHScopeStack::Cleanup {
    const CXXDestructorDecl *Dtor;
    llvm::Value *Addr;
    CXXDtorType Type;

    CallDelegatingCtorDtor(const CXXDestructorDecl *D, llvm::Value *Addr,
                           CXXDtorType Type)
      : Dtor(D), Addr(Addr), Type(Type) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      CGF.EmitCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false,
                                /*Delegating=*/true, Addr);
    }
  };
}

void
CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                                  const FunctionArgList &Args) {
  assert(Ctor->isDelegatingConstructor());

  llvm::Value *ThisPtr = LoadCXXThis();

  QualType Ty = getContext().getTagDeclType(Ctor->getParent());
  CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
  AggValueSlot AggSlot =
    AggValueSlot::forAddr(ThisPtr, Alignment, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased);

  EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot);

  const CXXRecordDecl *ClassDecl = Ctor->getParent();
  if (CGM.getLangOpts().Exceptions && !ClassDecl->hasTrivialDestructor()) {
    CXXDtorType Type =
      CurGD.getCtorType() == Ctor_Complete ? Dtor_Complete : Dtor_Base;

    EHStack.pushCleanup<CallDelegatingCtorDtor>(EHCleanup,
                                                ClassDecl->getDestructor(),
                                                ThisPtr, Type);
  }
}

void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
                                            CXXDtorType Type,
                                            bool ForVirtualBase,
                                            bool Delegating,
                                            llvm::Value *This) {
  CGM.getCXXABI().EmitDestructorCall(*this, DD, Type, ForVirtualBase,
                                     Delegating, This);
}

namespace {
  struct CallLocalDtor : EHScopeStack::Cleanup {
    const CXXDestructorDecl *Dtor;
    llvm::Value *Addr;

    CallLocalDtor(const CXXDestructorDecl *D, llvm::Value *Addr)
      : Dtor(D), Addr(Addr) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                                /*ForVirtualBase=*/false,
                                /*Delegating=*/false, Addr);
    }
  };
}

void CodeGenFunction::PushDestructorCleanup(const CXXDestructorDecl *D,
                                            llvm::Value *Addr) {
  EHStack.pushCleanup<CallLocalDtor>(NormalAndEHCleanup, D, Addr);
}

void CodeGenFunction::PushDestructorCleanup(QualType T, llvm::Value *Addr) {
  CXXRecordDecl *ClassDecl = T->getAsCXXRecordDecl();
  if (!ClassDecl) return;
  if (ClassDecl->hasTrivialDestructor()) return;

  const CXXDestructorDecl *D = ClassDecl->getDestructor();
  assert(D && D->isUsed() && "destructor not marked as used!");
  PushDestructorCleanup(D, Addr);
}
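
// For illustration: the vtable-pointer initialization below is what a
// constructor for a hierarchy like
//
//   struct A { virtual void f(); };
//   struct B : virtual A { int b; };
//   struct C : B { int c; };
//
// relies on. The C constructor must store address points for the C, B, and
// (virtual) A subobjects, and the virtual base's location is not statically
// known while constructing a B subobject of an arbitrary most-derived
// class, hence the virtual-offset path below.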
void
CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
                                         const CXXRecordDecl *NearestVBase,
                                         CharUnits OffsetFromNearestVBase,
                                         const CXXRecordDecl *VTableClass) {
  // Compute the address point.
  bool NeedsVirtualOffset;
  llvm::Value *VTableAddressPoint =
      CGM.getCXXABI().getVTableAddressPointInStructor(
          *this, VTableClass, Base, NearestVBase, NeedsVirtualOffset);
  if (!VTableAddressPoint)
    return;

  // Compute where to store the address point.
  llvm::Value *VirtualOffset = 0;
  CharUnits NonVirtualOffset = CharUnits::Zero();

  if (NeedsVirtualOffset) {
    // We need to use the virtual base offset offset because the virtual base
    // might have a different offset in the most derived class.
    VirtualOffset = CGM.getCXXABI().GetVirtualBaseClassOffset(*this,
                                                              LoadCXXThis(),
                                                              VTableClass,
                                                              NearestVBase);
    NonVirtualOffset = OffsetFromNearestVBase;
  } else {
    // We can just use the base offset in the complete class.
    NonVirtualOffset = Base.getBaseOffset();
  }

  // Apply the offsets.
  llvm::Value *VTableField = LoadCXXThis();

  if (!NonVirtualOffset.isZero() || VirtualOffset)
    VTableField = ApplyNonVirtualAndVirtualOffset(*this, VTableField,
                                                  NonVirtualOffset,
                                                  VirtualOffset);

  // Finally, store the address point.
  llvm::Type *AddressPointPtrTy =
    VTableAddressPoint->getType()->getPointerTo();
  VTableField = Builder.CreateBitCast(VTableField, AddressPointPtrTy);
  llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField);
  CGM.DecorateInstruction(Store, CGM.getTBAAInfoForVTablePtr());
}

void
CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
                                          const CXXRecordDecl *NearestVBase,
                                          CharUnits OffsetFromNearestVBase,
                                          bool BaseIsNonVirtualPrimaryBase,
                                          const CXXRecordDecl *VTableClass,
                                          VisitedVirtualBasesSetTy& VBases) {
  // If this base is a non-virtual primary base the address point has already
  // been set.
  if (!BaseIsNonVirtualPrimaryBase) {
    // Initialize the vtable pointer for this base.
    InitializeVTablePointer(Base, NearestVBase, OffsetFromNearestVBase,
                            VTableClass);
  }

  const CXXRecordDecl *RD = Base.getBase();

  // Traverse bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    CXXRecordDecl *BaseDecl
      = cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // Ignore classes without a vtable.
    if (!BaseDecl->isDynamicClass())
      continue;

    CharUnits BaseOffset;
    CharUnits BaseOffsetFromNearestVBase;
    bool BaseDeclIsNonVirtualPrimaryBase;

    if (I->isVirtual()) {
      // Check if we've visited this virtual base before.
      if (!VBases.insert(BaseDecl))
        continue;

      const ASTRecordLayout &Layout =
        getContext().getASTRecordLayout(VTableClass);

      BaseOffset = Layout.getVBaseClassOffset(BaseDecl);
      BaseOffsetFromNearestVBase = CharUnits::Zero();
      BaseDeclIsNonVirtualPrimaryBase = false;
    } else {
      const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

      BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
      BaseOffsetFromNearestVBase =
        OffsetFromNearestVBase + Layout.getBaseClassOffset(BaseDecl);
      BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl;
    }

    InitializeVTablePointers(BaseSubobject(BaseDecl, BaseOffset),
                             I->isVirtual() ? BaseDecl : NearestVBase,
                             BaseOffsetFromNearestVBase,
                             BaseDeclIsNonVirtualPrimaryBase,
                             VTableClass, VBases);
  }
}
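
// For illustration: given 'struct A { virtual void f(); }; struct B : A {};',
// A is B's primary base and shares B's vtable pointer, so the recursion
// above passes BaseIsNonVirtualPrimaryBase = true for A and avoids storing
// a second, redundant address point at the same location.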
void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
  // Ignore classes without a vtable.
  if (!RD->isDynamicClass())
    return;

  // Initialize the vtable pointers for this class and all of its bases.
  VisitedVirtualBasesSetTy VBases;
  InitializeVTablePointers(BaseSubobject(RD, CharUnits::Zero()),
                           /*NearestVBase=*/0,
                           /*OffsetFromNearestVBase=*/CharUnits::Zero(),
                           /*BaseIsNonVirtualPrimaryBase=*/false, RD, VBases);

  if (RD->getNumVBases())
    CGM.getCXXABI().initializeHiddenVirtualInheritanceMembers(*this, RD);
}

llvm::Value *CodeGenFunction::GetVTablePtr(llvm::Value *This,
                                           llvm::Type *Ty) {
  llvm::Value *VTablePtrSrc = Builder.CreateBitCast(This, Ty->getPointerTo());
  llvm::Instruction *VTable = Builder.CreateLoad(VTablePtrSrc, "vtable");
  CGM.DecorateInstruction(VTable, CGM.getTBAAInfoForVTablePtr());
  return VTable;
}


// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
// quite what we want.
static const Expr *skipNoOpCastsAndParens(const Expr *E) {
  while (true) {
    if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
      E = PE->getSubExpr();
      continue;
    }

    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
      if (CE->getCastKind() == CK_NoOp) {
        E = CE->getSubExpr();
        continue;
      }
    }
    if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
      if (UO->getOpcode() == UO_Extension) {
        E = UO->getSubExpr();
        continue;
      }
    }
    return E;
  }
}
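
// For illustration: for a callee expression like '(__extension__ (obj))',
// the helper above strips the ParenExpr, the __extension__ node, and any
// intervening no-op casts so that the devirtualization check below can
// inspect the underlying DeclRefExpr or MemberExpr directly.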
bool
CodeGenFunction::CanDevirtualizeMemberFunctionCall(const Expr *Base,
                                                   const CXXMethodDecl *MD) {
  // When building with -fapple-kext, all calls must go through the vtable,
  // since the kernel linker can do runtime patching of vtables.
  if (getLangOpts().AppleKext)
    return false;

  // If the most derived class is marked final, we know that no subclass can
  // override this member function and so we can devirtualize it. For example:
  //
  //   struct A { virtual void f(); };
  //   struct B final : A { };
  //
  //   void f(B *b) {
  //     b->f();
  //   }
  //
  const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType();
  if (MostDerivedClassDecl->hasAttr<FinalAttr>())
    return true;

  // If the member function is marked 'final', we know that it can't be
  // overridden and can therefore devirtualize it.
  if (MD->hasAttr<FinalAttr>())
    return true;

  // Similarly, if the class itself is marked 'final', no class can derive
  // from it, and we can therefore devirtualize the member function call.
  if (MD->getParent()->hasAttr<FinalAttr>())
    return true;

  Base = skipNoOpCastsAndParens(Base);
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
      // This is a record decl. We know the type and can devirtualize it.
      return VD->getType()->isRecordType();
    }

    return false;
  }

  // We can devirtualize calls on an object accessed by a class member access
  // expression, since by C++11 [basic.life]p6 we know that it can't refer to
  // a derived class object constructed in the same location.
  if (const MemberExpr *ME = dyn_cast<MemberExpr>(Base))
    if (const ValueDecl *VD = dyn_cast<ValueDecl>(ME->getMemberDecl()))
      return VD->getType()->isRecordType();

  // We can always devirtualize calls on temporary object expressions.
  if (isa<CXXConstructExpr>(Base))
    return true;

  // And calls on bound temporaries.
  if (isa<CXXBindTemporaryExpr>(Base))
    return true;

  // Check if this is a call expr that returns a record type.
  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
    return CE->getCallReturnType()->isRecordType();

  // We can't devirtualize the call.
  return false;
}

llvm::Value *
CodeGenFunction::EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
                                             const CXXMethodDecl *MD,
                                             llvm::Value *This) {
  llvm::FunctionType *fnType =
    CGM.getTypes().GetFunctionType(
                             CGM.getTypes().arrangeCXXMethodDeclaration(MD));

  if (MD->isVirtual() && !CanDevirtualizeMemberFunctionCall(E->getArg(0), MD))
    return CGM.getCXXABI().getVirtualFunctionPointer(*this, MD, This, fnType);

  return CGM.GetAddrOfFunction(MD, fnType);
}

void CodeGenFunction::EmitForwardingCallToLambda(
                                      const CXXMethodDecl *callOperator,
                                      CallArgList &callArgs) {
  // Get the address of the call operator.
  const CGFunctionInfo &calleeFnInfo =
    CGM.getTypes().arrangeCXXMethodDeclaration(callOperator);
  llvm::Value *callee =
    CGM.GetAddrOfFunction(GlobalDecl(callOperator),
                          CGM.getTypes().GetFunctionType(calleeFnInfo));

  // Prepare the return slot.
  const FunctionProtoType *FPT =
    callOperator->getType()->castAs<FunctionProtoType>();
  QualType resultType = FPT->getReturnType();
  ReturnValueSlot returnSlot;
  if (!resultType->isVoidType() &&
      calleeFnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
      !hasScalarEvaluationKind(calleeFnInfo.getReturnType()))
    returnSlot = ReturnValueSlot(ReturnValue, resultType.isVolatileQualified());

  // We don't need to separately arrange the call arguments because
  // the call can't be variadic anyway --- it's impossible to forward
  // variadic arguments.

  // Now emit our call.
  RValue RV = EmitCall(calleeFnInfo, callee, returnSlot,
                       callArgs, callOperator);

  // If necessary, copy the returned value into the slot.
  if (!resultType->isVoidType() && returnSlot.isNull())
    EmitReturnOfRValue(RV, resultType);
  else
    EmitBranchThroughCleanup(ReturnBlock);
}
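
// For illustration: EmitForwardingCallToLambda above and the invoke-body
// emitters below together implement conversions such as
//
//   auto l = [](int x) { return x; };
//   int (*fp)(int) = l;
//
// The synthesized static invoker behind 'fp' simply forwards its parameters
// to the lambda's call operator; since a captureless lambda's call operator
// never uses its object argument, the invoker can pass an undef 'this'.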
void CodeGenFunction::EmitLambdaBlockInvokeBody() {
  const BlockDecl *BD = BlockInfo->getBlockDecl();
  const VarDecl *variable = BD->capture_begin()->getVariable();
  const CXXRecordDecl *Lambda = variable->getType()->getAsCXXRecordDecl();

  // Start building arguments for the forwarding call.
  CallArgList CallArgs;

  QualType ThisType =
    getContext().getPointerType(getContext().getRecordType(Lambda));
  llvm::Value *ThisPtr = GetAddrOfBlockDecl(variable, false);
  CallArgs.add(RValue::get(ThisPtr), ThisType);

  // Add the rest of the parameters.
  for (BlockDecl::param_const_iterator I = BD->param_begin(),
       E = BD->param_end(); I != E; ++I) {
    ParmVarDecl *param = *I;
    EmitDelegateCallArg(CallArgs, param, param->getLocStart());
  }
  assert(!Lambda->isGenericLambda() &&
         "generic lambda interconversion to block not implemented");
  EmitForwardingCallToLambda(Lambda->getLambdaCallOperator(), CallArgs);
}

void CodeGenFunction::EmitLambdaToBlockPointerBody(FunctionArgList &Args) {
  if (cast<CXXMethodDecl>(CurCodeDecl)->isVariadic()) {
    // FIXME: Making this work correctly is nasty because it requires either
    // cloning the body of the call operator or making the call operator
    // forward.
    CGM.ErrorUnsupported(CurCodeDecl, "lambda conversion to variadic function");
    return;
  }

  EmitFunctionBody(Args, cast<FunctionDecl>(CurGD.getDecl())->getBody());
}

void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
  const CXXRecordDecl *Lambda = MD->getParent();

  // Start building arguments for the forwarding call.
  CallArgList CallArgs;

  QualType ThisType =
    getContext().getPointerType(getContext().getRecordType(Lambda));
  llvm::Value *ThisPtr = llvm::UndefValue::get(getTypes().ConvertType(ThisType));
  CallArgs.add(RValue::get(ThisPtr), ThisType);

  // Add the rest of the parameters.
  for (FunctionDecl::param_const_iterator I = MD->param_begin(),
       E = MD->param_end(); I != E; ++I) {
    ParmVarDecl *param = *I;
    EmitDelegateCallArg(CallArgs, param, param->getLocStart());
  }
  const CXXMethodDecl *CallOp = Lambda->getLambdaCallOperator();
  // For a generic lambda, find the corresponding call operator specialization
  // to which the call to the static-invoker shall be forwarded.
  if (Lambda->isGenericLambda()) {
    assert(MD->isFunctionTemplateSpecialization());
    const TemplateArgumentList *TAL = MD->getTemplateSpecializationArgs();
    FunctionTemplateDecl *CallOpTemplate =
      CallOp->getDescribedFunctionTemplate();
    void *InsertPos = 0;
    FunctionDecl *CorrespondingCallOpSpecialization =
      CallOpTemplate->findSpecialization(TAL->data(), TAL->size(), InsertPos);
    assert(CorrespondingCallOpSpecialization);
    CallOp = cast<CXXMethodDecl>(CorrespondingCallOpSpecialization);
  }
  EmitForwardingCallToLambda(CallOp, CallArgs);
}

void CodeGenFunction::EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD) {
  if (MD->isVariadic()) {
    // FIXME: Making this work correctly is nasty because it requires either
    // cloning the body of the call operator or making the call operator
    // forward.
    CGM.ErrorUnsupported(MD, "lambda conversion to variadic function");
    return;
  }

  EmitLambdaDelegatingInvokeBody(MD);
}