//===--- CGClass.cpp - Emit LLVM Code for C++ classes ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with C++ code generation for classes
//
//===----------------------------------------------------------------------===//

#include "CGBlocks.h"
#include "CGDebugInfo.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Frontend/CodeGenOptions.h"

using namespace clang;
using namespace CodeGen;

static CharUnits
ComputeNonVirtualBaseClassOffset(ASTContext &Context,
                                 const CXXRecordDecl *DerivedClass,
                                 CastExpr::path_const_iterator Start,
                                 CastExpr::path_const_iterator End) {
  CharUnits Offset = CharUnits::Zero();

  const CXXRecordDecl *RD = DerivedClass;

  for (CastExpr::path_const_iterator I = Start; I != End; ++I) {
    const CXXBaseSpecifier *Base = *I;
    assert(!Base->isVirtual() && "Should not see virtual bases here!");

    // Get the layout.
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());

    // Add the offset.
    Offset += Layout.getBaseClassOffset(BaseDecl);

    RD = BaseDecl;
  }

  return Offset;
}

llvm::Constant *
CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
                                   CastExpr::path_const_iterator PathBegin,
                                   CastExpr::path_const_iterator PathEnd) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  CharUnits Offset =
    ComputeNonVirtualBaseClassOffset(getContext(), ClassDecl,
                                     PathBegin, PathEnd);
  if (Offset.isZero())
    return 0;

  llvm::Type *PtrDiffTy =
    Types.ConvertType(getContext().getPointerDiffType());

  return llvm::ConstantInt::get(PtrDiffTy, Offset.getQuantity());
}

/// Gets the address of a direct base class within a complete object.
/// This should only be used for (1) non-virtual bases or (2) virtual bases
/// when the type is known to be complete (e.g. in complete destructors).
///
/// The object pointed to by 'This' is assumed to be non-null.
llvm::Value *
CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
                                                   const CXXRecordDecl *Derived,
                                                   const CXXRecordDecl *Base,
                                                   bool BaseIsVirtual) {
  // 'this' must be a pointer (in some address space) to Derived.
  assert(This->getType()->isPointerTy() &&
         cast<llvm::PointerType>(This->getType())->getElementType()
           == ConvertType(Derived));

  // Compute the offset of the base, whether virtual or not.
  CharUnits Offset;
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived);
  if (BaseIsVirtual)
    Offset = Layout.getVBaseClassOffset(Base);
  else
    Offset = Layout.getBaseClassOffset(Base);

  // Shift and cast down to the base type.
  // TODO: for complete types, this should be possible with a GEP.
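  //
  // A rough sketch of what this emits (names and types illustrative):
  //   %0 = bitcast %struct.Derived* %this to i8*
  //   %1 = getelementptr inbounds i8* %0, i64 <Offset>
  //   %2 = bitcast i8* %1 to %struct.Base*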
  llvm::Value *V = This;
  if (Offset.isPositive()) {
    V = Builder.CreateBitCast(V, Int8PtrTy);
    V = Builder.CreateConstInBoundsGEP1_64(V, Offset.getQuantity());
  }
  V = Builder.CreateBitCast(V, ConvertType(Base)->getPointerTo());

  return V;
}

static llvm::Value *
ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ptr,
                                CharUnits nonVirtualOffset,
                                llvm::Value *virtualOffset) {
  // Assert that we have something to do.
  assert(!nonVirtualOffset.isZero() || virtualOffset != 0);

  // Compute the offset from the static and dynamic components.
  llvm::Value *baseOffset;
  if (!nonVirtualOffset.isZero()) {
    baseOffset = llvm::ConstantInt::get(CGF.PtrDiffTy,
                                        nonVirtualOffset.getQuantity());
    if (virtualOffset) {
      baseOffset = CGF.Builder.CreateAdd(virtualOffset, baseOffset);
    }
  } else {
    baseOffset = virtualOffset;
  }

  // Apply the base offset.
  ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8PtrTy);
  ptr = CGF.Builder.CreateInBoundsGEP(ptr, baseOffset, "add.ptr");
  return ptr;
}

llvm::Value *
CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
                                       const CXXRecordDecl *Derived,
                                       CastExpr::path_const_iterator PathBegin,
                                       CastExpr::path_const_iterator PathEnd,
                                       bool NullCheckValue) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  CastExpr::path_const_iterator Start = PathBegin;
  const CXXRecordDecl *VBase = 0;

  // Sema has done some convenient canonicalization here: if the
  // access path involved any virtual steps, the conversion path will
  // *start* with a step down to the correct virtual base subobject,
  // and hence will not require any further steps.
  if ((*Start)->isVirtual()) {
    VBase =
      cast<CXXRecordDecl>((*Start)->getType()->getAs<RecordType>()->getDecl());
    ++Start;
  }

  // Compute the static offset of the ultimate destination within its
  // allocating subobject (the virtual base, if there is one, or else
  // the "complete" object that we see).
  CharUnits NonVirtualOffset =
    ComputeNonVirtualBaseClassOffset(getContext(), VBase ? VBase : Derived,
                                     Start, PathEnd);

  // If there's a virtual step, we can sometimes "devirtualize" it.
  // For now, that's limited to when the derived type is final.
  // TODO: "devirtualize" this for accesses to known-complete objects.
  if (VBase && Derived->hasAttr<FinalAttr>()) {
    const ASTRecordLayout &layout = getContext().getASTRecordLayout(Derived);
    CharUnits vBaseOffset = layout.getVBaseClassOffset(VBase);
    NonVirtualOffset += vBaseOffset;
    VBase = 0; // we no longer have a virtual step
  }

  // Get the base pointer type.
  llvm::Type *BasePtrTy =
    ConvertType((PathEnd[-1])->getType())->getPointerTo();

  // If the static offset is zero and we don't have a virtual step,
  // just do a bitcast; null checks are unnecessary.
  if (NonVirtualOffset.isZero() && !VBase) {
    return Builder.CreateBitCast(Value, BasePtrTy);
  }

  llvm::BasicBlock *origBB = 0;
  llvm::BasicBlock *endBB = 0;

  // Skip over the offset (and the vtable load) if we're supposed to
  // null-check the pointer.
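  //
  // The check emitted below is roughly (illustrative):
  //   %isnull = icmp eq %T* %value, null
  //   br i1 %isnull, label %cast.end, label %cast.notnull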
  if (NullCheckValue) {
    origBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("cast.notnull");
    endBB = createBasicBlock("cast.end");

    llvm::Value *isNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(isNull, endBB, notNullBB);
    EmitBlock(notNullBB);
  }

  // Compute the virtual offset.
  llvm::Value *VirtualOffset = 0;
  if (VBase) {
    VirtualOffset =
      CGM.getCXXABI().GetVirtualBaseClassOffset(*this, Value, Derived, VBase);
  }

  // Apply both offsets.
  Value = ApplyNonVirtualAndVirtualOffset(*this, Value,
                                          NonVirtualOffset,
                                          VirtualOffset);

  // Cast to the destination type.
  Value = Builder.CreateBitCast(Value, BasePtrTy);

  // Build a phi if we needed a null check.
  if (NullCheckValue) {
    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    Builder.CreateBr(endBB);
    EmitBlock(endBB);

    llvm::PHINode *PHI = Builder.CreatePHI(BasePtrTy, 2, "cast.result");
    PHI->addIncoming(Value, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(BasePtrTy), origBB);
    Value = PHI;
  }

  return Value;
}

llvm::Value *
CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
                                          const CXXRecordDecl *Derived,
                                        CastExpr::path_const_iterator PathBegin,
                                          CastExpr::path_const_iterator PathEnd,
                                          bool NullCheckValue) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  QualType DerivedTy =
    getContext().getCanonicalType(getContext().getTagDeclType(Derived));
  llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo();

  llvm::Value *NonVirtualOffset =
    CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd);

  if (!NonVirtualOffset) {
    // No offset, we can just cast back.
    return Builder.CreateBitCast(Value, DerivedPtrTy);
  }

  llvm::BasicBlock *CastNull = 0;
  llvm::BasicBlock *CastNotNull = 0;
  llvm::BasicBlock *CastEnd = 0;

  if (NullCheckValue) {
    CastNull = createBasicBlock("cast.null");
    CastNotNull = createBasicBlock("cast.notnull");
    CastEnd = createBasicBlock("cast.end");

    llvm::Value *IsNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  // Apply the offset.
  Value = Builder.CreateBitCast(Value, Int8PtrTy);
  Value = Builder.CreateGEP(Value, Builder.CreateNeg(NonVirtualOffset),
                            "sub.ptr");

  // Just cast.
  Value = Builder.CreateBitCast(Value, DerivedPtrTy);

  if (NullCheckValue) {
    Builder.CreateBr(CastEnd);
    EmitBlock(CastNull);
    Builder.CreateBr(CastEnd);
    EmitBlock(CastEnd);

    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()),
                     CastNull);
    Value = PHI;
  }

  return Value;
}

llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD,
                                              bool ForVirtualBase,
                                              bool Delegating) {
  if (!CGM.getCXXABI().NeedsVTTParameter(GD)) {
    // This constructor/destructor does not need a VTT parameter.
    return 0;
  }

  const CXXRecordDecl *RD = cast<CXXMethodDecl>(CurCodeDecl)->getParent();
  const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent();

  llvm::Value *VTT;

  uint64_t SubVTTIndex;

  if (Delegating) {
    // If this is a delegating constructor call, just load the VTT.
    return LoadCXXVTT();
  } else if (RD == Base) {
    // If the record matches the base, this is the complete ctor/dtor
    // variant calling the base variant in a class with virtual bases.
    assert(!CGM.getCXXABI().NeedsVTTParameter(CurGD) &&
           "doing no-op VTT offset in base dtor/ctor?");
    assert(!ForVirtualBase && "Can't have same class as virtual base!");
    SubVTTIndex = 0;
  } else {
    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
    CharUnits BaseOffset = ForVirtualBase ?
      Layout.getVBaseClassOffset(Base) :
      Layout.getBaseClassOffset(Base);

    SubVTTIndex =
      CGM.getVTables().getSubVTTIndex(RD, BaseSubobject(Base, BaseOffset));
    assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!");
  }

  if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
    // A VTT parameter was passed to the constructor, use it.
    VTT = LoadCXXVTT();
    VTT = Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex);
  } else {
    // We're the complete constructor, so get the VTT by name.
    VTT = CGM.getVTables().GetAddrOfVTT(RD);
    VTT = Builder.CreateConstInBoundsGEP2_64(VTT, 0, SubVTTIndex);
  }

  return VTT;
}

namespace {
  /// Call the destructor for a direct base class.
  struct CallBaseDtor : EHScopeStack::Cleanup {
    const CXXRecordDecl *BaseClass;
    bool BaseIsVirtual;
    CallBaseDtor(const CXXRecordDecl *Base, bool BaseIsVirtual)
      : BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const CXXRecordDecl *DerivedClass =
        cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent();

      const CXXDestructorDecl *D = BaseClass->getDestructor();
      llvm::Value *Addr =
        CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThis(),
                                                  DerivedClass, BaseClass,
                                                  BaseIsVirtual);
      CGF.EmitCXXDestructorCall(D, Dtor_Base, BaseIsVirtual,
                                /*Delegating=*/false, Addr);
    }
  };

  /// A visitor which checks whether an initializer uses 'this' in a
  /// way which requires the vtable to be properly set.
  struct DynamicThisUseChecker : EvaluatedExprVisitor<DynamicThisUseChecker> {
    typedef EvaluatedExprVisitor<DynamicThisUseChecker> super;

    bool UsesThis;

    DynamicThisUseChecker(ASTContext &C) : super(C), UsesThis(false) {}

    // Black-list all explicit and implicit references to 'this'.
    //
    // Do we need to worry about external references to 'this' derived
    // from arbitrary code?  If so, then anything which runs arbitrary
    // external code might potentially access the vtable.
    void VisitCXXThisExpr(CXXThisExpr *E) { UsesThis = true; }
  };
}

static bool BaseInitializerUsesThis(ASTContext &C, const Expr *Init) {
  DynamicThisUseChecker Checker(C);
  Checker.Visit(const_cast<Expr*>(Init));
  return Checker.UsesThis;
}

static void EmitBaseInitializer(CodeGenFunction &CGF,
                                const CXXRecordDecl *ClassDecl,
                                CXXCtorInitializer *BaseInit,
                                CXXCtorType CtorType) {
  assert(BaseInit->isBaseInitializer() &&
         "Must have base initializer!");

  llvm::Value *ThisPtr = CGF.LoadCXXThis();

  const Type *BaseType = BaseInit->getBaseClass();
  CXXRecordDecl *BaseClassDecl =
    cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());

  bool isBaseVirtual = BaseInit->isBaseVirtual();

  // The base constructor doesn't construct virtual bases.
  if (CtorType == Ctor_Base && isBaseVirtual)
    return;

  // If the initializer for the base (other than the constructor
  // itself) accesses 'this' in any way, we need to initialize the
  // vtables.
  if (BaseInitializerUsesThis(CGF.getContext(), BaseInit->getInit()))
    CGF.InitializeVTablePointers(ClassDecl);

  // We can pretend to be a complete class because it only matters for
  // virtual bases, and we only do virtual bases for complete ctors.
  llvm::Value *V =
    CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl,
                                              BaseClassDecl,
                                              isBaseVirtual);
  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(BaseType);
  AggValueSlot AggSlot =
    AggValueSlot::forAddr(V, Alignment, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased);

  CGF.EmitAggExpr(BaseInit->getInit(), AggSlot);

  if (CGF.CGM.getLangOpts().Exceptions &&
      !BaseClassDecl->hasTrivialDestructor())
    CGF.EHStack.pushCleanup<CallBaseDtor>(EHCleanup, BaseClassDecl,
                                          isBaseVirtual);
}

static void EmitAggMemberInitializer(CodeGenFunction &CGF,
                                     LValue LHS,
                                     Expr *Init,
                                     llvm::Value *ArrayIndexVar,
                                     QualType T,
                                     ArrayRef<VarDecl *> ArrayIndexes,
                                     unsigned Index) {
  if (Index == ArrayIndexes.size()) {
    LValue LV = LHS;

    if (ArrayIndexVar) {
      // If we have an array index variable, load it and use it as an offset.
      // Then, increment the value.
      llvm::Value *Dest = LHS.getAddress();
      llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar);
      Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress");
      llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1);
      Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc");
      CGF.Builder.CreateStore(Next, ArrayIndexVar);

      // Update the LValue.
      LV.setAddress(Dest);
      CharUnits Align = CGF.getContext().getTypeAlignInChars(T);
      LV.setAlignment(std::min(Align, LV.getAlignment()));
    }

    switch (CGF.getEvaluationKind(T)) {
    case TEK_Scalar:
      CGF.EmitScalarInit(Init, /*decl*/ 0, LV, false);
      break;
    case TEK_Complex:
      CGF.EmitComplexExprIntoLValue(Init, LV, /*isInit*/ true);
      break;
    case TEK_Aggregate: {
      AggValueSlot Slot =
        AggValueSlot::forLValue(LV,
                                AggValueSlot::IsDestructed,
                                AggValueSlot::DoesNotNeedGCBarriers,
                                AggValueSlot::IsNotAliased);

      CGF.EmitAggExpr(Init, Slot);
      break;
    }
    }

    return;
  }

  const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(T);
  assert(Array && "Array initialization without the array type?");
  llvm::Value *IndexVar
    = CGF.GetAddrOfLocalVar(ArrayIndexes[Index]);
  assert(IndexVar && "Array index variable not loaded");

  // Initialize this index variable to zero.
  llvm::Value* Zero
    = llvm::Constant::getNullValue(
                              CGF.ConvertType(CGF.getContext().getSizeType()));
  CGF.Builder.CreateStore(Zero, IndexVar);

  // Start the loop with a block that tests the condition.
  llvm::BasicBlock *CondBlock = CGF.createBasicBlock("for.cond");
  llvm::BasicBlock *AfterFor = CGF.createBasicBlock("for.end");

  CGF.EmitBlock(CondBlock);

  llvm::BasicBlock *ForBody = CGF.createBasicBlock("for.body");
  // Generate: if (loop-index < number-of-elements) fall to the loop body,
  // otherwise, go to the block after the for-loop.
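  // Roughly (names illustrative):
  //   for.cond:
  //     %i = load i64* %object.index
  //     %isless = icmp ult i64 %i, <NumElements>
  //     br i1 %isless, label %for.body, label %for.end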
  uint64_t NumElements = Array->getSize().getZExtValue();
  llvm::Value *Counter = CGF.Builder.CreateLoad(IndexVar);
  llvm::Value *NumElementsPtr =
    llvm::ConstantInt::get(Counter->getType(), NumElements);
  llvm::Value *IsLess = CGF.Builder.CreateICmpULT(Counter, NumElementsPtr,
                                                  "isless");

  // If the condition is true, execute the body.
  CGF.Builder.CreateCondBr(IsLess, ForBody, AfterFor);

  CGF.EmitBlock(ForBody);
  llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc");

  // Inside the loop body recurse to emit the inner loop or, eventually, the
  // constructor call.
  EmitAggMemberInitializer(CGF, LHS, Init, ArrayIndexVar,
                           Array->getElementType(), ArrayIndexes, Index + 1);

  CGF.EmitBlock(ContinueBlock);

  // Emit the increment of the loop counter.
  llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1);
  Counter = CGF.Builder.CreateLoad(IndexVar);
  NextVal = CGF.Builder.CreateAdd(Counter, NextVal, "inc");
  CGF.Builder.CreateStore(NextVal, IndexVar);

  // Finally, branch back up to the condition for the next iteration.
  CGF.EmitBranch(CondBlock);

  // Emit the fall-through block.
  CGF.EmitBlock(AfterFor, true);
}

static void EmitMemberInitializer(CodeGenFunction &CGF,
                                  const CXXRecordDecl *ClassDecl,
                                  CXXCtorInitializer *MemberInit,
                                  const CXXConstructorDecl *Constructor,
                                  FunctionArgList &Args) {
  assert(MemberInit->isAnyMemberInitializer() &&
         "Must have member initializer!");
  assert(MemberInit->getInit() && "Must have initializer!");

  // non-static data member initializers.
  FieldDecl *Field = MemberInit->getAnyMember();
  QualType FieldType = Field->getType();

  llvm::Value *ThisPtr = CGF.LoadCXXThis();
  QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
  LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);

  if (MemberInit->isIndirectMemberInitializer()) {
    // If we are initializing an anonymous union field, drill down to
    // the field.
    IndirectFieldDecl *IndirectField = MemberInit->getIndirectMember();
    IndirectFieldDecl::chain_iterator I = IndirectField->chain_begin(),
      IEnd = IndirectField->chain_end();
    for ( ; I != IEnd; ++I)
      LHS = CGF.EmitLValueForFieldInitialization(LHS, cast<FieldDecl>(*I));
    FieldType = MemberInit->getIndirectMember()->getAnonField()->getType();
  } else {
    LHS = CGF.EmitLValueForFieldInitialization(LHS, Field);
  }

  // Special case: if we are in a copy or move constructor, and we are copying
  // an array of PODs or classes with trivial copy constructors, ignore the
  // AST and perform the copy we know is equivalent.
  // FIXME: This is hacky at best... if we had a bit more explicit information
  // in the AST, we could generalize it more easily.
  const ConstantArrayType *Array
    = CGF.getContext().getAsConstantArrayType(FieldType);
  if (Array && Constructor->isImplicitlyDefined() &&
      Constructor->isCopyOrMoveConstructor()) {
    QualType BaseElementTy = CGF.getContext().getBaseElementType(Array);
    CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());
    if (BaseElementTy.isPODType(CGF.getContext()) ||
        (CE && CE->getConstructor()->isTrivial())) {
      // Find the source pointer. We know it's the last argument because
      // we know we're in an implicit copy constructor.
      unsigned SrcArgIndex = Args.size() - 1;
      llvm::Value *SrcPtr
        = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex]));
      LValue ThisRHSLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
      LValue Src = CGF.EmitLValueForFieldInitialization(ThisRHSLV, Field);

      // Copy the aggregate.
      CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType,
                            LHS.isVolatileQualified());
      return;
    }
  }

  ArrayRef<VarDecl *> ArrayIndexes;
  if (MemberInit->getNumArrayIndices())
    ArrayIndexes = MemberInit->getArrayIndexes();
  CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit(), ArrayIndexes);
}

void CodeGenFunction::EmitInitializerForField(FieldDecl *Field,
                                              LValue LHS, Expr *Init,
                                             ArrayRef<VarDecl *> ArrayIndexes) {
  QualType FieldType = Field->getType();
  switch (getEvaluationKind(FieldType)) {
  case TEK_Scalar:
    if (LHS.isSimple()) {
      EmitExprAsInit(Init, Field, LHS, false);
    } else {
      RValue RHS = RValue::get(EmitScalarExpr(Init));
      EmitStoreThroughLValue(RHS, LHS);
    }
    break;
  case TEK_Complex:
    EmitComplexExprIntoLValue(Init, LHS, /*isInit*/ true);
    break;
  case TEK_Aggregate: {
    llvm::Value *ArrayIndexVar = 0;
    if (ArrayIndexes.size()) {
      llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

      // The LHS is a pointer to the first object we'll be constructing, as
      // a flat array.
      QualType BaseElementTy = getContext().getBaseElementType(FieldType);
      llvm::Type *BasePtr = ConvertType(BaseElementTy);
      BasePtr = llvm::PointerType::getUnqual(BasePtr);
      llvm::Value *BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(),
                                                       BasePtr);
      LHS = MakeAddrLValue(BaseAddrPtr, BaseElementTy);

      // Create an array index that will be used to walk over all of the
      // objects we're constructing.
      ArrayIndexVar = CreateTempAlloca(SizeTy, "object.index");
      llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
      Builder.CreateStore(Zero, ArrayIndexVar);

      // Emit the block variables for the array indices, if any.
      for (unsigned I = 0, N = ArrayIndexes.size(); I != N; ++I)
        EmitAutoVarDecl(*ArrayIndexes[I]);
    }

    EmitAggMemberInitializer(*this, LHS, Init, ArrayIndexVar, FieldType,
                             ArrayIndexes, 0);
  }
  }

  // Ensure that we destroy this object if an exception is thrown
  // later in the constructor.
  QualType::DestructionKind dtorKind = FieldType.isDestructedType();
  if (needsEHCleanup(dtorKind))
    pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
}

/// Checks whether the given constructor is a valid subject for the
/// complete-to-base constructor delegation optimization, i.e.
/// emitting the complete constructor as a simple call to the base
/// constructor.
static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor) {

  // Currently we disable the optimization for classes with virtual
  // bases because (1) the addresses of parameter variables need to be
  // consistent across all initializers but (2) the delegate function
  // call necessarily creates a second copy of the parameter variable.
  //
  // The limiting example (purely theoretical AFAIK):
  //   struct A { A(int &c) { c++; } };
  //   struct B : virtual A {
  //     B(int count) : A(count) { printf("%d\n", count); }
  //   };
  // ...although even this example could in principle be emitted as a
  // delegation since the address of the parameter doesn't escape.
  if (Ctor->getParent()->getNumVBases()) {
    // TODO: white-list trivial vbase initializers.  This case wouldn't
    // be subject to the restrictions below.

    // TODO: white-list cases where:
    //  - there are no non-reference parameters to the constructor
    //  - the initializers don't access any non-reference parameters
    //  - the initializers don't take the address of non-reference
    //    parameters
    //  - etc.
    // If we ever add any of the above cases, remember that:
    //  - function-try-blocks will always blacklist this optimization
    //  - we need to perform the constructor prologue and cleanup in
    //    EmitConstructorBody.

    return false;
  }

  // We also disable the optimization for variadic functions because
  // it's impossible to "re-pass" varargs.
  if (Ctor->getType()->getAs<FunctionProtoType>()->isVariadic())
    return false;

  // FIXME: Decide if we can do a delegation of a delegating constructor.
  if (Ctor->isDelegatingConstructor())
    return false;

  return true;
}

/// EmitConstructorBody - Emits the body of the current constructor.
void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
  const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(CurGD.getDecl());
  CXXCtorType CtorType = CurGD.getCtorType();

  // Before we go any further, try the complete->base constructor
  // delegation optimization.
  if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor) &&
      CGM.getTarget().getCXXABI().hasConstructorVariants()) {
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitLocation(Builder, Ctor->getLocEnd());
    EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args);
    return;
  }

  Stmt *Body = Ctor->getBody();

  // Enter the function-try-block before the constructor prologue if
  // applicable.
  bool IsTryBody = (Body && isa<CXXTryStmt>(Body));
  if (IsTryBody)
    EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);

  RunCleanupsScope RunCleanups(*this);

  // TODO: in restricted cases, we can emit the vbase initializers of
  // a complete ctor and then delegate to the base ctor.

  // Emit the constructor prologue, i.e. the base and member
  // initializers.
  EmitCtorPrologue(Ctor, CtorType, Args);

  // Emit the body of the statement.
  if (IsTryBody)
    EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
  else if (Body)
    EmitStmt(Body);

  // Emit any cleanup blocks associated with the member or base
  // initializers, which includes (along the exceptional path) the
  // destructors for those members and bases that were fully
  // constructed.
  RunCleanups.ForceCleanup();

  if (IsTryBody)
    ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
}

namespace {
  class FieldMemcpyizer {
  public:
    FieldMemcpyizer(CodeGenFunction &CGF, const CXXRecordDecl *ClassDecl,
                    const VarDecl *SrcRec)
      : CGF(CGF), ClassDecl(ClassDecl), SrcRec(SrcRec),
        RecLayout(CGF.getContext().getASTRecordLayout(ClassDecl)),
        FirstField(0), LastField(0), FirstFieldOffset(0), LastFieldOffset(0),
        LastAddedFieldIndex(0) { }

    static bool isMemcpyableField(FieldDecl *F) {
      Qualifiers Qual = F->getType().getQualifiers();
      if (Qual.hasVolatile() || Qual.hasObjCLifetime())
        return false;
      return true;
    }

    void addMemcpyableField(FieldDecl *F) {
      if (FirstField == 0)
        addInitialField(F);
      else
        addNextField(F);
    }

    CharUnits getMemcpySize() const {
      unsigned LastFieldSize =
        LastField->isBitField() ?
          LastField->getBitWidthValue(CGF.getContext()) :
          CGF.getContext().getTypeSize(LastField->getType());
      uint64_t MemcpySizeBits =
        LastFieldOffset + LastFieldSize - FirstFieldOffset +
        CGF.getContext().getCharWidth() - 1;
      CharUnits MemcpySize =
        CGF.getContext().toCharUnitsFromBits(MemcpySizeBits);
      return MemcpySize;
    }

    void emitMemcpy() {
      // Give the subclass a chance to bail out if it feels the memcpy isn't
      // worth it (e.g. it hasn't aggregated enough data).
      if (FirstField == 0) {
        return;
      }

      CharUnits Alignment;

      if (FirstField->isBitField()) {
        const CGRecordLayout &RL =
          CGF.getTypes().getCGRecordLayout(FirstField->getParent());
        const CGBitFieldInfo &BFInfo = RL.getBitFieldInfo(FirstField);
        Alignment = CharUnits::fromQuantity(BFInfo.StorageAlignment);
      } else {
        Alignment = CGF.getContext().getDeclAlign(FirstField);
      }

      assert((CGF.getContext().toCharUnitsFromBits(FirstFieldOffset) %
              Alignment) == 0 && "Bad field alignment.");

      CharUnits MemcpySize = getMemcpySize();
      QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
      llvm::Value *ThisPtr = CGF.LoadCXXThis();
      LValue DestLV = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
      LValue Dest = CGF.EmitLValueForFieldInitialization(DestLV, FirstField);
      llvm::Value *SrcPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(SrcRec));
      LValue SrcLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
      LValue Src = CGF.EmitLValueForFieldInitialization(SrcLV, FirstField);

      emitMemcpyIR(Dest.isBitField() ? Dest.getBitFieldAddr() : Dest.getAddress(),
                   Src.isBitField() ?
                     Src.getBitFieldAddr() : Src.getAddress(),
                   MemcpySize, Alignment);
      reset();
    }

    void reset() {
      FirstField = 0;
    }

  protected:
    CodeGenFunction &CGF;
    const CXXRecordDecl *ClassDecl;

  private:

    void emitMemcpyIR(llvm::Value *DestPtr, llvm::Value *SrcPtr,
                      CharUnits Size, CharUnits Alignment) {
      llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
      llvm::Type *DBP =
        llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), DPT->getAddressSpace());
      DestPtr = CGF.Builder.CreateBitCast(DestPtr, DBP);

      llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
      llvm::Type *SBP =
        llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), SPT->getAddressSpace());
      SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, SBP);

      CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, Size.getQuantity(),
                               Alignment.getQuantity());
    }

    void addInitialField(FieldDecl *F) {
      FirstField = F;
      LastField = F;
      FirstFieldOffset = RecLayout.getFieldOffset(F->getFieldIndex());
      LastFieldOffset = FirstFieldOffset;
      LastAddedFieldIndex = F->getFieldIndex();
      return;
    }

    void addNextField(FieldDecl *F) {
      // For the most part, the following invariant will hold:
      //   F->getFieldIndex() == LastAddedFieldIndex + 1
      // The one exception is that Sema won't add a copy-initializer for an
      // unnamed bitfield, which will show up here as a gap in the sequence.
      assert(F->getFieldIndex() >= LastAddedFieldIndex + 1 &&
             "Cannot aggregate fields out of order.");
      LastAddedFieldIndex = F->getFieldIndex();

      // The 'first' and 'last' fields are chosen by offset, rather than field
      // index. This allows the code to support bitfields, as well as regular
      // fields.
      uint64_t FOffset = RecLayout.getFieldOffset(F->getFieldIndex());
      if (FOffset < FirstFieldOffset) {
        FirstField = F;
        FirstFieldOffset = FOffset;
      } else if (FOffset > LastFieldOffset) {
        LastField = F;
        LastFieldOffset = FOffset;
      }
    }

    const VarDecl *SrcRec;
    const ASTRecordLayout &RecLayout;
    FieldDecl *FirstField;
    FieldDecl *LastField;
    uint64_t FirstFieldOffset, LastFieldOffset;
    unsigned LastAddedFieldIndex;
  };

  class ConstructorMemcpyizer : public FieldMemcpyizer {
  private:

    /// Get the source argument for the copy. Returns null if this is not an
    /// implicitly-defined copy or move constructor.
    static const VarDecl *getTrivialCopySource(const CXXConstructorDecl *CD,
                                               FunctionArgList &Args) {
      if (CD->isCopyOrMoveConstructor() && CD->isImplicitlyDefined())
        return Args[Args.size() - 1];
      return 0;
    }

    // Returns true if a CXXCtorInitializer represents a member initialization
    // that can be rolled into a memcpy.
    bool isMemberInitMemcpyable(CXXCtorInitializer *MemberInit) const {
      if (!MemcpyableCtor)
        return false;
      FieldDecl *Field = MemberInit->getMember();
      assert(Field != 0 && "No field for member init.");
      QualType FieldType = Field->getType();
      CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());

      // Bail out on non-POD, non-trivially-constructible members.
      if (!(CE && CE->getConstructor()->isTrivial()) &&
          !(FieldType.isTriviallyCopyableType(CGF.getContext()) ||
            FieldType->isReferenceType()))
        return false;

      // Bail out on volatile fields.
      if (!isMemcpyableField(Field))
        return false;

      // Otherwise we're good.
      return true;
    }

  public:
    ConstructorMemcpyizer(CodeGenFunction &CGF, const CXXConstructorDecl *CD,
                          FunctionArgList &Args)
      : FieldMemcpyizer(CGF, CD->getParent(), getTrivialCopySource(CD, Args)),
        ConstructorDecl(CD),
        MemcpyableCtor(CD->isImplicitlyDefined() &&
                       CD->isCopyOrMoveConstructor() &&
                       CGF.getLangOpts().getGC() == LangOptions::NonGC),
        Args(Args) { }

    void addMemberInitializer(CXXCtorInitializer *MemberInit) {
      if (isMemberInitMemcpyable(MemberInit)) {
        AggregatedInits.push_back(MemberInit);
        addMemcpyableField(MemberInit->getMember());
      } else {
        emitAggregatedInits();
        EmitMemberInitializer(CGF, ConstructorDecl->getParent(), MemberInit,
                              ConstructorDecl, Args);
      }
    }

    void emitAggregatedInits() {
      if (AggregatedInits.size() <= 1) {
        // This memcpy is too small to be worthwhile. Fall back on default
        // codegen.
        for (unsigned i = 0; i < AggregatedInits.size(); ++i) {
          EmitMemberInitializer(CGF, ConstructorDecl->getParent(),
                                AggregatedInits[i], ConstructorDecl, Args);
        }
        reset();
        return;
      }

      pushEHDestructors();
      emitMemcpy();
      AggregatedInits.clear();
    }

    void pushEHDestructors() {
      llvm::Value *ThisPtr = CGF.LoadCXXThis();
      QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
      LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);

      for (unsigned i = 0; i < AggregatedInits.size(); ++i) {
        QualType FieldType = AggregatedInits[i]->getMember()->getType();
        QualType::DestructionKind dtorKind = FieldType.isDestructedType();
        if (CGF.needsEHCleanup(dtorKind))
          CGF.pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
      }
    }

    void finish() {
      emitAggregatedInits();
    }

  private:
    const CXXConstructorDecl *ConstructorDecl;
    bool MemcpyableCtor;
    FunctionArgList &Args;
    SmallVector<CXXCtorInitializer*, 16> AggregatedInits;
  };

  class AssignmentMemcpyizer : public FieldMemcpyizer {
  private:

    // Returns the memcpyable field copied by the given statement, if one
    // exists. Otherwise returns null.
    FieldDecl *getMemcpyableField(Stmt *S) {
      if (!AssignmentsMemcpyable)
        return 0;
      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(S)) {
        // Recognise trivial assignments.
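        // e.g. 'this->a = other.a' in an implicit copy-assignment operator:
        // a BinaryOperator whose LHS and RHS are MemberExprs naming the same
        // memcpyable field (illustrative; the checks below are
        // authoritative).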
        if (BO->getOpcode() != BO_Assign)
          return 0;
        MemberExpr *ME = dyn_cast<MemberExpr>(BO->getLHS());
        if (!ME)
          return 0;
        FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
        if (!Field || !isMemcpyableField(Field))
          return 0;
        Stmt *RHS = BO->getRHS();
        if (ImplicitCastExpr *EC = dyn_cast<ImplicitCastExpr>(RHS))
          RHS = EC->getSubExpr();
        if (!RHS)
          return 0;
        MemberExpr *ME2 = dyn_cast<MemberExpr>(RHS);
        if (!ME2 || dyn_cast<FieldDecl>(ME2->getMemberDecl()) != Field)
          return 0;
        return Field;
      } else if (CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(S)) {
        CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MCE->getCalleeDecl());
        if (!(MD && (MD->isCopyAssignmentOperator() ||
                     MD->isMoveAssignmentOperator()) &&
              MD->isTrivial()))
          return 0;
        MemberExpr *IOA = dyn_cast<MemberExpr>(MCE->getImplicitObjectArgument());
        if (!IOA)
          return 0;
        FieldDecl *Field = dyn_cast<FieldDecl>(IOA->getMemberDecl());
        if (!Field || !isMemcpyableField(Field))
          return 0;
        MemberExpr *Arg0 = dyn_cast<MemberExpr>(MCE->getArg(0));
        if (!Arg0 || Field != dyn_cast<FieldDecl>(Arg0->getMemberDecl()))
          return 0;
        return Field;
      } else if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
        FunctionDecl *FD = dyn_cast<FunctionDecl>(CE->getCalleeDecl());
        if (!FD || FD->getBuiltinID() != Builtin::BI__builtin_memcpy)
          return 0;
        Expr *DstPtr = CE->getArg(0);
        if (ImplicitCastExpr *DC = dyn_cast<ImplicitCastExpr>(DstPtr))
          DstPtr = DC->getSubExpr();
        UnaryOperator *DUO = dyn_cast<UnaryOperator>(DstPtr);
        if (!DUO || DUO->getOpcode() != UO_AddrOf)
          return 0;
        MemberExpr *ME = dyn_cast<MemberExpr>(DUO->getSubExpr());
        if (!ME)
          return 0;
        FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
        if (!Field || !isMemcpyableField(Field))
          return 0;
        Expr *SrcPtr = CE->getArg(1);
        if (ImplicitCastExpr *SC = dyn_cast<ImplicitCastExpr>(SrcPtr))
          SrcPtr = SC->getSubExpr();
        UnaryOperator *SUO = dyn_cast<UnaryOperator>(SrcPtr);
        if (!SUO || SUO->getOpcode() != UO_AddrOf)
          return 0;
        MemberExpr *ME2 = dyn_cast<MemberExpr>(SUO->getSubExpr());
        if (!ME2 || Field != dyn_cast<FieldDecl>(ME2->getMemberDecl()))
          return 0;
        return Field;
      }

      return 0;
    }

    bool AssignmentsMemcpyable;
    SmallVector<Stmt*, 16> AggregatedStmts;

  public:

    AssignmentMemcpyizer(CodeGenFunction &CGF, const CXXMethodDecl *AD,
                         FunctionArgList &Args)
      : FieldMemcpyizer(CGF, AD->getParent(), Args[Args.size() - 1]),
        AssignmentsMemcpyable(CGF.getLangOpts().getGC() == LangOptions::NonGC) {
      assert(Args.size() == 2);
    }

    void emitAssignment(Stmt *S) {
      FieldDecl *F = getMemcpyableField(S);
      if (F) {
        addMemcpyableField(F);
        AggregatedStmts.push_back(S);
      } else {
        emitAggregatedStmts();
        CGF.EmitStmt(S);
      }
    }

    void emitAggregatedStmts() {
      if (AggregatedStmts.size() <= 1) {
        for (unsigned i = 0; i < AggregatedStmts.size(); ++i)
          CGF.EmitStmt(AggregatedStmts[i]);
        // reset() clears the aggregation state, so the emitMemcpy() below
        // is a no-op in this case.
        reset();
      }

      emitMemcpy();
      AggregatedStmts.clear();
    }

    void finish() {
      emitAggregatedStmts();
    }
  };

}

/// EmitCtorPrologue - This routine generates necessary code to initialize
/// base classes and non-static data members belonging to this constructor.
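/// For example (illustrative only), given
///   struct A { /* ... */ };
///   struct B : virtual A { int x, y; B(); };
/// the prologue of B's complete constructor initializes the virtual A
/// subobject first, then any non-virtual bases, then sets up the vtable
/// pointers, and finally initializes x and y in declaration order.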
void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
                                       CXXCtorType CtorType,
                                       FunctionArgList &Args) {
  if (CD->isDelegatingConstructor())
    return EmitDelegatingCXXConstructorCall(CD, Args);

  const CXXRecordDecl *ClassDecl = CD->getParent();

  CXXConstructorDecl::init_const_iterator B = CD->init_begin(),
                                          E = CD->init_end();

  llvm::BasicBlock *BaseCtorContinueBB = 0;
  if (ClassDecl->getNumVBases() &&
      !CGM.getTarget().getCXXABI().hasConstructorVariants()) {
    // The ABIs that don't have constructor variants need to put a branch
    // before the virtual base initialization code.
    BaseCtorContinueBB =
      CGM.getCXXABI().EmitCtorCompleteObjectHandler(*this, ClassDecl);
    assert(BaseCtorContinueBB);
  }

  // Virtual base initializers first.
  for (; B != E && (*B)->isBaseInitializer() && (*B)->isBaseVirtual(); B++) {
    EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
  }

  if (BaseCtorContinueBB) {
    // Complete object handler should continue to the remaining initializers.
    Builder.CreateBr(BaseCtorContinueBB);
    EmitBlock(BaseCtorContinueBB);
  }

  // Then, non-virtual base initializers.
  for (; B != E && (*B)->isBaseInitializer(); B++) {
    assert(!(*B)->isBaseVirtual());
    EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
  }

  InitializeVTablePointers(ClassDecl);

  // And finally, initialize class members.
  FieldConstructionScope FCS(*this, CXXThisValue);
  ConstructorMemcpyizer CM(*this, CD, Args);
  for (; B != E; B++) {
    CXXCtorInitializer *Member = (*B);
    assert(!Member->isBaseInitializer());
    assert(Member->isAnyMemberInitializer() &&
           "Delegating initializer on non-delegating constructor");
    CM.addMemberInitializer(Member);
  }
  CM.finish();
}

static bool
FieldHasTrivialDestructorBody(ASTContext &Context, const FieldDecl *Field);

static bool
HasTrivialDestructorBody(ASTContext &Context,
                         const CXXRecordDecl *BaseClassDecl,
                         const CXXRecordDecl *MostDerivedClassDecl)
{
  // If the destructor is trivial we don't have to check anything else.
  if (BaseClassDecl->hasTrivialDestructor())
    return true;

  if (!BaseClassDecl->getDestructor()->hasTrivialBody())
    return false;

  // Check fields.
  for (CXXRecordDecl::field_iterator I = BaseClassDecl->field_begin(),
       E = BaseClassDecl->field_end(); I != E; ++I) {
    const FieldDecl *Field = *I;

    if (!FieldHasTrivialDestructorBody(Context, Field))
      return false;
  }

  // Check non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I =
         BaseClassDecl->bases_begin(), E = BaseClassDecl->bases_end();
       I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *NonVirtualBase =
      cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
    if (!HasTrivialDestructorBody(Context, NonVirtualBase,
                                  MostDerivedClassDecl))
      return false;
  }

  if (BaseClassDecl == MostDerivedClassDecl) {
    // Check virtual bases.
    for (CXXRecordDecl::base_class_const_iterator I =
           BaseClassDecl->vbases_begin(), E = BaseClassDecl->vbases_end();
         I != E; ++I) {
      const CXXRecordDecl *VirtualBase =
        cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
      if (!HasTrivialDestructorBody(Context, VirtualBase,
                                    MostDerivedClassDecl))
        return false;
    }
  }

  return true;
}

static bool
FieldHasTrivialDestructorBody(ASTContext &Context,
                              const FieldDecl *Field)
{
  QualType FieldBaseElementType = Context.getBaseElementType(Field->getType());

  const RecordType *RT = FieldBaseElementType->getAs<RecordType>();
  if (!RT)
    return true;

  CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
  return HasTrivialDestructorBody(Context, FieldClassDecl, FieldClassDecl);
}

/// CanSkipVTablePointerInitialization - Check whether the destructor's body
/// is trivial enough that we can skip initializing any vtable pointers
/// before calling it.
static bool CanSkipVTablePointerInitialization(ASTContext &Context,
                                               const CXXDestructorDecl *Dtor) {
  if (!Dtor->hasTrivialBody())
    return false;

  // Check the fields.
  const CXXRecordDecl *ClassDecl = Dtor->getParent();
  for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
       E = ClassDecl->field_end(); I != E; ++I) {
    const FieldDecl *Field = *I;

    if (!FieldHasTrivialDestructorBody(Context, Field))
      return false;
  }

  return true;
}

/// EmitDestructorBody - Emits the body of the current destructor.
void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
  const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CurGD.getDecl());
  CXXDtorType DtorType = CurGD.getDtorType();

  // The call to operator delete in a deleting destructor happens
  // outside of the function-try-block, which means it's always
  // possible to delegate the destructor body to the complete
  // destructor.  Do so.
  if (DtorType == Dtor_Deleting) {
    EnterDtorCleanups(Dtor, Dtor_Deleting);
    EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
                          /*Delegating=*/false, LoadCXXThis());
    PopCleanupBlock();
    return;
  }

  Stmt *Body = Dtor->getBody();

  // If the body is a function-try-block, enter the try before
  // anything else.
  bool isTryBody = (Body && isa<CXXTryStmt>(Body));
  if (isTryBody)
    EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);

  // Enter the epilogue cleanups.
  RunCleanupsScope DtorEpilogue(*this);

  // If this is the complete variant, just invoke the base variant;
  // the epilogue will destruct the virtual bases.  But we can't do
  // this optimization if the body is a function-try-block, because
  // we'd introduce *two* handler blocks.
  switch (DtorType) {
  case Dtor_Deleting: llvm_unreachable("already handled deleting case");

  case Dtor_Complete:
    // Enter the cleanup scopes for virtual bases.
    EnterDtorCleanups(Dtor, Dtor_Complete);

    if (!isTryBody &&
        CGM.getTarget().getCXXABI().hasDestructorVariants()) {
      EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
                            /*Delegating=*/false, LoadCXXThis());
      break;
    }
    // Fallthrough: act like we're in the base variant.

  case Dtor_Base:
    // Enter the cleanup scopes for fields and non-virtual bases.
    EnterDtorCleanups(Dtor, Dtor_Base);

    // Initialize the vtable pointers before entering the body.
    if (!CanSkipVTablePointerInitialization(getContext(), Dtor))
      InitializeVTablePointers(Dtor->getParent());

    if (isTryBody)
      EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
    else if (Body)
      EmitStmt(Body);
    else {
      assert(Dtor->isImplicit() && "bodyless dtor not implicit");
      // nothing to do besides what's in the epilogue
    }
    // -fapple-kext must inline any call to this dtor into
    // the caller's body.
    if (getLangOpts().AppleKext)
      CurFn->addFnAttr(llvm::Attribute::AlwaysInline);
    break;
  }

  // Jump out through the epilogue cleanups.
  DtorEpilogue.ForceCleanup();

  // Exit the try if applicable.
  if (isTryBody)
    ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
}

void CodeGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &Args) {
  const CXXMethodDecl *AssignOp = cast<CXXMethodDecl>(CurGD.getDecl());
  const Stmt *RootS = AssignOp->getBody();
  assert(isa<CompoundStmt>(RootS) &&
         "Body of an implicit assignment operator should be compound stmt.");
  const CompoundStmt *RootCS = cast<CompoundStmt>(RootS);

  LexicalScope Scope(*this, RootCS->getSourceRange());

  AssignmentMemcpyizer AM(*this, AssignOp, Args);
  for (CompoundStmt::const_body_iterator I = RootCS->body_begin(),
                                         E = RootCS->body_end();
       I != E; ++I) {
    AM.emitAssignment(*I);
  }
  AM.finish();
}

namespace {
  /// Call the operator delete associated with the current destructor.
  struct CallDtorDelete : EHScopeStack::Cleanup {
    CallDtorDelete() {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
      const CXXRecordDecl *ClassDecl = Dtor->getParent();
      CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
                         CGF.getContext().getTagDeclType(ClassDecl));
    }
  };

  struct CallDtorDeleteConditional : EHScopeStack::Cleanup {
    llvm::Value *ShouldDeleteCondition;
  public:
    CallDtorDeleteConditional(llvm::Value *ShouldDeleteCondition)
      : ShouldDeleteCondition(ShouldDeleteCondition) {
      assert(ShouldDeleteCondition != NULL);
    }

    void Emit(CodeGenFunction &CGF, Flags flags) {
      llvm::BasicBlock *callDeleteBB = CGF.createBasicBlock("dtor.call_delete");
      llvm::BasicBlock *continueBB = CGF.createBasicBlock("dtor.continue");
      llvm::Value *ShouldCallDelete
        = CGF.Builder.CreateIsNull(ShouldDeleteCondition);
      CGF.Builder.CreateCondBr(ShouldCallDelete, continueBB, callDeleteBB);

      CGF.EmitBlock(callDeleteBB);
      const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
      const CXXRecordDecl *ClassDecl = Dtor->getParent();
      CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
                         CGF.getContext().getTagDeclType(ClassDecl));
      CGF.Builder.CreateBr(continueBB);

      CGF.EmitBlock(continueBB);
    }
  };

  class DestroyField : public EHScopeStack::Cleanup {
    const FieldDecl *field;
    CodeGenFunction::Destroyer *destroyer;
    bool useEHCleanupForArray;

  public:
    DestroyField(const FieldDecl *field, CodeGenFunction::Destroyer *destroyer,
                 bool useEHCleanupForArray)
      : field(field), destroyer(destroyer),
        useEHCleanupForArray(useEHCleanupForArray) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      // Find the address of the field.
      llvm::Value *thisValue = CGF.LoadCXXThis();
      QualType RecordTy = CGF.getContext().getTagDeclType(field->getParent());
      LValue ThisLV = CGF.MakeAddrLValue(thisValue, RecordTy);
      LValue LV = CGF.EmitLValueForField(ThisLV, field);
      assert(LV.isSimple());

      CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer,
                      flags.isForNormalCleanup() && useEHCleanupForArray);
    }
  };
}

/// EnterDtorCleanups - Push the cleanups that run at the end of a class's
/// destructor: calls to the destructors of members and base classes, in
/// reverse order of their construction.
void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
                                        CXXDtorType DtorType) {
  assert(!DD->isTrivial() &&
         "Should not emit dtor epilogue for trivial dtor!");

  // The deleting-destructor phase just needs to call the appropriate
  // operator delete that Sema picked up.
  if (DtorType == Dtor_Deleting) {
    assert(DD->getOperatorDelete() &&
           "operator delete missing - EnterDtorCleanups");
    if (CXXStructorImplicitParamValue) {
      // If there is an implicit param to the deleting dtor, it's a boolean
      // telling whether we should call delete at the end of the dtor.
      EHStack.pushCleanup<CallDtorDeleteConditional>(
          NormalAndEHCleanup, CXXStructorImplicitParamValue);
    } else {
      EHStack.pushCleanup<CallDtorDelete>(NormalAndEHCleanup);
    }
    return;
  }

  const CXXRecordDecl *ClassDecl = DD->getParent();

  // Unions have no bases and do not call field destructors.
  if (ClassDecl->isUnion())
    return;

  // The complete-destructor phase just destructs all the virtual bases.
  if (DtorType == Dtor_Complete) {

    // We push them in the forward order so that they'll be popped in
    // the reverse order.
    for (CXXRecordDecl::base_class_const_iterator I =
           ClassDecl->vbases_begin(), E = ClassDecl->vbases_end();
         I != E; ++I) {
      const CXXBaseSpecifier &Base = *I;
      CXXRecordDecl *BaseClassDecl
        = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());

      // Ignore trivial destructors.
      if (BaseClassDecl->hasTrivialDestructor())
        continue;

      EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
                                        BaseClassDecl,
                                        /*BaseIsVirtual*/ true);
    }

    return;
  }

  assert(DtorType == Dtor_Base);

  // Destroy non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I =
        ClassDecl->bases_begin(), E = ClassDecl->bases_end(); I != E; ++I) {
    const CXXBaseSpecifier &Base = *I;

    // Ignore virtual bases.
    if (Base.isVirtual())
      continue;

    CXXRecordDecl *BaseClassDecl = Base.getType()->getAsCXXRecordDecl();

    // Ignore trivial destructors.
    if (BaseClassDecl->hasTrivialDestructor())
      continue;

    EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
                                      BaseClassDecl,
                                      /*BaseIsVirtual*/ false);
  }

  // Destroy direct fields.
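  // For example (illustrative only), given
  //   struct S { std::string a, b; ~S(); };
  // this pushes cleanups for 'a' and then 'b', so the fields are destroyed
  // in reverse declaration order after the destructor body runs.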
  for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
       E = ClassDecl->field_end(); I != E; ++I) {
    const FieldDecl *field = *I;
    QualType type = field->getType();
    QualType::DestructionKind dtorKind = type.isDestructedType();
    if (!dtorKind) continue;

    // Anonymous union members do not have their destructors called.
    const RecordType *RT = type->getAsUnionType();
    if (RT && RT->getDecl()->isAnonymousStructOrUnion()) continue;

    CleanupKind cleanupKind = getCleanupKind(dtorKind);
    EHStack.pushCleanup<DestroyField>(cleanupKind, field,
                                      getDestroyer(dtorKind),
                                      cleanupKind & EHCleanup);
  }
}

/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
/// constructor for each of several members of an array.
///
/// \param ctor the constructor to call for each element
/// \param arrayType the type of the array to initialize
/// \param arrayBegin an arrayType*
/// \param zeroInitialize true if each element should be
///   zero-initialized before it is constructed
void
CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
                                            const ConstantArrayType *arrayType,
                                            llvm::Value *arrayBegin,
                                          CallExpr::const_arg_iterator argBegin,
                                            CallExpr::const_arg_iterator argEnd,
                                            bool zeroInitialize) {
  QualType elementType;
  llvm::Value *numElements =
    emitArrayLength(arrayType, elementType, arrayBegin);

  EmitCXXAggrConstructorCall(ctor, numElements, arrayBegin,
                             argBegin, argEnd, zeroInitialize);
}

/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
/// constructor for each of several members of an array.
///
/// \param ctor the constructor to call for each element
/// \param numElements the number of elements in the array;
///   may be zero
/// \param arrayBegin a T*, where T is the type constructed by ctor
/// \param zeroInitialize true if each element should be
///   zero-initialized before it is constructed
void
CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
                                            llvm::Value *numElements,
                                            llvm::Value *arrayBegin,
                                          CallExpr::const_arg_iterator argBegin,
                                            CallExpr::const_arg_iterator argEnd,
                                            bool zeroInitialize) {

  // It's legal for numElements to be zero.  This can happen both
  // dynamically, because x can be zero in 'new A[x]', and statically,
  // because of GCC extensions that permit zero-length arrays.  There
  // are probably legitimate places where we could assume that this
  // doesn't happen, but it's not clear that it's worth it.
  llvm::BranchInst *zeroCheckBranch = 0;

  // Optimize for a constant count.
  llvm::ConstantInt *constantCount
    = dyn_cast<llvm::ConstantInt>(numElements);
  if (constantCount) {
    // Just skip out if the constant count is zero.
    if (constantCount->isZero()) return;

  // Otherwise, emit the check.
  } else {
    llvm::BasicBlock *loopBB = createBasicBlock("new.ctorloop");
    llvm::Value *iszero = Builder.CreateIsNull(numElements, "isempty");
    // Both successors start out as the loop block; the zero successor is
    // patched to branch past the loop (see setSuccessor below) once the
    // continuation block exists.
    zeroCheckBranch = Builder.CreateCondBr(iszero, loopBB, loopBB);
    EmitBlock(loopBB);
  }

  // Find the end of the array.
  llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(arrayBegin, numElements,
                                                    "arrayctor.end");

  // Enter the loop, setting up a phi for the current location to initialize.
  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = createBasicBlock("arrayctor.loop");
  EmitBlock(loopBB);
  llvm::PHINode *cur = Builder.CreatePHI(arrayBegin->getType(), 2,
                                         "arrayctor.cur");
  cur->addIncoming(arrayBegin, entryBB);

  // Inside the loop body, emit the constructor call on the array element.

  QualType type = getContext().getTypeDeclType(ctor->getParent());

  // Zero initialize the storage, if requested.
  if (zeroInitialize)
    EmitNullInitialization(cur, type);

  // C++ [class.temporary]p4:
  // There are two contexts in which temporaries are destroyed at a different
  // point than the end of the full-expression. The first context is when a
  // default constructor is called to initialize an element of an array.
  // If the constructor has one or more default arguments, the destruction of
  // every temporary created in a default argument expression is sequenced
  // before the construction of the next array element, if any.

  {
    RunCleanupsScope Scope(*this);

    // Evaluate the constructor and its arguments in a regular
    // partial-destroy cleanup.
    if (getLangOpts().Exceptions &&
        !ctor->getParent()->hasTrivialDestructor()) {
      Destroyer *destroyer = destroyCXXObject;
      pushRegularPartialArrayCleanup(arrayBegin, cur, type, *destroyer);
    }

    EmitCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/ false,
                           /*Delegating=*/false, cur, argBegin, argEnd);
  }

  // Go to the next element.
  llvm::Value *next =
    Builder.CreateInBoundsGEP(cur, llvm::ConstantInt::get(SizeTy, 1),
                              "arrayctor.next");
  cur->addIncoming(next, Builder.GetInsertBlock());

  // Check whether that's the end of the loop.
  llvm::Value *done = Builder.CreateICmpEQ(next, arrayEnd, "arrayctor.done");
  llvm::BasicBlock *contBB = createBasicBlock("arrayctor.cont");
  Builder.CreateCondBr(done, contBB, loopBB);

  // Patch the earlier check to skip over the loop.
  if (zeroCheckBranch) zeroCheckBranch->setSuccessor(0, contBB);

  EmitBlock(contBB);
}

void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF,
                                       llvm::Value *addr,
                                       QualType type) {
  const RecordType *rtype = type->castAs<RecordType>();
  const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
  const CXXDestructorDecl *dtor = record->getDestructor();
  assert(!dtor->isTrivial());
  CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false,
                            /*Delegating=*/false, addr);
}

void
CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
                                        CXXCtorType Type, bool ForVirtualBase,
                                        bool Delegating,
                                        llvm::Value *This,
                                        CallExpr::const_arg_iterator ArgBeg,
                                        CallExpr::const_arg_iterator ArgEnd) {
  // If this is a trivial constructor, just emit what's needed.
  if (D->isTrivial()) {
    if (ArgBeg == ArgEnd) {
      // Trivial default constructor, no codegen required.
      assert(D->isDefaultConstructor() &&
             "trivial 0-arg ctor not a default ctor");
      return;
    }

    assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
    assert(D->isCopyOrMoveConstructor() &&
           "trivial 1-arg ctor not a copy/move ctor");

    const Expr *E = (*ArgBeg);
    QualType Ty = E->getType();
    llvm::Value *Src = EmitLValue(E).getAddress();
    EmitAggregateCopy(This, Src, Ty);
    return;
  }

  // Non-trivial constructors are handled in an ABI-specific manner.
  CGM.getCXXABI().EmitConstructorCall(*this, D, Type, ForVirtualBase,
                                      Delegating, This, ArgBeg, ArgEnd);
}

void
CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
                                        llvm::Value *This, llvm::Value *Src,
                                        CallExpr::const_arg_iterator ArgBeg,
                                        CallExpr::const_arg_iterator ArgEnd) {
  if (D->isTrivial()) {
    assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
    assert(D->isCopyOrMoveConstructor() &&
           "trivial 1-arg ctor not a copy/move ctor");
    EmitAggregateCopy(This, Src, (*ArgBeg)->getType());
    return;
  }
  llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, clang::Ctor_Complete);
  assert(D->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  const FunctionProtoType *FPT = D->getType()->getAs<FunctionProtoType>();

  CallArgList Args;

  // Push the this ptr.
  Args.add(RValue::get(This), D->getThisType(getContext()));

  // Push the src ptr.
  QualType QT = *(FPT->arg_type_begin());
  llvm::Type *t = CGM.getTypes().ConvertType(QT);
  Src = Builder.CreateBitCast(Src, t);
  Args.add(RValue::get(Src), QT);

  // Skip over the first argument (Src), which was pushed above.
  ++ArgBeg;
  CallExpr::const_arg_iterator Arg = ArgBeg;
  for (FunctionProtoType::arg_type_iterator I = FPT->arg_type_begin()+1,
       E = FPT->arg_type_end(); I != E; ++I, ++Arg) {
    assert(Arg != ArgEnd && "Running over edge of argument list!");
    EmitCallArg(Args, *Arg, *I);
  }
  // Either we've emitted all the call args, or we have a call to a
  // variadic function.
  assert((Arg == ArgEnd || FPT->isVariadic()) &&
         "Extra arguments in non-variadic function!");
  // If we still have any arguments, emit them using the type of the argument.
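  // (Per the assert above, this can only happen for a variadic constructor,
  // where trailing arguments have no corresponding parameter type in the
  // prototype.)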
  for (; Arg != ArgEnd; ++Arg) {
    QualType ArgType = Arg->getType();
    EmitCallArg(Args, *Arg, ArgType);
  }

  EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, RequiredArgs::All),
           Callee, ReturnValueSlot(), Args, D);
}

void
CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                                CXXCtorType CtorType,
                                                const FunctionArgList &Args) {
  CallArgList DelegateArgs;

  FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
  assert(I != E && "no parameters to constructor");

  // this
  DelegateArgs.add(RValue::get(LoadCXXThis()), (*I)->getType());
  ++I;

  // vtt
  if (llvm::Value *VTT = GetVTTParameter(GlobalDecl(Ctor, CtorType),
                                         /*ForVirtualBase=*/false,
                                         /*Delegating=*/true)) {
    QualType VoidPP = getContext().getPointerType(getContext().VoidPtrTy);
    DelegateArgs.add(RValue::get(VTT), VoidPP);

    if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
      assert(I != E && "cannot skip vtt parameter, already done with args");
      assert((*I)->getType() == VoidPP && "skipping parameter not of vtt type");
      ++I;
    }
  }

  // Explicit arguments.
  for (; I != E; ++I) {
    const VarDecl *param = *I;
    EmitDelegateCallArg(DelegateArgs, param);
  }

  llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(Ctor, CtorType);
  EmitCall(CGM.getTypes().arrangeCXXConstructorDeclaration(Ctor, CtorType),
           Callee, ReturnValueSlot(), DelegateArgs, Ctor);
}

namespace {
  struct CallDelegatingCtorDtor : EHScopeStack::Cleanup {
    const CXXDestructorDecl *Dtor;
    llvm::Value *Addr;
    CXXDtorType Type;

    CallDelegatingCtorDtor(const CXXDestructorDecl *D, llvm::Value *Addr,
                           CXXDtorType Type)
      : Dtor(D), Addr(Addr), Type(Type) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      CGF.EmitCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false,
                                /*Delegating=*/true, Addr);
    }
  };
}

void
CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                                  const FunctionArgList &Args) {
  assert(Ctor->isDelegatingConstructor());

  llvm::Value *ThisPtr = LoadCXXThis();

  QualType Ty = getContext().getTagDeclType(Ctor->getParent());
  CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
  AggValueSlot AggSlot =
    AggValueSlot::forAddr(ThisPtr, Alignment, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased);

  EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot);

  const CXXRecordDecl *ClassDecl = Ctor->getParent();
  if (CGM.getLangOpts().Exceptions && !ClassDecl->hasTrivialDestructor()) {
    CXXDtorType Type =
      CurGD.getCtorType() == Ctor_Complete ? Dtor_Complete : Dtor_Base;

    EHStack.pushCleanup<CallDelegatingCtorDtor>(EHCleanup,
                                                ClassDecl->getDestructor(),
                                                ThisPtr, Type);
  }
}

void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
                                            CXXDtorType Type,
                                            bool ForVirtualBase,
                                            bool Delegating,
                                            llvm::Value *This) {
  llvm::Value *VTT = GetVTTParameter(GlobalDecl(DD, Type),
                                     ForVirtualBase, Delegating);
  llvm::Value *Callee = 0;
  if (getLangOpts().AppleKext)
    Callee = BuildAppleKextVirtualDestructorCall(DD, Type,
                                                 DD->getParent());

  if (!Callee)
    Callee = CGM.GetAddrOfCXXDestructor(DD, Type);

  // FIXME: Provide a source location here.
  EmitCXXMemberCall(DD, SourceLocation(), Callee, ReturnValueSlot(), This,
                    VTT, getContext().getPointerType(getContext().VoidPtrTy),
                    0, 0);
}

namespace {
  struct CallLocalDtor : EHScopeStack::Cleanup {
    const CXXDestructorDecl *Dtor;
    llvm::Value *Addr;

    CallLocalDtor(const CXXDestructorDecl *D, llvm::Value *Addr)
      : Dtor(D), Addr(Addr) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                                /*ForVirtualBase=*/false,
                                /*Delegating=*/false, Addr);
    }
  };
}

void CodeGenFunction::PushDestructorCleanup(const CXXDestructorDecl *D,
                                            llvm::Value *Addr) {
  EHStack.pushCleanup<CallLocalDtor>(NormalAndEHCleanup, D, Addr);
}

void CodeGenFunction::PushDestructorCleanup(QualType T, llvm::Value *Addr) {
  CXXRecordDecl *ClassDecl = T->getAsCXXRecordDecl();
  if (!ClassDecl) return;
  if (ClassDecl->hasTrivialDestructor()) return;

  const CXXDestructorDecl *D = ClassDecl->getDestructor();
  assert(D && D->isUsed() && "destructor not marked as used!");
  PushDestructorCleanup(D, Addr);
}

void
CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
                                         const CXXRecordDecl *NearestVBase,
                                         CharUnits OffsetFromNearestVBase,
                                         llvm::Constant *VTable,
                                         const CXXRecordDecl *VTableClass) {
  const CXXRecordDecl *RD = Base.getBase();

  // Compute the address point.
  llvm::Value *VTableAddressPoint;

  bool NeedsVTTParam = CGM.getCXXABI().NeedsVTTParameter(CurGD);

  // Check if we need to use a vtable from the VTT.
  if (NeedsVTTParam && (RD->getNumVBases() || NearestVBase)) {
    // Get the secondary vpointer index.
    uint64_t VirtualPointerIndex =
      CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);

    // Load the VTT.
    llvm::Value *VTT = LoadCXXVTT();
    if (VirtualPointerIndex)
      VTT = Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);

    // And load the address point from the VTT.
    VTableAddressPoint = Builder.CreateLoad(VTT);
  } else {
    uint64_t AddressPoint =
      CGM.getVTableContext().getVTableLayout(VTableClass).getAddressPoint(Base);
    VTableAddressPoint =
      Builder.CreateConstInBoundsGEP2_64(VTable, 0, AddressPoint);
  }

  // Compute where to store the address point.
  llvm::Value *VirtualOffset = 0;
  CharUnits NonVirtualOffset = CharUnits::Zero();

  if (NeedsVTTParam && NearestVBase) {
    // We need to use the virtual base offset offset (the offset of this
    // virtual base's offset within the vtable) because the virtual base
    // might be at a different offset in the most derived class.
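    // For example (an illustrative sketch):
    //
    //   struct V { virtual void f(); };
    //   struct A : virtual V { int a; };
    //   struct B : A { double b; };
    //
    // While A's base-object constructor runs as part of constructing a B,
    // V may sit at a different offset relative to the A subobject than it
    // would in a standalone A, so the offset has to be loaded at run time
    // through the vtable rather than computed from static layout.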
    VirtualOffset = CGM.getCXXABI().GetVirtualBaseClassOffset(*this,
                                                              LoadCXXThis(),
                                                              VTableClass,
                                                              NearestVBase);
    NonVirtualOffset = OffsetFromNearestVBase;
  } else {
    // We can just use the base offset in the complete class.
    NonVirtualOffset = Base.getBaseOffset();
  }

  // Apply the offsets.
  llvm::Value *VTableField = LoadCXXThis();

  if (!NonVirtualOffset.isZero() || VirtualOffset)
    VTableField = ApplyNonVirtualAndVirtualOffset(*this, VTableField,
                                                  NonVirtualOffset,
                                                  VirtualOffset);

  // Finally, store the address point.
  llvm::Type *AddressPointPtrTy =
    VTableAddressPoint->getType()->getPointerTo();
  VTableField = Builder.CreateBitCast(VTableField, AddressPointPtrTy);
  llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField);
  CGM.DecorateInstruction(Store, CGM.getTBAAInfoForVTablePtr());
}

void
CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
                                          const CXXRecordDecl *NearestVBase,
                                          CharUnits OffsetFromNearestVBase,
                                          bool BaseIsNonVirtualPrimaryBase,
                                          llvm::Constant *VTable,
                                          const CXXRecordDecl *VTableClass,
                                          VisitedVirtualBasesSetTy &VBases) {
  // If this base is a non-virtual primary base, the address point has
  // already been set.
  if (!BaseIsNonVirtualPrimaryBase) {
    // Initialize the vtable pointer for this base.
    InitializeVTablePointer(Base, NearestVBase, OffsetFromNearestVBase,
                            VTable, VTableClass);
  }

  const CXXRecordDecl *RD = Base.getBase();

  // Traverse bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    CXXRecordDecl *BaseDecl
      = cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // Ignore classes without a vtable.
    if (!BaseDecl->isDynamicClass())
      continue;

    CharUnits BaseOffset;
    CharUnits BaseOffsetFromNearestVBase;
    bool BaseDeclIsNonVirtualPrimaryBase;

    if (I->isVirtual()) {
      // Check if we've visited this virtual base before.
      if (!VBases.insert(BaseDecl))
        continue;

      const ASTRecordLayout &Layout =
        getContext().getASTRecordLayout(VTableClass);

      BaseOffset = Layout.getVBaseClassOffset(BaseDecl);
      BaseOffsetFromNearestVBase = CharUnits::Zero();
      BaseDeclIsNonVirtualPrimaryBase = false;
    } else {
      const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

      BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
      BaseOffsetFromNearestVBase =
        OffsetFromNearestVBase + Layout.getBaseClassOffset(BaseDecl);
      BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl;
    }

    InitializeVTablePointers(BaseSubobject(BaseDecl, BaseOffset),
                             I->isVirtual() ? BaseDecl : NearestVBase,
                             BaseOffsetFromNearestVBase,
                             BaseDeclIsNonVirtualPrimaryBase,
                             VTable, VTableClass, VBases);
  }
}

void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
  // Ignore classes without a vtable.
  if (!RD->isDynamicClass())
    return;

  // Get the VTable.
  llvm::Constant *VTable = CGM.getVTables().GetAddrOfVTable(RD);

  // Initialize the vtable pointers for this class and all of its bases.
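  // A virtual base can be reachable through several paths in the hierarchy
  // but has exactly one subobject (and thus one vtable pointer), so the
  // recursive walk above tracks visited virtual bases in VBases and
  // initializes each of them exactly once.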
  VisitedVirtualBasesSetTy VBases;
  InitializeVTablePointers(BaseSubobject(RD, CharUnits::Zero()),
                           /*NearestVBase=*/0,
                           /*OffsetFromNearestVBase=*/CharUnits::Zero(),
                           /*BaseIsNonVirtualPrimaryBase=*/false,
                           VTable, RD, VBases);
}

llvm::Value *CodeGenFunction::GetVTablePtr(llvm::Value *This,
                                           llvm::Type *Ty) {
  llvm::Value *VTablePtrSrc = Builder.CreateBitCast(This, Ty->getPointerTo());
  llvm::Instruction *VTable = Builder.CreateLoad(VTablePtrSrc, "vtable");
  CGM.DecorateInstruction(VTable, CGM.getTBAAInfoForVTablePtr());
  return VTable;
}

static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
  const Expr *E = Base;

  while (true) {
    E = E->IgnoreParens();
    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
      if (CE->getCastKind() == CK_DerivedToBase ||
          CE->getCastKind() == CK_UncheckedDerivedToBase ||
          CE->getCastKind() == CK_NoOp) {
        E = CE->getSubExpr();
        continue;
      }
    }

    break;
  }

  QualType DerivedType = E->getType();
  if (const PointerType *PTy = DerivedType->getAs<PointerType>())
    DerivedType = PTy->getPointeeType();

  return cast<CXXRecordDecl>(DerivedType->castAs<RecordType>()->getDecl());
}

// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
// quite what we want.
static const Expr *skipNoOpCastsAndParens(const Expr *E) {
  while (true) {
    if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
      E = PE->getSubExpr();
      continue;
    }

    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
      if (CE->getCastKind() == CK_NoOp) {
        E = CE->getSubExpr();
        continue;
      }
    }
    if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
      if (UO->getOpcode() == UO_Extension) {
        E = UO->getSubExpr();
        continue;
      }
    }
    return E;
  }
}

/// canDevirtualizeMemberFunctionCall - Checks whether the given virtual member
/// function call on the given expr can be devirtualized.
static bool canDevirtualizeMemberFunctionCall(const Expr *Base,
                                              const CXXMethodDecl *MD) {
  // If the most derived class is marked final, we know that no subclass can
  // override this member function and so we can devirtualize it. For example:
  //
  //   struct A { virtual void f(); };
  //   struct B final : A { };
  //
  //   void f(B *b) {
  //     b->f();
  //   }
  //
  const CXXRecordDecl *MostDerivedClassDecl = getMostDerivedClassDecl(Base);
  if (MostDerivedClassDecl->hasAttr<FinalAttr>())
    return true;

  // If the member function is marked 'final', we know that it can't be
  // overridden and can therefore devirtualize it.
  if (MD->hasAttr<FinalAttr>())
    return true;

  // Similarly, if the class the member function belongs to is marked 'final',
  // it can't be derived from, and we can therefore devirtualize the member
  // function call.
  if (MD->getParent()->hasAttr<FinalAttr>())
    return true;

  Base = skipNoOpCastsAndParens(Base);
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
      // If the callee is a variable of record (non-pointer, non-reference)
      // type, its dynamic type is known statically and we can devirtualize.
      return VD->getType()->isRecordType();
    }

    return false;
  }

  // We can always devirtualize calls on temporary object expressions.
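  // For example, in 'A().f()' the temporary's dynamic type is known to be
  // exactly A, so a virtual call is unnecessary.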
  if (isa<CXXConstructExpr>(Base))
    return true;

  // And calls on bound temporaries.
  if (isa<CXXBindTemporaryExpr>(Base))
    return true;

  // Check if this is a call expr that returns a record type.
  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
    return CE->getCallReturnType()->isRecordType();

  // We can't devirtualize the call.
  return false;
}

static bool UseVirtualCall(ASTContext &Context,
                           const CXXOperatorCallExpr *CE,
                           const CXXMethodDecl *MD) {
  if (!MD->isVirtual())
    return false;

  // When building with -fapple-kext, all calls must go through the vtable,
  // since the kernel linker can do runtime patching of vtables.
  if (Context.getLangOpts().AppleKext)
    return true;

  return !canDevirtualizeMemberFunctionCall(CE->getArg(0), MD);
}

llvm::Value *
CodeGenFunction::EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
                                             const CXXMethodDecl *MD,
                                             llvm::Value *This) {
  llvm::FunctionType *fnType =
    CGM.getTypes().GetFunctionType(
        CGM.getTypes().arrangeCXXMethodDeclaration(MD));

  if (UseVirtualCall(getContext(), E, MD))
    return BuildVirtualCall(MD, This, fnType);

  return CGM.GetAddrOfFunction(MD, fnType);
}

void CodeGenFunction::EmitForwardingCallToLambda(const CXXRecordDecl *lambda,
                                                 CallArgList &callArgs) {
  // Look up the call operator.
  DeclarationName operatorName
    = getContext().DeclarationNames.getCXXOperatorName(OO_Call);
  CXXMethodDecl *callOperator =
    cast<CXXMethodDecl>(lambda->lookup(operatorName).front());

  // Get the address of the call operator.
  const CGFunctionInfo &calleeFnInfo =
    CGM.getTypes().arrangeCXXMethodDeclaration(callOperator);
  llvm::Value *callee =
    CGM.GetAddrOfFunction(GlobalDecl(callOperator),
                          CGM.getTypes().GetFunctionType(calleeFnInfo));

  // Prepare the return slot.
  const FunctionProtoType *FPT =
    callOperator->getType()->castAs<FunctionProtoType>();
  QualType resultType = FPT->getResultType();
  ReturnValueSlot returnSlot;
  if (!resultType->isVoidType() &&
      calleeFnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
      !hasScalarEvaluationKind(calleeFnInfo.getReturnType()))
    returnSlot = ReturnValueSlot(ReturnValue, resultType.isVolatileQualified());

  // We don't need to separately arrange the call arguments because
  // the call can't be variadic anyway --- it's impossible to forward
  // variadic arguments.

  // Now emit our call.
  RValue RV = EmitCall(calleeFnInfo, callee, returnSlot,
                       callArgs, callOperator);

  // If necessary, copy the returned value into the slot.
  if (!resultType->isVoidType() && returnSlot.isNull())
    EmitReturnOfRValue(RV, resultType);
  else
    EmitBranchThroughCleanup(ReturnBlock);
}

void CodeGenFunction::EmitLambdaBlockInvokeBody() {
  const BlockDecl *BD = BlockInfo->getBlockDecl();
  const VarDecl *variable = BD->capture_begin()->getVariable();
  const CXXRecordDecl *Lambda = variable->getType()->getAsCXXRecordDecl();

  // Start building arguments for the forwarding call.
  CallArgList CallArgs;

  QualType ThisType =
    getContext().getPointerType(getContext().getRecordType(Lambda));
  llvm::Value *ThisPtr = GetAddrOfBlockDecl(variable, false);
  CallArgs.add(RValue::get(ThisPtr), ThisType);

  // Add the rest of the parameters.
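  // The block's parameters mirror the parameters of the lambda's call
  // operator one-to-one, so each can simply be re-emitted as an argument
  // of the forwarding call.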
  for (BlockDecl::param_const_iterator I = BD->param_begin(),
       E = BD->param_end(); I != E; ++I) {
    ParmVarDecl *param = *I;
    EmitDelegateCallArg(CallArgs, param);
  }

  EmitForwardingCallToLambda(Lambda, CallArgs);
}

void CodeGenFunction::EmitLambdaToBlockPointerBody(FunctionArgList &Args) {
  if (cast<CXXMethodDecl>(CurCodeDecl)->isVariadic()) {
    // FIXME: Making this work correctly is nasty because it requires either
    // cloning the body of the call operator or making the call operator
    // forward.
    CGM.ErrorUnsupported(CurCodeDecl, "lambda conversion to variadic function");
    return;
  }

  EmitFunctionBody(Args);
}

void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
  const CXXRecordDecl *Lambda = MD->getParent();

  // Start building arguments for the forwarding call.
  CallArgList CallArgs;

  // The call operator of a lambda without captures never uses its 'this'
  // argument, so an undef 'this' pointer is sufficient here.
  QualType ThisType =
    getContext().getPointerType(getContext().getRecordType(Lambda));
  llvm::Value *ThisPtr =
    llvm::UndefValue::get(getTypes().ConvertType(ThisType));
  CallArgs.add(RValue::get(ThisPtr), ThisType);

  // Add the rest of the parameters.
  for (FunctionDecl::param_const_iterator I = MD->param_begin(),
       E = MD->param_end(); I != E; ++I) {
    ParmVarDecl *param = *I;
    EmitDelegateCallArg(CallArgs, param);
  }

  EmitForwardingCallToLambda(Lambda, CallArgs);
}

void CodeGenFunction::EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD) {
  if (MD->isVariadic()) {
    // FIXME: Making this work correctly is nasty because it requires either
    // cloning the body of the call operator or making the call operator
    // forward.
    CGM.ErrorUnsupported(MD, "lambda conversion to variadic function");
    return;
  }

  EmitLambdaDelegatingInvokeBody(MD);
}
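
// For example (an illustrative sketch), given a capture-less lambda:
//
//   auto l = [](int x) { return x + 1; };
//   int (*fp)(int) = l;
//
// the conversion to a function pointer yields a static invoker whose body,
// emitted by EmitLambdaStaticInvokeFunction above, forwards its parameters
// to the lambda's call operator.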