//===--- CGClass.cpp - Emit LLVM Code for C++ classes ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with C++ code generation of classes
//
//===----------------------------------------------------------------------===//

#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"

using namespace clang;
using namespace CodeGen;

static CharUnits
ComputeNonVirtualBaseClassOffset(ASTContext &Context,
                                 const CXXRecordDecl *DerivedClass,
                                 CastExpr::path_const_iterator Start,
                                 CastExpr::path_const_iterator End) {
  CharUnits Offset = CharUnits::Zero();

  const CXXRecordDecl *RD = DerivedClass;

  for (CastExpr::path_const_iterator I = Start; I != End; ++I) {
    const CXXBaseSpecifier *Base = *I;
    assert(!Base->isVirtual() && "Should not see virtual bases here!");

    // Get the layout.
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());

    // Add the offset.
    Offset += Layout.getBaseClassOffset(BaseDecl);

    RD = BaseDecl;
  }

  return Offset;
}

llvm::Constant *
CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
                                   CastExpr::path_const_iterator PathBegin,
                                   CastExpr::path_const_iterator PathEnd) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  CharUnits Offset =
    ComputeNonVirtualBaseClassOffset(getContext(), ClassDecl,
                                     PathBegin, PathEnd);
  if (Offset.isZero())
    return nullptr;

  llvm::Type *PtrDiffTy =
    Types.ConvertType(getContext().getPointerDiffType());

  return llvm::ConstantInt::get(PtrDiffTy, Offset.getQuantity());
}

/// Gets the address of a direct base class within a complete object.
/// This should only be used for (1) non-virtual bases or (2) virtual bases
/// when the type is known to be complete (e.g. in complete destructors).
///
/// The object pointed to by 'This' is assumed to be non-null.
llvm::Value *
CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
                                                   const CXXRecordDecl *Derived,
                                                   const CXXRecordDecl *Base,
                                                   bool BaseIsVirtual) {
  // 'this' must be a pointer (in some address space) to Derived.
  assert(This->getType()->isPointerTy() &&
         cast<llvm::PointerType>(This->getType())->getElementType()
           == ConvertType(Derived));

  // Compute the offset of the base, whether virtual or direct.
  CharUnits Offset;
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived);
  if (BaseIsVirtual)
    Offset = Layout.getVBaseClassOffset(Base);
  else
    Offset = Layout.getBaseClassOffset(Base);

  // Shift and cast down to the base type.
  // TODO: for complete types, this should be possible with a GEP.
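  //
  // Illustrative sketch (assumed types, not exact emitted IR): for
  //   struct A { int a; };  struct B { int b; };  struct C : A, B {};
  // the B subobject of a complete C usually lives at a non-zero offset,
  // so the address is formed roughly as:
  //   %0 = bitcast %struct.C* %this to i8*
  //   %1 = getelementptr inbounds i8* %0, i64 <offsetof(C, B)>
  //   %2 = bitcast i8* %1 to %struct.B*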
  llvm::Value *V = This;
  if (Offset.isPositive()) {
    V = Builder.CreateBitCast(V, Int8PtrTy);
    V = Builder.CreateConstInBoundsGEP1_64(V, Offset.getQuantity());
  }
  V = Builder.CreateBitCast(V, ConvertType(Base)->getPointerTo());

  return V;
}

static llvm::Value *
ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ptr,
                                CharUnits nonVirtualOffset,
                                llvm::Value *virtualOffset) {
  // Assert that we have something to do.
  assert(!nonVirtualOffset.isZero() || virtualOffset != nullptr);

  // Compute the offset from the static and dynamic components.
  llvm::Value *baseOffset;
  if (!nonVirtualOffset.isZero()) {
    baseOffset = llvm::ConstantInt::get(CGF.PtrDiffTy,
                                        nonVirtualOffset.getQuantity());
    if (virtualOffset) {
      baseOffset = CGF.Builder.CreateAdd(virtualOffset, baseOffset);
    }
  } else {
    baseOffset = virtualOffset;
  }

  // Apply the base offset.
  ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8PtrTy);
  ptr = CGF.Builder.CreateInBoundsGEP(ptr, baseOffset, "add.ptr");
  return ptr;
}

llvm::Value *
CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
                                       const CXXRecordDecl *Derived,
                                       CastExpr::path_const_iterator PathBegin,
                                       CastExpr::path_const_iterator PathEnd,
                                       bool NullCheckValue) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  CastExpr::path_const_iterator Start = PathBegin;
  const CXXRecordDecl *VBase = nullptr;

  // Sema has done some convenient canonicalization here: if the
  // access path involved any virtual steps, the conversion path will
  // *start* with a step down to the correct virtual base subobject,
  // and hence will not require any further steps.
  if ((*Start)->isVirtual()) {
    VBase =
      cast<CXXRecordDecl>((*Start)->getType()->getAs<RecordType>()->getDecl());
    ++Start;
  }

  // Compute the static offset of the ultimate destination within its
  // allocating subobject (the virtual base, if there is one, or else
  // the "complete" object that we see).
  CharUnits NonVirtualOffset =
    ComputeNonVirtualBaseClassOffset(getContext(), VBase ? VBase : Derived,
                                     Start, PathEnd);

  // If there's a virtual step, we can sometimes "devirtualize" it.
  // For now, that's limited to when the derived type is final.
  // TODO: "devirtualize" this for accesses to known-complete objects.
  if (VBase && Derived->hasAttr<FinalAttr>()) {
    const ASTRecordLayout &layout = getContext().getASTRecordLayout(Derived);
    CharUnits vBaseOffset = layout.getVBaseClassOffset(VBase);
    NonVirtualOffset += vBaseOffset;
    VBase = nullptr; // we no longer have a virtual step
  }

  // Get the base pointer type.
  llvm::Type *BasePtrTy =
    ConvertType((PathEnd[-1])->getType())->getPointerTo();

  // If the static offset is zero and we don't have a virtual step,
  // just do a bitcast; null checks are unnecessary.
  if (NonVirtualOffset.isZero() && !VBase) {
    return Builder.CreateBitCast(Value, BasePtrTy);
  }

  llvm::BasicBlock *origBB = nullptr;
  llvm::BasicBlock *endBB = nullptr;

  // Skip over the offset (and the vtable load) if we're supposed to
  // null-check the pointer.
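  //
  // Rough shape of the null-checked path (block names as created below;
  // an illustrative sketch, not exact emitted IR):
  //
  //   entry:        br (Value == null), cast.end, cast.notnull
  //   cast.notnull: apply offsets, bitcast, br cast.end
  //   cast.end:     phi [adjusted, cast.notnull], [null, entry]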
  if (NullCheckValue) {
    origBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("cast.notnull");
    endBB = createBasicBlock("cast.end");

    llvm::Value *isNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(isNull, endBB, notNullBB);
    EmitBlock(notNullBB);
  }

  // Compute the virtual offset.
  llvm::Value *VirtualOffset = nullptr;
  if (VBase) {
    VirtualOffset =
      CGM.getCXXABI().GetVirtualBaseClassOffset(*this, Value, Derived, VBase);
  }

  // Apply both offsets.
  Value = ApplyNonVirtualAndVirtualOffset(*this, Value,
                                          NonVirtualOffset,
                                          VirtualOffset);

  // Cast to the destination type.
  Value = Builder.CreateBitCast(Value, BasePtrTy);

  // Build a phi if we needed a null check.
  if (NullCheckValue) {
    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    Builder.CreateBr(endBB);
    EmitBlock(endBB);

    llvm::PHINode *PHI = Builder.CreatePHI(BasePtrTy, 2, "cast.result");
    PHI->addIncoming(Value, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(BasePtrTy), origBB);
    Value = PHI;
  }

  return Value;
}

llvm::Value *
CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
                                          const CXXRecordDecl *Derived,
                                        CastExpr::path_const_iterator PathBegin,
                                          CastExpr::path_const_iterator PathEnd,
                                          bool NullCheckValue) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  QualType DerivedTy =
    getContext().getCanonicalType(getContext().getTagDeclType(Derived));
  llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo();

  llvm::Value *NonVirtualOffset =
    CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd);

  if (!NonVirtualOffset) {
    // No offset, we can just cast back.
    return Builder.CreateBitCast(Value, DerivedPtrTy);
  }

  llvm::BasicBlock *CastNull = nullptr;
  llvm::BasicBlock *CastNotNull = nullptr;
  llvm::BasicBlock *CastEnd = nullptr;

  if (NullCheckValue) {
    CastNull = createBasicBlock("cast.null");
    CastNotNull = createBasicBlock("cast.notnull");
    CastEnd = createBasicBlock("cast.end");

    llvm::Value *IsNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  // Apply the offset.
  Value = Builder.CreateBitCast(Value, Int8PtrTy);
  Value = Builder.CreateGEP(Value, Builder.CreateNeg(NonVirtualOffset),
                            "sub.ptr");

  // Just cast.
  Value = Builder.CreateBitCast(Value, DerivedPtrTy);

  if (NullCheckValue) {
    Builder.CreateBr(CastEnd);
    EmitBlock(CastNull);
    Builder.CreateBr(CastEnd);
    EmitBlock(CastEnd);

    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()),
                     CastNull);
    Value = PHI;
  }

  return Value;
}

llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD,
                                              bool ForVirtualBase,
                                              bool Delegating) {
  if (!CGM.getCXXABI().NeedsVTTParameter(GD)) {
    // This constructor/destructor does not need a VTT parameter.
    return nullptr;
  }

  const CXXRecordDecl *RD = cast<CXXMethodDecl>(CurCodeDecl)->getParent();
  const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent();

  llvm::Value *VTT;

  uint64_t SubVTTIndex;

  if (Delegating) {
    // If this is a delegating constructor call, just load the VTT.
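    // (A constructor that delegates, e.g. 'C() : C(0) {}', was itself
    // passed the VTT appropriate for the object under construction, so
    // that VTT can simply be forwarded to the delegated-to variant.)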
    return LoadCXXVTT();
  } else if (RD == Base) {
    // If the record matches the base, this is the complete ctor/dtor
    // variant calling the base variant in a class with virtual bases.
    assert(!CGM.getCXXABI().NeedsVTTParameter(CurGD) &&
           "doing no-op VTT offset in base dtor/ctor?");
    assert(!ForVirtualBase && "Can't have same class as virtual base!");
    SubVTTIndex = 0;
  } else {
    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
    CharUnits BaseOffset = ForVirtualBase ?
      Layout.getVBaseClassOffset(Base) :
      Layout.getBaseClassOffset(Base);

    SubVTTIndex =
      CGM.getVTables().getSubVTTIndex(RD, BaseSubobject(Base, BaseOffset));
    assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!");
  }

  if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
    // A VTT parameter was passed to the constructor, use it.
    VTT = LoadCXXVTT();
    VTT = Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex);
  } else {
    // We're the complete constructor, so get the VTT by name.
    VTT = CGM.getVTables().GetAddrOfVTT(RD);
    VTT = Builder.CreateConstInBoundsGEP2_64(VTT, 0, SubVTTIndex);
  }

  return VTT;
}

namespace {
  /// Call the destructor for a direct base class.
  struct CallBaseDtor : EHScopeStack::Cleanup {
    const CXXRecordDecl *BaseClass;
    bool BaseIsVirtual;
    CallBaseDtor(const CXXRecordDecl *Base, bool BaseIsVirtual)
      : BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      const CXXRecordDecl *DerivedClass =
        cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent();

      const CXXDestructorDecl *D = BaseClass->getDestructor();
      llvm::Value *Addr =
        CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThis(),
                                                  DerivedClass, BaseClass,
                                                  BaseIsVirtual);
      CGF.EmitCXXDestructorCall(D, Dtor_Base, BaseIsVirtual,
                                /*Delegating=*/false, Addr);
    }
  };

  /// A visitor which checks whether an initializer uses 'this' in a
  /// way which requires the vtable to be properly set.
  struct DynamicThisUseChecker : EvaluatedExprVisitor<DynamicThisUseChecker> {
    typedef EvaluatedExprVisitor<DynamicThisUseChecker> super;

    bool UsesThis;

    DynamicThisUseChecker(ASTContext &C) : super(C), UsesThis(false) {}

    // Black-list all explicit and implicit references to 'this'.
    //
    // Do we need to worry about external references to 'this' derived
    // from arbitrary code?  If so, then anything which runs arbitrary
    // external code might potentially access the vtable.
    void VisitCXXThisExpr(CXXThisExpr *E) { UsesThis = true; }
  };
}

static bool BaseInitializerUsesThis(ASTContext &C, const Expr *Init) {
  DynamicThisUseChecker Checker(C);
  Checker.Visit(const_cast<Expr*>(Init));
  return Checker.UsesThis;
}

static void EmitBaseInitializer(CodeGenFunction &CGF,
                                const CXXRecordDecl *ClassDecl,
                                CXXCtorInitializer *BaseInit,
                                CXXCtorType CtorType) {
  assert(BaseInit->isBaseInitializer() &&
         "Must have base initializer!");

  llvm::Value *ThisPtr = CGF.LoadCXXThis();

  const Type *BaseType = BaseInit->getBaseClass();
  CXXRecordDecl *BaseClassDecl =
    cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());

  bool isBaseVirtual = BaseInit->isBaseVirtual();

  // The base constructor doesn't construct virtual bases.
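  // For example, given (illustrative only)
  //   struct A { };  struct B : virtual A { B(); };
  // B's base-variant constructor does not run the A initializer; the
  // complete-object variant (or the most-derived class's constructor)
  // is responsible for it.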
  if (CtorType == Ctor_Base && isBaseVirtual)
    return;

  // If the initializer for the base (other than the constructor
  // itself) accesses 'this' in any way, we need to initialize the
  // vtables.
  if (BaseInitializerUsesThis(CGF.getContext(), BaseInit->getInit()))
    CGF.InitializeVTablePointers(ClassDecl);

  // We can pretend to be a complete class because it only matters for
  // virtual bases, and we only do virtual bases for complete ctors.
  llvm::Value *V =
    CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl,
                                              BaseClassDecl,
                                              isBaseVirtual);
  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(BaseType);
  AggValueSlot AggSlot =
    AggValueSlot::forAddr(V, Alignment, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased);

  CGF.EmitAggExpr(BaseInit->getInit(), AggSlot);

  if (CGF.CGM.getLangOpts().Exceptions &&
      !BaseClassDecl->hasTrivialDestructor())
    CGF.EHStack.pushCleanup<CallBaseDtor>(EHCleanup, BaseClassDecl,
                                          isBaseVirtual);
}

static void EmitAggMemberInitializer(CodeGenFunction &CGF,
                                     LValue LHS,
                                     Expr *Init,
                                     llvm::Value *ArrayIndexVar,
                                     QualType T,
                                     ArrayRef<VarDecl *> ArrayIndexes,
                                     unsigned Index) {
  if (Index == ArrayIndexes.size()) {
    LValue LV = LHS;

    if (ArrayIndexVar) {
      // If we have an array index variable, load it and use it as an offset.
      // Then, increment the value.
      llvm::Value *Dest = LHS.getAddress();
      llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar);
      Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress");
      llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1);
      Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc");
      CGF.Builder.CreateStore(Next, ArrayIndexVar);

      // Update the LValue.
      LV.setAddress(Dest);
      CharUnits Align = CGF.getContext().getTypeAlignInChars(T);
      LV.setAlignment(std::min(Align, LV.getAlignment()));
    }

    switch (CGF.getEvaluationKind(T)) {
    case TEK_Scalar:
      CGF.EmitScalarInit(Init, /*decl*/ nullptr, LV, false);
      break;
    case TEK_Complex:
      CGF.EmitComplexExprIntoLValue(Init, LV, /*isInit*/ true);
      break;
    case TEK_Aggregate: {
      AggValueSlot Slot =
        AggValueSlot::forLValue(LV,
                                AggValueSlot::IsDestructed,
                                AggValueSlot::DoesNotNeedGCBarriers,
                                AggValueSlot::IsNotAliased);

      CGF.EmitAggExpr(Init, Slot);
      break;
    }
    }

    return;
  }

  const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(T);
  assert(Array && "Array initialization without the array type?");
  llvm::Value *IndexVar
    = CGF.GetAddrOfLocalVar(ArrayIndexes[Index]);
  assert(IndexVar && "Array index variable not loaded");

  // Initialize this index variable to zero.
  llvm::Value* Zero
    = llvm::Constant::getNullValue(
        CGF.ConvertType(CGF.getContext().getSizeType()));
  CGF.Builder.CreateStore(Zero, IndexVar);

  // Start the loop with a block that tests the condition.
  llvm::BasicBlock *CondBlock = CGF.createBasicBlock("for.cond");
  llvm::BasicBlock *AfterFor = CGF.createBasicBlock("for.end");

  CGF.EmitBlock(CondBlock);

  llvm::BasicBlock *ForBody = CGF.createBasicBlock("for.body");
  // Generate: if (loop-index < number-of-elements) fall to the loop body,
  // otherwise, go to the block after the for-loop.
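  //
  // Rough shape of the emitted loop (block names as created here):
  //   for.cond: if (index < NumElements) br for.body; else br for.end
  //   for.body: emit the inner loop or the element initializer
  //   for.inc:  index += 1; br for.cond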
  uint64_t NumElements = Array->getSize().getZExtValue();
  llvm::Value *Counter = CGF.Builder.CreateLoad(IndexVar);
  llvm::Value *NumElementsPtr =
    llvm::ConstantInt::get(Counter->getType(), NumElements);
  llvm::Value *IsLess = CGF.Builder.CreateICmpULT(Counter, NumElementsPtr,
                                                  "isless");

  // If the condition is true, execute the body.
  CGF.Builder.CreateCondBr(IsLess, ForBody, AfterFor);

  CGF.EmitBlock(ForBody);
  llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc");

  // Inside the loop body recurse to emit the inner loop or, eventually, the
  // constructor call.
  EmitAggMemberInitializer(CGF, LHS, Init, ArrayIndexVar,
                           Array->getElementType(), ArrayIndexes, Index + 1);

  CGF.EmitBlock(ContinueBlock);

  // Emit the increment of the loop counter.
  llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1);
  Counter = CGF.Builder.CreateLoad(IndexVar);
  NextVal = CGF.Builder.CreateAdd(Counter, NextVal, "inc");
  CGF.Builder.CreateStore(NextVal, IndexVar);

  // Finally, branch back up to the condition for the next iteration.
  CGF.EmitBranch(CondBlock);

  // Emit the fall-through block.
  CGF.EmitBlock(AfterFor, true);
}

static void EmitMemberInitializer(CodeGenFunction &CGF,
                                  const CXXRecordDecl *ClassDecl,
                                  CXXCtorInitializer *MemberInit,
                                  const CXXConstructorDecl *Constructor,
                                  FunctionArgList &Args) {
  assert(MemberInit->isAnyMemberInitializer() &&
         "Must have member initializer!");
  assert(MemberInit->getInit() && "Must have initializer!");

  // non-static data member initializers.
  FieldDecl *Field = MemberInit->getAnyMember();
  QualType FieldType = Field->getType();

  llvm::Value *ThisPtr = CGF.LoadCXXThis();
  QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
  LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);

  if (MemberInit->isIndirectMemberInitializer()) {
    // If we are initializing an anonymous union field, drill down to
    // the field.
    IndirectFieldDecl *IndirectField = MemberInit->getIndirectMember();
    for (const auto *I : IndirectField->chain())
      LHS = CGF.EmitLValueForFieldInitialization(LHS, cast<FieldDecl>(I));
    FieldType = MemberInit->getIndirectMember()->getAnonField()->getType();
  } else {
    LHS = CGF.EmitLValueForFieldInitialization(LHS, Field);
  }

  // Special case: if we are in a copy or move constructor, and we are copying
  // an array of PODs or classes with trivial copy constructors, ignore the
  // AST and perform the copy we know is equivalent.
  // FIXME: This is hacky at best... if we had a bit more explicit information
  // in the AST, we could generalize it more easily.
  const ConstantArrayType *Array
    = CGF.getContext().getAsConstantArrayType(FieldType);
  if (Array && Constructor->isDefaulted() &&
      Constructor->isCopyOrMoveConstructor()) {
    QualType BaseElementTy = CGF.getContext().getBaseElementType(Array);
    CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());
    if (BaseElementTy.isPODType(CGF.getContext()) ||
        (CE && CE->getConstructor()->isTrivial())) {
      // Find the source pointer.  We know it's the last argument because
      // we know we're in an implicit copy constructor.
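      // (For an implicit 'X(const X &other)', the IRGen argument list is
      // roughly [this, other], so the source is assumed to be Args.back().)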
      unsigned SrcArgIndex = Args.size() - 1;
      llvm::Value *SrcPtr
        = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex]));
      LValue ThisRHSLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
      LValue Src = CGF.EmitLValueForFieldInitialization(ThisRHSLV, Field);

      // Copy the aggregate.
      CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType,
                            LHS.isVolatileQualified());
      return;
    }
  }

  ArrayRef<VarDecl *> ArrayIndexes;
  if (MemberInit->getNumArrayIndices())
    ArrayIndexes = MemberInit->getArrayIndexes();
  CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit(), ArrayIndexes);
}

void CodeGenFunction::EmitInitializerForField(FieldDecl *Field,
                                              LValue LHS, Expr *Init,
                                             ArrayRef<VarDecl *> ArrayIndexes) {
  QualType FieldType = Field->getType();
  switch (getEvaluationKind(FieldType)) {
  case TEK_Scalar:
    if (LHS.isSimple()) {
      EmitExprAsInit(Init, Field, LHS, false);
    } else {
      RValue RHS = RValue::get(EmitScalarExpr(Init));
      EmitStoreThroughLValue(RHS, LHS);
    }
    break;
  case TEK_Complex:
    EmitComplexExprIntoLValue(Init, LHS, /*isInit*/ true);
    break;
  case TEK_Aggregate: {
    llvm::Value *ArrayIndexVar = nullptr;
    if (ArrayIndexes.size()) {
      llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

      // The LHS is a pointer to the first object we'll be constructing, as
      // a flat array.
      QualType BaseElementTy = getContext().getBaseElementType(FieldType);
      llvm::Type *BasePtr = ConvertType(BaseElementTy);
      BasePtr = llvm::PointerType::getUnqual(BasePtr);
      llvm::Value *BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(),
                                                       BasePtr);
      LHS = MakeAddrLValue(BaseAddrPtr, BaseElementTy);

      // Create an array index that will be used to walk over all of the
      // objects we're constructing.
      ArrayIndexVar = CreateTempAlloca(SizeTy, "object.index");
      llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
      Builder.CreateStore(Zero, ArrayIndexVar);

      // Emit the block variables for the array indices, if any.
      for (unsigned I = 0, N = ArrayIndexes.size(); I != N; ++I)
        EmitAutoVarDecl(*ArrayIndexes[I]);
    }

    EmitAggMemberInitializer(*this, LHS, Init, ArrayIndexVar, FieldType,
                             ArrayIndexes, 0);
  }
  }

  // Ensure that we destroy this object if an exception is thrown
  // later in the constructor.
  QualType::DestructionKind dtorKind = FieldType.isDestructedType();
  if (needsEHCleanup(dtorKind))
    pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
}

/// Checks whether the given constructor is a valid subject for the
/// complete-to-base constructor delegation optimization, i.e.
/// emitting the complete constructor as a simple call to the base
/// constructor.
static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor) {

  // Currently we disable the optimization for classes with virtual
  // bases because (1) the addresses of parameter variables need to be
  // consistent across all initializers but (2) the delegate function
  // call necessarily creates a second copy of the parameter variable.
  //
  // The limiting example (purely theoretical AFAIK):
  //   struct A { A(int &c) { c++; } };
  //   struct B : virtual A {
  //     B(int count) : A(count) { printf("%d\n", count); }
  //   };
  // ...although even this example could in principle be emitted as a
  // delegation since the address of the parameter doesn't escape.
  if (Ctor->getParent()->getNumVBases()) {
    // TODO: white-list trivial vbase initializers.  This case wouldn't
    // be subject to the restrictions below.

    // TODO: white-list cases where:
    //  - there are no non-reference parameters to the constructor
    //  - the initializers don't access any non-reference parameters
    //  - the initializers don't take the address of non-reference
    //    parameters
    //  - etc.
    // If we ever add any of the above cases, remember that:
    //  - function-try-blocks will always blacklist this optimization
    //  - we need to perform the constructor prologue and cleanup in
    //    EmitConstructorBody.

    return false;
  }

  // We also disable the optimization for variadic functions because
  // it's impossible to "re-pass" varargs.
  if (Ctor->getType()->getAs<FunctionProtoType>()->isVariadic())
    return false;

  // FIXME: Decide if we can do a delegation of a delegating constructor.
  if (Ctor->isDelegatingConstructor())
    return false;

  return true;
}

/// EmitConstructorBody - Emits the body of the current constructor.
void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
  const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(CurGD.getDecl());
  CXXCtorType CtorType = CurGD.getCtorType();

  assert((CGM.getTarget().getCXXABI().hasConstructorVariants() ||
          CtorType == Ctor_Complete) &&
         "can only generate complete ctor for this ABI");

  // Before we go any further, try the complete->base constructor
  // delegation optimization.
  if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor) &&
      CGM.getTarget().getCXXABI().hasConstructorVariants()) {
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitLocation(Builder, Ctor->getLocEnd());
    EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args, Ctor->getLocEnd());
    return;
  }

  const FunctionDecl *Definition = nullptr;
  Stmt *Body = Ctor->getBody(Definition);
  assert(Definition == Ctor && "emitting wrong constructor body");

  // Enter the function-try-block before the constructor prologue if
  // applicable.
  bool IsTryBody = (Body && isa<CXXTryStmt>(Body));
  if (IsTryBody)
    EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);

  RegionCounter Cnt = getPGORegionCounter(Body);
  Cnt.beginRegion(Builder);

  RunCleanupsScope RunCleanups(*this);

  // TODO: in restricted cases, we can emit the vbase initializers of
  // a complete ctor and then delegate to the base ctor.

  // Emit the constructor prologue, i.e. the base and member
  // initializers.
  EmitCtorPrologue(Ctor, CtorType, Args);

  // Emit the body of the constructor.
  if (IsTryBody)
    EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
  else if (Body)
    EmitStmt(Body);

  // Emit any cleanup blocks associated with the member or base
  // initializers, which includes (along the exceptional path) the
  // destructors for those members and bases that were fully
  // constructed.
  RunCleanups.ForceCleanup();

  if (IsTryBody)
    ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
}

namespace {
  /// RAII object to indicate that codegen is copying the value representation
  /// instead of the object representation. Useful when copying a struct or
  /// class which has uninitialized members and we're only performing
  /// lvalue-to-rvalue conversion on the object but not its members.
  class CopyingValueRepresentation {
  public:
    explicit CopyingValueRepresentation(CodeGenFunction &CGF)
        : CGF(CGF), SO(*CGF.SanOpts), OldSanOpts(CGF.SanOpts) {
      SO.Bool = false;
      SO.Enum = false;
      CGF.SanOpts = &SO;
    }
    ~CopyingValueRepresentation() {
      CGF.SanOpts = OldSanOpts;
    }
  private:
    CodeGenFunction &CGF;
    SanitizerOptions SO;
    const SanitizerOptions *OldSanOpts;
  };
}

namespace {
  class FieldMemcpyizer {
  public:
    FieldMemcpyizer(CodeGenFunction &CGF, const CXXRecordDecl *ClassDecl,
                    const VarDecl *SrcRec)
      : CGF(CGF), ClassDecl(ClassDecl), SrcRec(SrcRec),
        RecLayout(CGF.getContext().getASTRecordLayout(ClassDecl)),
        FirstField(nullptr), LastField(nullptr), FirstFieldOffset(0),
        LastFieldOffset(0), LastAddedFieldIndex(0) {}

    static bool isMemcpyableField(FieldDecl *F) {
      Qualifiers Qual = F->getType().getQualifiers();
      if (Qual.hasVolatile() || Qual.hasObjCLifetime())
        return false;
      return true;
    }

    void addMemcpyableField(FieldDecl *F) {
      if (!FirstField)
        addInitialField(F);
      else
        addNextField(F);
    }

    CharUnits getMemcpySize() const {
      unsigned LastFieldSize =
        LastField->isBitField() ?
          LastField->getBitWidthValue(CGF.getContext()) :
          CGF.getContext().getTypeSize(LastField->getType());
      uint64_t MemcpySizeBits =
        LastFieldOffset + LastFieldSize - FirstFieldOffset +
        CGF.getContext().getCharWidth() - 1;
      CharUnits MemcpySize =
        CGF.getContext().toCharUnitsFromBits(MemcpySizeBits);
      return MemcpySize;
    }

    void emitMemcpy() {
      // Give the subclass a chance to bail out if it feels the memcpy isn't
      // worth it (e.g. hasn't aggregated enough data).
      if (!FirstField) {
        return;
      }

      CharUnits Alignment;

      if (FirstField->isBitField()) {
        const CGRecordLayout &RL =
          CGF.getTypes().getCGRecordLayout(FirstField->getParent());
        const CGBitFieldInfo &BFInfo = RL.getBitFieldInfo(FirstField);
        Alignment = CharUnits::fromQuantity(BFInfo.StorageAlignment);
      } else {
        Alignment = CGF.getContext().getDeclAlign(FirstField);
      }

      assert((CGF.getContext().toCharUnitsFromBits(FirstFieldOffset) %
              Alignment) == 0 && "Bad field alignment.");

      CharUnits MemcpySize = getMemcpySize();
      QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
      llvm::Value *ThisPtr = CGF.LoadCXXThis();
      LValue DestLV = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
      LValue Dest = CGF.EmitLValueForFieldInitialization(DestLV, FirstField);
      llvm::Value *SrcPtr =
        CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(SrcRec));
      LValue SrcLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
      LValue Src = CGF.EmitLValueForFieldInitialization(SrcLV, FirstField);

      emitMemcpyIR(Dest.isBitField() ? Dest.getBitFieldAddr()
                                     : Dest.getAddress(),
                   Src.isBitField() ? Src.getBitFieldAddr() : Src.getAddress(),
                   MemcpySize, Alignment);
      reset();
    }

    void reset() {
      FirstField = nullptr;
    }

  protected:
    CodeGenFunction &CGF;
    const CXXRecordDecl *ClassDecl;

  private:

    void emitMemcpyIR(llvm::Value *DestPtr, llvm::Value *SrcPtr,
                      CharUnits Size, CharUnits Alignment) {
      llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
      llvm::Type *DBP =
        llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), DPT->getAddressSpace());
      DestPtr = CGF.Builder.CreateBitCast(DestPtr, DBP);

      llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
      llvm::Type *SBP =
        llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), SPT->getAddressSpace());
      SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, SBP);

      CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, Size.getQuantity(),
                               Alignment.getQuantity());
    }

    void addInitialField(FieldDecl *F) {
      FirstField = F;
      LastField = F;
      FirstFieldOffset = RecLayout.getFieldOffset(F->getFieldIndex());
      LastFieldOffset = FirstFieldOffset;
      LastAddedFieldIndex = F->getFieldIndex();
    }

    void addNextField(FieldDecl *F) {
      // For the most part, the following invariant will hold:
      //   F->getFieldIndex() == LastAddedFieldIndex + 1
      // The one exception is that Sema won't add a copy-initializer for an
      // unnamed bitfield, which will show up here as a gap in the sequence.
      assert(F->getFieldIndex() >= LastAddedFieldIndex + 1 &&
             "Cannot aggregate fields out of order.");
      LastAddedFieldIndex = F->getFieldIndex();

      // The 'first' and 'last' fields are chosen by offset, rather than field
      // index. This allows the code to support bitfields, as well as regular
      // fields.
      uint64_t FOffset = RecLayout.getFieldOffset(F->getFieldIndex());
      if (FOffset < FirstFieldOffset) {
        FirstField = F;
        FirstFieldOffset = FOffset;
      } else if (FOffset > LastFieldOffset) {
        LastField = F;
        LastFieldOffset = FOffset;
      }
    }

    const VarDecl *SrcRec;
    const ASTRecordLayout &RecLayout;
    FieldDecl *FirstField;
    FieldDecl *LastField;
    uint64_t FirstFieldOffset, LastFieldOffset;
    unsigned LastAddedFieldIndex;
  };

  class ConstructorMemcpyizer : public FieldMemcpyizer {
  private:

    /// Get source argument for copy constructor. Returns null if not a copy
    /// constructor.
    static const VarDecl *getTrivialCopySource(const CXXConstructorDecl *CD,
                                               FunctionArgList &Args) {
      if (CD->isCopyOrMoveConstructor() && CD->isDefaulted())
        return Args[Args.size() - 1];
      return nullptr;
    }

    // Returns true if a CXXCtorInitializer represents a member initialization
    // that can be rolled into a memcpy.
    bool isMemberInitMemcpyable(CXXCtorInitializer *MemberInit) const {
      if (!MemcpyableCtor)
        return false;
      FieldDecl *Field = MemberInit->getMember();
      assert(Field && "No field for member init.");
      QualType FieldType = Field->getType();
      CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());

      // Bail out on non-POD, not-trivially-constructible members.
      if (!(CE && CE->getConstructor()->isTrivial()) &&
          !(FieldType.isTriviallyCopyableType(CGF.getContext()) ||
            FieldType->isReferenceType()))
        return false;

      // Bail out on volatile fields.
      if (!isMemcpyableField(Field))
        return false;

      // Otherwise we're good.
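      // (Illustratively, both inits in 'S(const S &o) : a(o.a), b(o.b) {}'
      // with plain int members would typically qualify.)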
      return true;
    }

  public:
    ConstructorMemcpyizer(CodeGenFunction &CGF, const CXXConstructorDecl *CD,
                          FunctionArgList &Args)
      : FieldMemcpyizer(CGF, CD->getParent(), getTrivialCopySource(CD, Args)),
        ConstructorDecl(CD),
        MemcpyableCtor(CD->isDefaulted() &&
                       CD->isCopyOrMoveConstructor() &&
                       CGF.getLangOpts().getGC() == LangOptions::NonGC),
        Args(Args) { }

    void addMemberInitializer(CXXCtorInitializer *MemberInit) {
      if (isMemberInitMemcpyable(MemberInit)) {
        AggregatedInits.push_back(MemberInit);
        addMemcpyableField(MemberInit->getMember());
      } else {
        emitAggregatedInits();
        EmitMemberInitializer(CGF, ConstructorDecl->getParent(), MemberInit,
                              ConstructorDecl, Args);
      }
    }

    void emitAggregatedInits() {
      if (AggregatedInits.size() <= 1) {
        // This memcpy is too small to be worthwhile. Fall back on default
        // codegen.
        if (!AggregatedInits.empty()) {
          CopyingValueRepresentation CVR(CGF);
          EmitMemberInitializer(CGF, ConstructorDecl->getParent(),
                                AggregatedInits[0], ConstructorDecl, Args);
        }
        reset();
        return;
      }

      pushEHDestructors();
      emitMemcpy();
      AggregatedInits.clear();
    }

    void pushEHDestructors() {
      llvm::Value *ThisPtr = CGF.LoadCXXThis();
      QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
      LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);

      for (unsigned i = 0; i < AggregatedInits.size(); ++i) {
        QualType FieldType = AggregatedInits[i]->getMember()->getType();
        QualType::DestructionKind dtorKind = FieldType.isDestructedType();
        if (CGF.needsEHCleanup(dtorKind))
          CGF.pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
      }
    }

    void finish() {
      emitAggregatedInits();
    }

  private:
    const CXXConstructorDecl *ConstructorDecl;
    bool MemcpyableCtor;
    FunctionArgList &Args;
    SmallVector<CXXCtorInitializer*, 16> AggregatedInits;
  };

  class AssignmentMemcpyizer : public FieldMemcpyizer {
  private:

    // Returns the memcpyable field copied by the given statement, if one
    // exists. Otherwise returns null.
    FieldDecl *getMemcpyableField(Stmt *S) {
      if (!AssignmentsMemcpyable)
        return nullptr;
      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(S)) {
        // Recognise trivial assignments.
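        // (e.g. the 'a = other.a;' statements Sema synthesizes for an
        // implicit copy-assignment operator: an assignment whose LHS and
        // RHS are MemberExprs naming the same field.)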
        if (BO->getOpcode() != BO_Assign)
          return nullptr;
        MemberExpr *ME = dyn_cast<MemberExpr>(BO->getLHS());
        if (!ME)
          return nullptr;
        FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
        if (!Field || !isMemcpyableField(Field))
          return nullptr;
        Stmt *RHS = BO->getRHS();
        if (ImplicitCastExpr *EC = dyn_cast<ImplicitCastExpr>(RHS))
          RHS = EC->getSubExpr();
        if (!RHS)
          return nullptr;
        MemberExpr *ME2 = dyn_cast<MemberExpr>(RHS);
        if (!ME2 || dyn_cast<FieldDecl>(ME2->getMemberDecl()) != Field)
          return nullptr;
        return Field;
      } else if (CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(S)) {
        CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MCE->getCalleeDecl());
        if (!(MD && (MD->isCopyAssignmentOperator() ||
                     MD->isMoveAssignmentOperator()) &&
              MD->isTrivial()))
          return nullptr;
        MemberExpr *IOA = dyn_cast<MemberExpr>(MCE->getImplicitObjectArgument());
        if (!IOA)
          return nullptr;
        FieldDecl *Field = dyn_cast<FieldDecl>(IOA->getMemberDecl());
        if (!Field || !isMemcpyableField(Field))
          return nullptr;
        MemberExpr *Arg0 = dyn_cast<MemberExpr>(MCE->getArg(0));
        if (!Arg0 || Field != dyn_cast<FieldDecl>(Arg0->getMemberDecl()))
          return nullptr;
        return Field;
      } else if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
        FunctionDecl *FD = dyn_cast<FunctionDecl>(CE->getCalleeDecl());
        if (!FD || FD->getBuiltinID() != Builtin::BI__builtin_memcpy)
          return nullptr;
        Expr *DstPtr = CE->getArg(0);
        if (ImplicitCastExpr *DC = dyn_cast<ImplicitCastExpr>(DstPtr))
          DstPtr = DC->getSubExpr();
        UnaryOperator *DUO = dyn_cast<UnaryOperator>(DstPtr);
        if (!DUO || DUO->getOpcode() != UO_AddrOf)
          return nullptr;
        MemberExpr *ME = dyn_cast<MemberExpr>(DUO->getSubExpr());
        if (!ME)
          return nullptr;
        FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
        if (!Field || !isMemcpyableField(Field))
          return nullptr;
        Expr *SrcPtr = CE->getArg(1);
        if (ImplicitCastExpr *SC = dyn_cast<ImplicitCastExpr>(SrcPtr))
          SrcPtr = SC->getSubExpr();
        UnaryOperator *SUO = dyn_cast<UnaryOperator>(SrcPtr);
        if (!SUO || SUO->getOpcode() != UO_AddrOf)
          return nullptr;
        MemberExpr *ME2 = dyn_cast<MemberExpr>(SUO->getSubExpr());
        if (!ME2 || Field != dyn_cast<FieldDecl>(ME2->getMemberDecl()))
          return nullptr;
        return Field;
      }

      return nullptr;
    }

    bool AssignmentsMemcpyable;
    SmallVector<Stmt*, 16> AggregatedStmts;

  public:

    AssignmentMemcpyizer(CodeGenFunction &CGF, const CXXMethodDecl *AD,
                         FunctionArgList &Args)
      : FieldMemcpyizer(CGF, AD->getParent(), Args[Args.size() - 1]),
        AssignmentsMemcpyable(CGF.getLangOpts().getGC() == LangOptions::NonGC) {
      assert(Args.size() == 2);
    }

    void emitAssignment(Stmt *S) {
      FieldDecl *F = getMemcpyableField(S);
      if (F) {
        addMemcpyableField(F);
        AggregatedStmts.push_back(S);
      } else {
        emitAggregatedStmts();
        CGF.EmitStmt(S);
      }
    }

    void emitAggregatedStmts() {
      if (AggregatedStmts.size() <= 1) {
        // Too few assignments to be worth a memcpy; emit the single
        // statement (if any) with default codegen and reset, so that the
        // emitMemcpy() below bails out.
        if (!AggregatedStmts.empty()) {
          CopyingValueRepresentation CVR(CGF);
          CGF.EmitStmt(AggregatedStmts[0]);
        }
        reset();
      }

      emitMemcpy();
      AggregatedStmts.clear();
    }

    void finish() {
      emitAggregatedStmts();
    }
  };

}

/// EmitCtorPrologue - This routine generates necessary code to initialize
/// base classes and non-static data members belonging to this constructor.
void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
                                       CXXCtorType CtorType,
                                       FunctionArgList &Args) {
  if (CD->isDelegatingConstructor())
    return EmitDelegatingCXXConstructorCall(CD, Args);

  const CXXRecordDecl *ClassDecl = CD->getParent();

  CXXConstructorDecl::init_const_iterator B = CD->init_begin(),
                                          E = CD->init_end();

  llvm::BasicBlock *BaseCtorContinueBB = nullptr;
  if (ClassDecl->getNumVBases() &&
      !CGM.getTarget().getCXXABI().hasConstructorVariants()) {
    // The ABIs that don't have constructor variants need to put a branch
    // before the virtual base initialization code.
    BaseCtorContinueBB =
      CGM.getCXXABI().EmitCtorCompleteObjectHandler(*this, ClassDecl);
    assert(BaseCtorContinueBB);
  }

  // Virtual base initializers first.
  for (; B != E && (*B)->isBaseInitializer() && (*B)->isBaseVirtual(); B++) {
    EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
  }

  if (BaseCtorContinueBB) {
    // Complete object handler should continue to the remaining initializers.
    Builder.CreateBr(BaseCtorContinueBB);
    EmitBlock(BaseCtorContinueBB);
  }

  // Then, non-virtual base initializers.
  for (; B != E && (*B)->isBaseInitializer(); B++) {
    assert(!(*B)->isBaseVirtual());
    EmitBaseInitializer(*this, ClassDecl, *B, CtorType);
  }

  InitializeVTablePointers(ClassDecl);

  // And finally, initialize class members.
  FieldConstructionScope FCS(*this, CXXThisValue);
  ConstructorMemcpyizer CM(*this, CD, Args);
  for (; B != E; B++) {
    CXXCtorInitializer *Member = (*B);
    assert(!Member->isBaseInitializer());
    assert(Member->isAnyMemberInitializer() &&
           "Delegating initializer on non-delegating constructor");
    CM.addMemberInitializer(Member);
  }
  CM.finish();
}

static bool
FieldHasTrivialDestructorBody(ASTContext &Context, const FieldDecl *Field);

static bool
HasTrivialDestructorBody(ASTContext &Context,
                         const CXXRecordDecl *BaseClassDecl,
                         const CXXRecordDecl *MostDerivedClassDecl)
{
  // If the destructor is trivial we don't have to check anything else.
  if (BaseClassDecl->hasTrivialDestructor())
    return true;

  if (!BaseClassDecl->getDestructor()->hasTrivialBody())
    return false;

  // Check fields.
  for (const auto *Field : BaseClassDecl->fields())
    if (!FieldHasTrivialDestructorBody(Context, Field))
      return false;

  // Check non-virtual bases.
  for (const auto &I : BaseClassDecl->bases()) {
    if (I.isVirtual())
      continue;

    const CXXRecordDecl *NonVirtualBase =
      cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
    if (!HasTrivialDestructorBody(Context, NonVirtualBase,
                                  MostDerivedClassDecl))
      return false;
  }

  if (BaseClassDecl == MostDerivedClassDecl) {
    // Check virtual bases.
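    // (Virtual bases are destroyed by the most-derived class's destructor,
    // so they only need to be considered at the top of the recursion.)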
    for (const auto &I : BaseClassDecl->vbases()) {
      const CXXRecordDecl *VirtualBase =
        cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
      if (!HasTrivialDestructorBody(Context, VirtualBase,
                                    MostDerivedClassDecl))
        return false;
    }
  }

  return true;
}

static bool
FieldHasTrivialDestructorBody(ASTContext &Context,
                              const FieldDecl *Field)
{
  QualType FieldBaseElementType = Context.getBaseElementType(Field->getType());

  const RecordType *RT = FieldBaseElementType->getAs<RecordType>();
  if (!RT)
    return true;

  CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
  return HasTrivialDestructorBody(Context, FieldClassDecl, FieldClassDecl);
}

/// CanSkipVTablePointerInitialization - Check whether we need to initialize
/// any vtable pointers before calling this destructor.
static bool CanSkipVTablePointerInitialization(ASTContext &Context,
                                               const CXXDestructorDecl *Dtor) {
  if (!Dtor->hasTrivialBody())
    return false;

  // Check the fields.
  const CXXRecordDecl *ClassDecl = Dtor->getParent();
  for (const auto *Field : ClassDecl->fields())
    if (!FieldHasTrivialDestructorBody(Context, Field))
      return false;

  return true;
}

/// EmitDestructorBody - Emits the body of the current destructor.
void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
  const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CurGD.getDecl());
  CXXDtorType DtorType = CurGD.getDtorType();

  // The call to operator delete in a deleting destructor happens
  // outside of the function-try-block, which means it's always
  // possible to delegate the destructor body to the complete
  // destructor.  Do so.
  if (DtorType == Dtor_Deleting) {
    EnterDtorCleanups(Dtor, Dtor_Deleting);
    EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
                          /*Delegating=*/false, LoadCXXThis());
    PopCleanupBlock();
    return;
  }

  Stmt *Body = Dtor->getBody();

  // If the body is a function-try-block, enter the try before
  // anything else.
  bool isTryBody = (Body && isa<CXXTryStmt>(Body));
  if (isTryBody)
    EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);

  // Enter the epilogue cleanups.
  RunCleanupsScope DtorEpilogue(*this);

  // If this is the complete variant, just invoke the base variant;
  // the epilogue will destruct the virtual bases.  But we can't do
  // this optimization if the body is a function-try-block, because
  // we'd introduce *two* handler blocks.  In the Microsoft ABI, we
  // always delegate because we might not have a definition in this TU.
  switch (DtorType) {
  case Dtor_Deleting: llvm_unreachable("already handled deleting case");

  case Dtor_Complete:
    assert((Body || getTarget().getCXXABI().isMicrosoft()) &&
           "can't emit a dtor without a body for non-Microsoft ABIs");

    // Enter the cleanup scopes for virtual bases.
    EnterDtorCleanups(Dtor, Dtor_Complete);

    if (!isTryBody) {
      EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
                            /*Delegating=*/false, LoadCXXThis());
      break;
    }
    // Fallthrough: act like we're in the base variant.

  case Dtor_Base:
    assert(Body);

    RegionCounter Cnt = getPGORegionCounter(Body);
    Cnt.beginRegion(Builder);

    // Enter the cleanup scopes for fields and non-virtual bases.
    EnterDtorCleanups(Dtor, Dtor_Base);

    // Initialize the vtable pointers before entering the body.
    if (!CanSkipVTablePointerInitialization(getContext(), Dtor))
      InitializeVTablePointers(Dtor->getParent());

    if (isTryBody)
      EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
    else if (Body)
      EmitStmt(Body);
    else {
      assert(Dtor->isImplicit() && "bodyless dtor not implicit");
      // nothing to do besides what's in the epilogue
    }
    // -fapple-kext must inline any call to this dtor into
    // the caller's body.
    if (getLangOpts().AppleKext)
      CurFn->addFnAttr(llvm::Attribute::AlwaysInline);
    break;
  }

  // Jump out through the epilogue cleanups.
  DtorEpilogue.ForceCleanup();

  // Exit the try if applicable.
  if (isTryBody)
    ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
}

void CodeGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &Args) {
  const CXXMethodDecl *AssignOp = cast<CXXMethodDecl>(CurGD.getDecl());
  const Stmt *RootS = AssignOp->getBody();
  assert(isa<CompoundStmt>(RootS) &&
         "Body of an implicit assignment operator should be compound stmt.");
  const CompoundStmt *RootCS = cast<CompoundStmt>(RootS);

  LexicalScope Scope(*this, RootCS->getSourceRange());

  AssignmentMemcpyizer AM(*this, AssignOp, Args);
  for (auto *I : RootCS->body())
    AM.emitAssignment(I);
  AM.finish();
}

namespace {
  /// Call the operator delete associated with the current destructor.
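  ///
  /// (Conceptually, the deleting variant of ~X() ends with a call to
  /// 'operator delete(this)'; this cleanup emits that call.)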
  struct CallDtorDelete : EHScopeStack::Cleanup {
    CallDtorDelete() {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
      const CXXRecordDecl *ClassDecl = Dtor->getParent();
      CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
                         CGF.getContext().getTagDeclType(ClassDecl));
    }
  };

  struct CallDtorDeleteConditional : EHScopeStack::Cleanup {
    llvm::Value *ShouldDeleteCondition;
  public:
    CallDtorDeleteConditional(llvm::Value *ShouldDeleteCondition)
      : ShouldDeleteCondition(ShouldDeleteCondition) {
      assert(ShouldDeleteCondition != nullptr);
    }

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      llvm::BasicBlock *callDeleteBB = CGF.createBasicBlock("dtor.call_delete");
      llvm::BasicBlock *continueBB = CGF.createBasicBlock("dtor.continue");
      llvm::Value *ShouldCallDelete
        = CGF.Builder.CreateIsNull(ShouldDeleteCondition);
      CGF.Builder.CreateCondBr(ShouldCallDelete, continueBB, callDeleteBB);

      CGF.EmitBlock(callDeleteBB);
      const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
      const CXXRecordDecl *ClassDecl = Dtor->getParent();
      CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
                         CGF.getContext().getTagDeclType(ClassDecl));
      CGF.Builder.CreateBr(continueBB);

      CGF.EmitBlock(continueBB);
    }
  };

  class DestroyField : public EHScopeStack::Cleanup {
    const FieldDecl *field;
    CodeGenFunction::Destroyer *destroyer;
    bool useEHCleanupForArray;

  public:
    DestroyField(const FieldDecl *field, CodeGenFunction::Destroyer *destroyer,
                 bool useEHCleanupForArray)
      : field(field), destroyer(destroyer),
        useEHCleanupForArray(useEHCleanupForArray) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Find the address of the field.
      llvm::Value *thisValue = CGF.LoadCXXThis();
      QualType RecordTy = CGF.getContext().getTagDeclType(field->getParent());
      LValue ThisLV = CGF.MakeAddrLValue(thisValue, RecordTy);
      LValue LV = CGF.EmitLValueForField(ThisLV, field);
      assert(LV.isSimple());

      CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer,
                      flags.isForNormalCleanup() && useEHCleanupForArray);
    }
  };
}

/// \brief Emit all code that comes at the end of a class's
/// destructor. This is to call destructors on members and base classes
/// in reverse order of their construction.
void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
                                        CXXDtorType DtorType) {
  assert((!DD->isTrivial() || DD->hasAttr<DLLExportAttr>()) &&
         "Should not emit dtor epilogue for non-exported trivial dtor!");

  // The deleting-destructor phase just needs to call the appropriate
  // operator delete that Sema picked up.
  if (DtorType == Dtor_Deleting) {
    assert(DD->getOperatorDelete() &&
           "operator delete missing - EnterDtorCleanups");
    if (CXXStructorImplicitParamValue) {
      // If there is an implicit param to the deleting dtor, it's a boolean
      // telling whether we should call delete at the end of the dtor.
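      // (The Microsoft C++ ABI, for example, passes such a flag to
      // deleting destructors.)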
      EHStack.pushCleanup<CallDtorDeleteConditional>(
          NormalAndEHCleanup, CXXStructorImplicitParamValue);
    } else {
      EHStack.pushCleanup<CallDtorDelete>(NormalAndEHCleanup);
    }
    return;
  }

  const CXXRecordDecl *ClassDecl = DD->getParent();

  // Unions have no bases and do not call field destructors.
  if (ClassDecl->isUnion())
    return;

  // The complete-destructor phase just destructs all the virtual bases.
  if (DtorType == Dtor_Complete) {

    // We push them in the forward order so that they'll be popped in
    // the reverse order.
    for (const auto &Base : ClassDecl->vbases()) {
      CXXRecordDecl *BaseClassDecl
        = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());

      // Ignore trivial destructors.
      if (BaseClassDecl->hasTrivialDestructor())
        continue;

      EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
                                        BaseClassDecl,
                                        /*BaseIsVirtual*/ true);
    }

    return;
  }

  assert(DtorType == Dtor_Base);

  // Destroy non-virtual bases.
  for (const auto &Base : ClassDecl->bases()) {
    // Ignore virtual bases.
    if (Base.isVirtual())
      continue;

    CXXRecordDecl *BaseClassDecl = Base.getType()->getAsCXXRecordDecl();

    // Ignore trivial destructors.
    if (BaseClassDecl->hasTrivialDestructor())
      continue;

    EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
                                      BaseClassDecl,
                                      /*BaseIsVirtual*/ false);
  }

  // Destroy direct fields.
  for (const auto *Field : ClassDecl->fields()) {
    QualType type = Field->getType();
    QualType::DestructionKind dtorKind = type.isDestructedType();
    if (!dtorKind) continue;

    // Anonymous union members do not have their destructors called.
    const RecordType *RT = type->getAsUnionType();
    if (RT && RT->getDecl()->isAnonymousStructOrUnion()) continue;

    CleanupKind cleanupKind = getCleanupKind(dtorKind);
    EHStack.pushCleanup<DestroyField>(cleanupKind, Field,
                                      getDestroyer(dtorKind),
                                      cleanupKind & EHCleanup);
  }
}

/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
/// constructor for each of several members of an array.
///
/// \param ctor the constructor to call for each element
/// \param arrayType the type of the array to initialize
/// \param arrayBegin an arrayType*
/// \param zeroInitialize true if each element should be
///   zero-initialized before it is constructed
void CodeGenFunction::EmitCXXAggrConstructorCall(
    const CXXConstructorDecl *ctor, const ConstantArrayType *arrayType,
    llvm::Value *arrayBegin, const CXXConstructExpr *E, bool zeroInitialize) {
  QualType elementType;
  llvm::Value *numElements =
    emitArrayLength(arrayType, elementType, arrayBegin);

  EmitCXXAggrConstructorCall(ctor, numElements, arrayBegin, E, zeroInitialize);
}

/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
/// constructor for each of several members of an array.
///
/// \param ctor the constructor to call for each element
/// \param numElements the number of elements in the array;
///   may be zero
/// \param arrayBegin a T*, where T is the type constructed by ctor
/// \param zeroInitialize true if each element should be
///   zero-initialized before it is constructed
void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
                                                 llvm::Value *numElements,
                                                 llvm::Value *arrayBegin,
                                                 const CXXConstructExpr *E,
                                                 bool zeroInitialize) {

  // It's legal for numElements to be zero.  This can happen both
  // dynamically, because x can be zero in 'new A[x]', and statically,
  // because of GCC extensions that permit zero-length arrays.  There
  // are probably legitimate places where we could assume that this
  // doesn't happen, but it's not clear that it's worth it.
  llvm::BranchInst *zeroCheckBranch = nullptr;

  // Optimize for a constant count.
  llvm::ConstantInt *constantCount
    = dyn_cast<llvm::ConstantInt>(numElements);
  if (constantCount) {
    // Just skip out if the constant count is zero.
    if (constantCount->isZero()) return;

  // Otherwise, emit the check.
  } else {
    llvm::BasicBlock *loopBB = createBasicBlock("new.ctorloop");
    llvm::Value *iszero = Builder.CreateIsNull(numElements, "isempty");
    // Both successors initially point at the loop; successor 0 (the zero
    // case) is patched below, once the continuation block exists.
    zeroCheckBranch = Builder.CreateCondBr(iszero, loopBB, loopBB);
    EmitBlock(loopBB);
  }

  // Find the end of the array.
  llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(arrayBegin, numElements,
                                                    "arrayctor.end");

  // Enter the loop, setting up a phi for the current location to initialize.
  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = createBasicBlock("arrayctor.loop");
  EmitBlock(loopBB);
  llvm::PHINode *cur = Builder.CreatePHI(arrayBegin->getType(), 2,
                                         "arrayctor.cur");
  cur->addIncoming(arrayBegin, entryBB);

  // Inside the loop body, emit the constructor call on the array element.

  QualType type = getContext().getTypeDeclType(ctor->getParent());

  // Zero initialize the storage, if requested.
  if (zeroInitialize)
    EmitNullInitialization(cur, type);

  // C++ [class.temporary]p4:
  //   There are two contexts in which temporaries are destroyed at a
  //   different point than the end of the full-expression. The first context
  //   is when a default constructor is called to initialize an element of
  //   an array. If the constructor has one or more default arguments, the
  //   destruction of every temporary created in a default argument
  //   expression is sequenced before the construction of the next array
  //   element, if any.

  {
    RunCleanupsScope Scope(*this);

    // Evaluate the constructor and its arguments in a regular
    // partial-destroy cleanup.
    if (getLangOpts().Exceptions &&
        !ctor->getParent()->hasTrivialDestructor()) {
      Destroyer *destroyer = destroyCXXObject;
      pushRegularPartialArrayCleanup(arrayBegin, cur, type, *destroyer);
    }

    EmitCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/false,
                           /*Delegating=*/false, cur, E);
  }

  // Go to the next element.
  llvm::Value *next =
    Builder.CreateInBoundsGEP(cur, llvm::ConstantInt::get(SizeTy, 1),
                              "arrayctor.next");
  cur->addIncoming(next, Builder.GetInsertBlock());

  // Check whether that's the end of the loop.
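  // (Sketch: 'arrayctor.loop' advances the 'arrayctor.cur' phi one element
  // per iteration until it reaches 'arrayctor.end', then exits to
  // 'arrayctor.cont'.)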
  // Check whether that's the end of the loop.
  llvm::Value *done = Builder.CreateICmpEQ(next, arrayEnd, "arrayctor.done");
  llvm::BasicBlock *contBB = createBasicBlock("arrayctor.cont");
  Builder.CreateCondBr(done, contBB, loopBB);

  // Patch the earlier check to skip over the loop.
  if (zeroCheckBranch) zeroCheckBranch->setSuccessor(0, contBB);

  EmitBlock(contBB);
}

void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF,
                                       llvm::Value *addr,
                                       QualType type) {
  const RecordType *rtype = type->castAs<RecordType>();
  const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
  const CXXDestructorDecl *dtor = record->getDestructor();
  assert(!dtor->isTrivial());
  CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*ForVirtualBase=*/false,
                            /*Delegating=*/false, addr);
}

void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
                                             CXXCtorType Type,
                                             bool ForVirtualBase,
                                             bool Delegating, llvm::Value *This,
                                             const CXXConstructExpr *E) {
  // If this is a trivial constructor, just emit what's needed.
  if (D->isTrivial()) {
    if (E->getNumArgs() == 0) {
      // Trivial default constructor, no codegen required.
      assert(D->isDefaultConstructor() &&
             "trivial 0-arg ctor not a default ctor");
      return;
    }

    assert(E->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
    assert(D->isCopyOrMoveConstructor() &&
           "trivial 1-arg ctor not a copy/move ctor");

    const Expr *Arg = E->getArg(0);
    QualType Ty = Arg->getType();
    llvm::Value *Src = EmitLValue(Arg).getAddress();
    EmitAggregateCopy(This, Src, Ty);
    return;
  }

  // C++11 [class.mfct.non-static]p2:
  //   If a non-static member function of a class X is called for an object
  //   that is not of type X, or of a type derived from X, the behavior is
  //   undefined.
  // FIXME: Provide a source location here.
  EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, SourceLocation(), This,
                getContext().getRecordType(D->getParent()));

  CallArgList Args;

  // Push the this ptr.
  Args.add(RValue::get(This), D->getThisType(getContext()));

  // Add the rest of the user-supplied arguments.
  const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();
  EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());

  // Insert any ABI-specific implicit constructor arguments.
  unsigned ExtraArgs = CGM.getCXXABI().addImplicitConstructorArgs(
      *this, D, Type, ForVirtualBase, Delegating, Args);
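
  // (The implicit arguments are ABI-dependent: for instance, the Itanium
  // C++ ABI passes a VTT pointer to base-object constructors of classes
  // with virtual bases, while the Microsoft ABI passes an 'is most derived'
  // flag. This is only a summary; the CGCXXABI implementations are the
  // authority on what actually gets added.)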
  // Emit the call.
  llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, Type);
  const CGFunctionInfo &Info =
      CGM.getTypes().arrangeCXXConstructorCall(Args, D, Type, ExtraArgs);
  EmitCall(Info, Callee, ReturnValueSlot(), Args, D);
}

void
CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
                                                llvm::Value *This,
                                                llvm::Value *Src,
                                                const CXXConstructExpr *E) {
  if (D->isTrivial()) {
    assert(E->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
    assert(D->isCopyOrMoveConstructor() &&
           "trivial 1-arg ctor not a copy/move ctor");
    EmitAggregateCopy(This, Src, E->arg_begin()->getType());
    return;
  }
  llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, clang::Ctor_Complete);
  assert(D->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();

  CallArgList Args;

  // Push the this ptr.
  Args.add(RValue::get(This), D->getThisType(getContext()));

  // Push the src ptr.
  QualType QT = *(FPT->param_type_begin());
  llvm::Type *t = CGM.getTypes().ConvertType(QT);
  Src = Builder.CreateBitCast(Src, t);
  Args.add(RValue::get(Src), QT);

  // Skip over the first argument (Src).
  EmitCallArgs(Args, FPT, E->arg_begin() + 1, E->arg_end(),
               /*ParamsToSkip*/ 1);

  EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, RequiredArgs::All),
           Callee, ReturnValueSlot(), Args, D);
}

void
CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                                CXXCtorType CtorType,
                                                const FunctionArgList &Args,
                                                SourceLocation Loc) {
  CallArgList DelegateArgs;

  FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
  assert(I != E && "no parameters to constructor");

  // this
  DelegateArgs.add(RValue::get(LoadCXXThis()), (*I)->getType());
  ++I;

  // vtt
  if (llvm::Value *VTT = GetVTTParameter(GlobalDecl(Ctor, CtorType),
                                         /*ForVirtualBase=*/false,
                                         /*Delegating=*/true)) {
    QualType VoidPP = getContext().getPointerType(getContext().VoidPtrTy);
    DelegateArgs.add(RValue::get(VTT), VoidPP);

    if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
      assert(I != E && "cannot skip vtt parameter, already done with args");
      assert((*I)->getType() == VoidPP && "skipping parameter not of vtt type");
      ++I;
    }
  }
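
  // Everything still left in the argument list is a user-declared parameter
  // of the constructor. A variant emitted through this path is essentially a
  // thin forwarding wrapper: e.g. for 'A::A(int x)', the delegating variant
  // passes 'this', the VTT if one is required, and 'x' through unchanged.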
  // Explicit arguments.
  for (; I != E; ++I) {
    const VarDecl *param = *I;
    // FIXME: per-argument source location
    EmitDelegateCallArg(DelegateArgs, param, Loc);
  }

  llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(Ctor, CtorType);
  EmitCall(CGM.getTypes().arrangeCXXConstructorDeclaration(Ctor, CtorType),
           Callee, ReturnValueSlot(), DelegateArgs, Ctor);
}

namespace {
  struct CallDelegatingCtorDtor : EHScopeStack::Cleanup {
    const CXXDestructorDecl *Dtor;
    llvm::Value *Addr;
    CXXDtorType Type;

    CallDelegatingCtorDtor(const CXXDestructorDecl *D, llvm::Value *Addr,
                           CXXDtorType Type)
      : Dtor(D), Addr(Addr), Type(Type) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false,
                                /*Delegating=*/true, Addr);
    }
  };
}

void
CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                                  const FunctionArgList &Args) {
  assert(Ctor->isDelegatingConstructor());

  llvm::Value *ThisPtr = LoadCXXThis();

  QualType Ty = getContext().getTagDeclType(Ctor->getParent());
  CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
  AggValueSlot AggSlot =
    AggValueSlot::forAddr(ThisPtr, Alignment, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased);

  EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot);

  const CXXRecordDecl *ClassDecl = Ctor->getParent();
  if (CGM.getLangOpts().Exceptions && !ClassDecl->hasTrivialDestructor()) {
    // Once the target constructor has completed, the object is fully
    // constructed; if the delegating constructor's body then throws, the
    // destructor must run.
    CXXDtorType Type =
      CurGD.getCtorType() == Ctor_Complete ? Dtor_Complete : Dtor_Base;

    EHStack.pushCleanup<CallDelegatingCtorDtor>(EHCleanup,
                                                ClassDecl->getDestructor(),
                                                ThisPtr, Type);
  }
}

void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
                                            CXXDtorType Type,
                                            bool ForVirtualBase,
                                            bool Delegating,
                                            llvm::Value *This) {
  CGM.getCXXABI().EmitDestructorCall(*this, DD, Type, ForVirtualBase,
                                     Delegating, This);
}

namespace {
  struct CallLocalDtor : EHScopeStack::Cleanup {
    const CXXDestructorDecl *Dtor;
    llvm::Value *Addr;

    CallLocalDtor(const CXXDestructorDecl *D, llvm::Value *Addr)
      : Dtor(D), Addr(Addr) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                                /*ForVirtualBase=*/false,
                                /*Delegating=*/false, Addr);
    }
  };
}

void CodeGenFunction::PushDestructorCleanup(const CXXDestructorDecl *D,
                                            llvm::Value *Addr) {
  EHStack.pushCleanup<CallLocalDtor>(NormalAndEHCleanup, D, Addr);
}

void CodeGenFunction::PushDestructorCleanup(QualType T, llvm::Value *Addr) {
  CXXRecordDecl *ClassDecl = T->getAsCXXRecordDecl();
  if (!ClassDecl) return;
  if (ClassDecl->hasTrivialDestructor()) return;

  const CXXDestructorDecl *D = ClassDecl->getDestructor();
  assert(D && D->isUsed() && "destructor not marked as used!");
  PushDestructorCleanup(D, Addr);
}

void
CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
                                         const CXXRecordDecl *NearestVBase,
                                         CharUnits OffsetFromNearestVBase,
                                         const CXXRecordDecl *VTableClass) {
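  // The 'address point' is the location inside a vtable that the vptr
  // actually points at; in the Itanium ABI it sits past the offset-to-top
  // and RTTI slots, so that vptr[0] is the first virtual function entry.
  // During construction and destruction the vptr must point at the vtable
  // for the class currently being constructed, not the most derived class.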
  // Compute the address point.
  bool NeedsVirtualOffset;
  llvm::Value *VTableAddressPoint =
      CGM.getCXXABI().getVTableAddressPointInStructor(
          *this, VTableClass, Base, NearestVBase, NeedsVirtualOffset);
  if (!VTableAddressPoint)
    return;

  // Compute where to store the address point.
  llvm::Value *VirtualOffset = nullptr;
  CharUnits NonVirtualOffset = CharUnits::Zero();

  if (NeedsVirtualOffset) {
    // We need to load the virtual base offset from the vtable because the
    // virtual base might be at a different offset in the most derived class.
    VirtualOffset = CGM.getCXXABI().GetVirtualBaseClassOffset(*this,
                                                              LoadCXXThis(),
                                                              VTableClass,
                                                              NearestVBase);
    NonVirtualOffset = OffsetFromNearestVBase;
  } else {
    // We can just use the base offset in the complete class.
    NonVirtualOffset = Base.getBaseOffset();
  }

  // Apply the offsets.
  llvm::Value *VTableField = LoadCXXThis();

  if (!NonVirtualOffset.isZero() || VirtualOffset)
    VTableField = ApplyNonVirtualAndVirtualOffset(*this, VTableField,
                                                  NonVirtualOffset,
                                                  VirtualOffset);

  // Finally, store the address point.
  llvm::Type *AddressPointPtrTy =
    VTableAddressPoint->getType()->getPointerTo();
  VTableField = Builder.CreateBitCast(VTableField, AddressPointPtrTy);
  llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField);
  CGM.DecorateInstruction(Store, CGM.getTBAAInfoForVTablePtr());
}

void
CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
                                          const CXXRecordDecl *NearestVBase,
                                          CharUnits OffsetFromNearestVBase,
                                          bool BaseIsNonVirtualPrimaryBase,
                                          const CXXRecordDecl *VTableClass,
                                          VisitedVirtualBasesSetTy &VBases) {
  // If this base is a non-virtual primary base the address point has already
  // been set.
  if (!BaseIsNonVirtualPrimaryBase) {
    // Initialize the vtable pointer for this base.
    InitializeVTablePointer(Base, NearestVBase, OffsetFromNearestVBase,
                            VTableClass);
  }

  const CXXRecordDecl *RD = Base.getBase();

  // Traverse bases.
  for (const auto &I : RD->bases()) {
    CXXRecordDecl *BaseDecl
      = cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());

    // Ignore classes without a vtable.
    if (!BaseDecl->isDynamicClass())
      continue;

    CharUnits BaseOffset;
    CharUnits BaseOffsetFromNearestVBase;
    bool BaseDeclIsNonVirtualPrimaryBase;

    if (I.isVirtual()) {
      // Check if we've visited this virtual base before.
      if (!VBases.insert(BaseDecl))
        continue;

      const ASTRecordLayout &Layout =
        getContext().getASTRecordLayout(VTableClass);

      BaseOffset = Layout.getVBaseClassOffset(BaseDecl);
      BaseOffsetFromNearestVBase = CharUnits::Zero();
      BaseDeclIsNonVirtualPrimaryBase = false;
    } else {
      const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

      BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
      BaseOffsetFromNearestVBase =
        OffsetFromNearestVBase + Layout.getBaseClassOffset(BaseDecl);
      BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl;
    }

    InitializeVTablePointers(BaseSubobject(BaseDecl, BaseOffset),
                             I.isVirtual() ? BaseDecl : NearestVBase,
                             BaseOffsetFromNearestVBase,
                             BaseDeclIsNonVirtualPrimaryBase,
                             VTableClass, VBases);
  }
}

void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
  // Ignore classes without a vtable.
  if (!RD->isDynamicClass())
    return;

  // Initialize the vtable pointers for this class and all of its bases.
  VisitedVirtualBasesSetTy VBases;
  InitializeVTablePointers(BaseSubobject(RD, CharUnits::Zero()),
                           /*NearestVBase=*/nullptr,
                           /*OffsetFromNearestVBase=*/CharUnits::Zero(),
                           /*BaseIsNonVirtualPrimaryBase=*/false, RD, VBases);

  if (RD->getNumVBases())
    CGM.getCXXABI().initializeHiddenVirtualInheritanceMembers(*this, RD);
}

llvm::Value *CodeGenFunction::GetVTablePtr(llvm::Value *This,
                                           llvm::Type *Ty) {
  llvm::Value *VTablePtrSrc = Builder.CreateBitCast(This, Ty->getPointerTo());
  llvm::Instruction *VTable = Builder.CreateLoad(VTablePtrSrc, "vtable");
  CGM.DecorateInstruction(VTable, CGM.getTBAAInfoForVTablePtr());
  return VTable;
}

// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
// quite what we want.
static const Expr *skipNoOpCastsAndParens(const Expr *E) {
  while (true) {
    if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
      E = PE->getSubExpr();
      continue;
    }

    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
      if (CE->getCastKind() == CK_NoOp) {
        E = CE->getSubExpr();
        continue;
      }
    }

    if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
      if (UO->getOpcode() == UO_Extension) {
        E = UO->getSubExpr();
        continue;
      }
    }

    return E;
  }
}

bool
CodeGenFunction::CanDevirtualizeMemberFunctionCall(const Expr *Base,
                                                   const CXXMethodDecl *MD) {
  // When building with -fapple-kext, all calls must go through the vtable,
  // since the kernel linker can do runtime patching of vtables.
  if (getLangOpts().AppleKext)
    return false;

  // If the most derived class is marked final, we know that no subclass can
  // override this member function and so we can devirtualize it. For example:
  //
  //   struct A { virtual void f(); };
  //   struct B final : A { };
  //
  //   void f(B *b) {
  //     b->f();
  //   }
  //
  const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType();
  if (MostDerivedClassDecl->hasAttr<FinalAttr>())
    return true;

  // If the member function is marked 'final', we know that it can't be
  // overridden and can therefore devirtualize it.
  if (MD->hasAttr<FinalAttr>())
    return true;

  // Similarly, if the method's class is marked 'final', nothing can derive
  // from it, so no override is possible and we can devirtualize the call.
  if (MD->getParent()->hasAttr<FinalAttr>())
    return true;

  Base = skipNoOpCastsAndParens(Base);
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
      // This is a record decl. We know the type and can devirtualize it.
      return VD->getType()->isRecordType();
    }

    return false;
  }

  // We can devirtualize calls on an object accessed by a class member access
  // expression, since by C++11 [basic.life]p6 we know that it can't refer to
  // a derived class object constructed in the same location.
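  // For example:
  //
  //   struct A { virtual void f(); };
  //   struct D { A a; };
  //
  //   void g(D *d) {
  //     d->a.f();  // must call A::f; 'd->a' can never denote a derived object
  //   }
  //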
  if (const MemberExpr *ME = dyn_cast<MemberExpr>(Base))
    if (const ValueDecl *VD = dyn_cast<ValueDecl>(ME->getMemberDecl()))
      return VD->getType()->isRecordType();

  // We can always devirtualize calls on temporary object expressions.
  if (isa<CXXConstructExpr>(Base))
    return true;

  // And calls on bound temporaries.
  if (isa<CXXBindTemporaryExpr>(Base))
    return true;

  // Check if this is a call expr that returns a record type.
  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
    return CE->getCallReturnType()->isRecordType();

  // We can't devirtualize the call.
  return false;
}

llvm::Value *
CodeGenFunction::EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
                                             const CXXMethodDecl *MD,
                                             llvm::Value *This) {
  llvm::FunctionType *fnType =
    CGM.getTypes().GetFunctionType(
        CGM.getTypes().arrangeCXXMethodDeclaration(MD));

  if (MD->isVirtual() && !CanDevirtualizeMemberFunctionCall(E->getArg(0), MD))
    return CGM.getCXXABI().getVirtualFunctionPointer(*this, MD, This, fnType);

  return CGM.GetAddrOfFunction(MD, fnType);
}

void CodeGenFunction::EmitForwardingCallToLambda(
                                      const CXXMethodDecl *callOperator,
                                      CallArgList &callArgs) {
  // Get the address of the call operator.
  const CGFunctionInfo &calleeFnInfo =
    CGM.getTypes().arrangeCXXMethodDeclaration(callOperator);
  llvm::Value *callee =
    CGM.GetAddrOfFunction(GlobalDecl(callOperator),
                          CGM.getTypes().GetFunctionType(calleeFnInfo));

  // Prepare the return slot.
  const FunctionProtoType *FPT =
    callOperator->getType()->castAs<FunctionProtoType>();
  QualType resultType = FPT->getReturnType();
  ReturnValueSlot returnSlot;
  if (!resultType->isVoidType() &&
      calleeFnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
      !hasScalarEvaluationKind(calleeFnInfo.getReturnType()))
    returnSlot = ReturnValueSlot(ReturnValue, resultType.isVolatileQualified());

  // We don't need to separately arrange the call arguments because
  // the call can't be variadic anyway --- it's impossible to forward
  // variadic arguments.

  // Now emit our call.
  RValue RV = EmitCall(calleeFnInfo, callee, returnSlot,
                       callArgs, callOperator);

  // If necessary, copy the returned value into the slot.
  if (!resultType->isVoidType() && returnSlot.isNull())
    EmitReturnOfRValue(RV, resultType);
  else
    EmitBranchThroughCleanup(ReturnBlock);
}

void CodeGenFunction::EmitLambdaBlockInvokeBody() {
  const BlockDecl *BD = BlockInfo->getBlockDecl();
  const VarDecl *variable = BD->capture_begin()->getVariable();
  const CXXRecordDecl *Lambda = variable->getType()->getAsCXXRecordDecl();

  // Start building the arguments for the forwarding call.
  CallArgList CallArgs;

  QualType ThisType =
    getContext().getPointerType(getContext().getRecordType(Lambda));
  llvm::Value *ThisPtr = GetAddrOfBlockDecl(variable, false);
  CallArgs.add(RValue::get(ThisPtr), ThisType);
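
  // The block's parameter list mirrors the lambda call operator's, so each
  // parameter can be forwarded one-for-one.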
  // Add the rest of the parameters.
  for (auto param : BD->params())
    EmitDelegateCallArg(CallArgs, param, param->getLocStart());

  assert(!Lambda->isGenericLambda() &&
         "generic lambda interconversion to block not implemented");
  EmitForwardingCallToLambda(Lambda->getLambdaCallOperator(), CallArgs);
}

void CodeGenFunction::EmitLambdaToBlockPointerBody(FunctionArgList &Args) {
  if (cast<CXXMethodDecl>(CurCodeDecl)->isVariadic()) {
    // FIXME: Making this work correctly is nasty because it requires either
    // cloning the body of the call operator or making the call operator
    // forward.
    CGM.ErrorUnsupported(CurCodeDecl, "lambda conversion to variadic function");
    return;
  }

  EmitFunctionBody(Args, cast<FunctionDecl>(CurGD.getDecl())->getBody());
}

void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
  const CXXRecordDecl *Lambda = MD->getParent();

  // Start building the arguments for the forwarding call.
  CallArgList CallArgs;

  QualType ThisType =
    getContext().getPointerType(getContext().getRecordType(Lambda));
  llvm::Value *ThisPtr =
    llvm::UndefValue::get(getTypes().ConvertType(ThisType));
  CallArgs.add(RValue::get(ThisPtr), ThisType);

  // Add the rest of the parameters.
  for (auto Param : MD->params())
    EmitDelegateCallArg(CallArgs, Param, Param->getLocStart());

  const CXXMethodDecl *CallOp = Lambda->getLambdaCallOperator();
  // For a generic lambda, find the corresponding call operator specialization
  // to which the call from the static invoker should be forwarded.
  if (Lambda->isGenericLambda()) {
    assert(MD->isFunctionTemplateSpecialization());
    const TemplateArgumentList *TAL = MD->getTemplateSpecializationArgs();
    FunctionTemplateDecl *CallOpTemplate =
        CallOp->getDescribedFunctionTemplate();
    void *InsertPos = nullptr;
    FunctionDecl *CorrespondingCallOpSpecialization =
        CallOpTemplate->findSpecialization(TAL->asArray(), InsertPos);
    assert(CorrespondingCallOpSpecialization);
    CallOp = cast<CXXMethodDecl>(CorrespondingCallOpSpecialization);
  }
  EmitForwardingCallToLambda(CallOp, CallArgs);
}

void CodeGenFunction::EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD) {
  if (MD->isVariadic()) {
    // FIXME: Making this work correctly is nasty because it requires either
    // cloning the body of the call operator or making the call operator
    // forward.
    CGM.ErrorUnsupported(MD, "lambda conversion to variadic function");
    return;
  }

  EmitLambdaDelegatingInvokeBody(MD);
}
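
// Note: the static invoker emitted above is what backs a captureless
// lambda's conversion to an ordinary function pointer, e.g.:
//
//   int (*fp)(int) = [](int x) { return x; };
//
// It builds no closure object; it forwards its parameters to the call
// operator with an undef 'this', which is harmless because a captureless
// lambda's call operator never uses its object argument.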