1 //===--- CGClass.cpp - Emit LLVM Code for C++ classes ---------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This contains code dealing with C++ code generation of classes 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "CGBlocks.h" 15 #include "CGCXXABI.h" 16 #include "CGDebugInfo.h" 17 #include "CGRecordLayout.h" 18 #include "CodeGenFunction.h" 19 #include "clang/AST/CXXInheritance.h" 20 #include "clang/AST/DeclTemplate.h" 21 #include "clang/AST/EvaluatedExprVisitor.h" 22 #include "clang/AST/RecordLayout.h" 23 #include "clang/AST/StmtCXX.h" 24 #include "clang/Basic/TargetBuiltins.h" 25 #include "clang/CodeGen/CGFunctionInfo.h" 26 #include "clang/Frontend/CodeGenOptions.h" 27 28 using namespace clang; 29 using namespace CodeGen; 30 31 static CharUnits 32 ComputeNonVirtualBaseClassOffset(ASTContext &Context, 33 const CXXRecordDecl *DerivedClass, 34 CastExpr::path_const_iterator Start, 35 CastExpr::path_const_iterator End) { 36 CharUnits Offset = CharUnits::Zero(); 37 38 const CXXRecordDecl *RD = DerivedClass; 39 40 for (CastExpr::path_const_iterator I = Start; I != End; ++I) { 41 const CXXBaseSpecifier *Base = *I; 42 assert(!Base->isVirtual() && "Should not see virtual bases here!"); 43 44 // Get the layout. 45 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 46 47 const CXXRecordDecl *BaseDecl = 48 cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl()); 49 50 // Add the offset. 51 Offset += Layout.getBaseClassOffset(BaseDecl); 52 53 RD = BaseDecl; 54 } 55 56 return Offset; 57 } 58 59 llvm::Constant * 60 CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl, 61 CastExpr::path_const_iterator PathBegin, 62 CastExpr::path_const_iterator PathEnd) { 63 assert(PathBegin != PathEnd && "Base path should not be empty!"); 64 65 CharUnits Offset = 66 ComputeNonVirtualBaseClassOffset(getContext(), ClassDecl, 67 PathBegin, PathEnd); 68 if (Offset.isZero()) 69 return nullptr; 70 71 llvm::Type *PtrDiffTy = 72 Types.ConvertType(getContext().getPointerDiffType()); 73 74 return llvm::ConstantInt::get(PtrDiffTy, Offset.getQuantity()); 75 } 76 77 /// Gets the address of a direct base class within a complete object. 78 /// This should only be used for (1) non-virtual bases or (2) virtual bases 79 /// when the type is known to be complete (e.g. in complete destructors). 80 /// 81 /// The object pointed to by 'This' is assumed to be non-null. 82 llvm::Value * 83 CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This, 84 const CXXRecordDecl *Derived, 85 const CXXRecordDecl *Base, 86 bool BaseIsVirtual) { 87 // 'this' must be a pointer (in some address space) to Derived. 88 assert(This->getType()->isPointerTy() && 89 cast<llvm::PointerType>(This->getType())->getElementType() 90 == ConvertType(Derived)); 91 92 // Compute the offset of the virtual base. 93 CharUnits Offset; 94 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived); 95 if (BaseIsVirtual) 96 Offset = Layout.getVBaseClassOffset(Base); 97 else 98 Offset = Layout.getBaseClassOffset(Base); 99 100 // Shift and cast down to the base type. 101 // TODO: for complete types, this should be possible with a GEP. 
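  // Illustrative note (not in the original): for a hypothetical hierarchy
  //   struct A { int a; };  struct B { int b; };  struct D : A, B { };
  // upcasting a D* to its B base must step past the A subobject, so the code
  // below adds Layout.getBaseClassOffset(B) bytes to 'this' through an i8*
  // GEP before casting the result to B*.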
102 llvm::Value *V = This; 103 if (Offset.isPositive()) { 104 V = Builder.CreateBitCast(V, Int8PtrTy); 105 V = Builder.CreateConstInBoundsGEP1_64(V, Offset.getQuantity()); 106 } 107 V = Builder.CreateBitCast(V, ConvertType(Base)->getPointerTo()); 108 109 return V; 110 } 111 112 static llvm::Value * 113 ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ptr, 114 CharUnits nonVirtualOffset, 115 llvm::Value *virtualOffset) { 116 // Assert that we have something to do. 117 assert(!nonVirtualOffset.isZero() || virtualOffset != nullptr); 118 119 // Compute the offset from the static and dynamic components. 120 llvm::Value *baseOffset; 121 if (!nonVirtualOffset.isZero()) { 122 baseOffset = llvm::ConstantInt::get(CGF.PtrDiffTy, 123 nonVirtualOffset.getQuantity()); 124 if (virtualOffset) { 125 baseOffset = CGF.Builder.CreateAdd(virtualOffset, baseOffset); 126 } 127 } else { 128 baseOffset = virtualOffset; 129 } 130 131 // Apply the base offset. 132 ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8PtrTy); 133 ptr = CGF.Builder.CreateInBoundsGEP(ptr, baseOffset, "add.ptr"); 134 return ptr; 135 } 136 137 llvm::Value *CodeGenFunction::GetAddressOfBaseClass( 138 llvm::Value *Value, const CXXRecordDecl *Derived, 139 CastExpr::path_const_iterator PathBegin, 140 CastExpr::path_const_iterator PathEnd, bool NullCheckValue, 141 SourceLocation Loc) { 142 assert(PathBegin != PathEnd && "Base path should not be empty!"); 143 144 CastExpr::path_const_iterator Start = PathBegin; 145 const CXXRecordDecl *VBase = nullptr; 146 147 // Sema has done some convenient canonicalization here: if the 148 // access path involved any virtual steps, the conversion path will 149 // *start* with a step down to the correct virtual base subobject, 150 // and hence will not require any further steps. 151 if ((*Start)->isVirtual()) { 152 VBase = 153 cast<CXXRecordDecl>((*Start)->getType()->getAs<RecordType>()->getDecl()); 154 ++Start; 155 } 156 157 // Compute the static offset of the ultimate destination within its 158 // allocating subobject (the virtual base, if there is one, or else 159 // the "complete" object that we see). 160 CharUnits NonVirtualOffset = 161 ComputeNonVirtualBaseClassOffset(getContext(), VBase ? VBase : Derived, 162 Start, PathEnd); 163 164 // If there's a virtual step, we can sometimes "devirtualize" it. 165 // For now, that's limited to when the derived type is final. 166 // TODO: "devirtualize" this for accesses to known-complete objects. 167 if (VBase && Derived->hasAttr<FinalAttr>()) { 168 const ASTRecordLayout &layout = getContext().getASTRecordLayout(Derived); 169 CharUnits vBaseOffset = layout.getVBaseClassOffset(VBase); 170 NonVirtualOffset += vBaseOffset; 171 VBase = nullptr; // we no longer have a virtual step 172 } 173 174 // Get the base pointer type. 175 llvm::Type *BasePtrTy = 176 ConvertType((PathEnd[-1])->getType())->getPointerTo(); 177 178 QualType DerivedTy = getContext().getRecordType(Derived); 179 CharUnits DerivedAlign = getContext().getTypeAlignInChars(DerivedTy); 180 181 // If the static offset is zero and we don't have a virtual step, 182 // just do a bitcast; null checks are unnecessary. 
183 if (NonVirtualOffset.isZero() && !VBase) { 184 if (sanitizePerformTypeCheck()) { 185 EmitTypeCheck(TCK_Upcast, Loc, Value, DerivedTy, DerivedAlign, 186 !NullCheckValue); 187 } 188 return Builder.CreateBitCast(Value, BasePtrTy); 189 } 190 191 llvm::BasicBlock *origBB = nullptr; 192 llvm::BasicBlock *endBB = nullptr; 193 194 // Skip over the offset (and the vtable load) if we're supposed to 195 // null-check the pointer. 196 if (NullCheckValue) { 197 origBB = Builder.GetInsertBlock(); 198 llvm::BasicBlock *notNullBB = createBasicBlock("cast.notnull"); 199 endBB = createBasicBlock("cast.end"); 200 201 llvm::Value *isNull = Builder.CreateIsNull(Value); 202 Builder.CreateCondBr(isNull, endBB, notNullBB); 203 EmitBlock(notNullBB); 204 } 205 206 if (sanitizePerformTypeCheck()) { 207 EmitTypeCheck(VBase ? TCK_UpcastToVirtualBase : TCK_Upcast, Loc, Value, 208 DerivedTy, DerivedAlign, true); 209 } 210 211 // Compute the virtual offset. 212 llvm::Value *VirtualOffset = nullptr; 213 if (VBase) { 214 VirtualOffset = 215 CGM.getCXXABI().GetVirtualBaseClassOffset(*this, Value, Derived, VBase); 216 } 217 218 // Apply both offsets. 219 Value = ApplyNonVirtualAndVirtualOffset(*this, Value, 220 NonVirtualOffset, 221 VirtualOffset); 222 223 // Cast to the destination type. 224 Value = Builder.CreateBitCast(Value, BasePtrTy); 225 226 // Build a phi if we needed a null check. 227 if (NullCheckValue) { 228 llvm::BasicBlock *notNullBB = Builder.GetInsertBlock(); 229 Builder.CreateBr(endBB); 230 EmitBlock(endBB); 231 232 llvm::PHINode *PHI = Builder.CreatePHI(BasePtrTy, 2, "cast.result"); 233 PHI->addIncoming(Value, notNullBB); 234 PHI->addIncoming(llvm::Constant::getNullValue(BasePtrTy), origBB); 235 Value = PHI; 236 } 237 238 return Value; 239 } 240 241 llvm::Value * 242 CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value, 243 const CXXRecordDecl *Derived, 244 CastExpr::path_const_iterator PathBegin, 245 CastExpr::path_const_iterator PathEnd, 246 bool NullCheckValue) { 247 assert(PathBegin != PathEnd && "Base path should not be empty!"); 248 249 QualType DerivedTy = 250 getContext().getCanonicalType(getContext().getTagDeclType(Derived)); 251 llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo(); 252 253 llvm::Value *NonVirtualOffset = 254 CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd); 255 256 if (!NonVirtualOffset) { 257 // No offset, we can just cast back. 258 return Builder.CreateBitCast(Value, DerivedPtrTy); 259 } 260 261 llvm::BasicBlock *CastNull = nullptr; 262 llvm::BasicBlock *CastNotNull = nullptr; 263 llvm::BasicBlock *CastEnd = nullptr; 264 265 if (NullCheckValue) { 266 CastNull = createBasicBlock("cast.null"); 267 CastNotNull = createBasicBlock("cast.notnull"); 268 CastEnd = createBasicBlock("cast.end"); 269 270 llvm::Value *IsNull = Builder.CreateIsNull(Value); 271 Builder.CreateCondBr(IsNull, CastNull, CastNotNull); 272 EmitBlock(CastNotNull); 273 } 274 275 // Apply the offset. 276 Value = Builder.CreateBitCast(Value, Int8PtrTy); 277 Value = Builder.CreateGEP(Value, Builder.CreateNeg(NonVirtualOffset), 278 "sub.ptr"); 279 280 // Just cast. 
281 Value = Builder.CreateBitCast(Value, DerivedPtrTy); 282 283 if (NullCheckValue) { 284 Builder.CreateBr(CastEnd); 285 EmitBlock(CastNull); 286 Builder.CreateBr(CastEnd); 287 EmitBlock(CastEnd); 288 289 llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2); 290 PHI->addIncoming(Value, CastNotNull); 291 PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), 292 CastNull); 293 Value = PHI; 294 } 295 296 return Value; 297 } 298 299 llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD, 300 bool ForVirtualBase, 301 bool Delegating) { 302 if (!CGM.getCXXABI().NeedsVTTParameter(GD)) { 303 // This constructor/destructor does not need a VTT parameter. 304 return nullptr; 305 } 306 307 const CXXRecordDecl *RD = cast<CXXMethodDecl>(CurCodeDecl)->getParent(); 308 const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent(); 309 310 llvm::Value *VTT; 311 312 uint64_t SubVTTIndex; 313 314 if (Delegating) { 315 // If this is a delegating constructor call, just load the VTT. 316 return LoadCXXVTT(); 317 } else if (RD == Base) { 318 // If the record matches the base, this is the complete ctor/dtor 319 // variant calling the base variant in a class with virtual bases. 320 assert(!CGM.getCXXABI().NeedsVTTParameter(CurGD) && 321 "doing no-op VTT offset in base dtor/ctor?"); 322 assert(!ForVirtualBase && "Can't have same class as virtual base!"); 323 SubVTTIndex = 0; 324 } else { 325 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 326 CharUnits BaseOffset = ForVirtualBase ? 327 Layout.getVBaseClassOffset(Base) : 328 Layout.getBaseClassOffset(Base); 329 330 SubVTTIndex = 331 CGM.getVTables().getSubVTTIndex(RD, BaseSubobject(Base, BaseOffset)); 332 assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!"); 333 } 334 335 if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) { 336 // A VTT parameter was passed to the constructor, use it. 337 VTT = LoadCXXVTT(); 338 VTT = Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex); 339 } else { 340 // We're the complete constructor, so get the VTT by name. 341 VTT = CGM.getVTables().GetAddrOfVTT(RD); 342 VTT = Builder.CreateConstInBoundsGEP2_64(VTT, 0, SubVTTIndex); 343 } 344 345 return VTT; 346 } 347 348 namespace { 349 /// Call the destructor for a direct base class. 350 struct CallBaseDtor : EHScopeStack::Cleanup { 351 const CXXRecordDecl *BaseClass; 352 bool BaseIsVirtual; 353 CallBaseDtor(const CXXRecordDecl *Base, bool BaseIsVirtual) 354 : BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {} 355 356 void Emit(CodeGenFunction &CGF, Flags flags) override { 357 const CXXRecordDecl *DerivedClass = 358 cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent(); 359 360 const CXXDestructorDecl *D = BaseClass->getDestructor(); 361 llvm::Value *Addr = 362 CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThis(), 363 DerivedClass, BaseClass, 364 BaseIsVirtual); 365 CGF.EmitCXXDestructorCall(D, Dtor_Base, BaseIsVirtual, 366 /*Delegating=*/false, Addr); 367 } 368 }; 369 370 /// A visitor which checks whether an initializer uses 'this' in a 371 /// way which requires the vtable to be properly set. 372 struct DynamicThisUseChecker : EvaluatedExprVisitor<DynamicThisUseChecker> { 373 typedef EvaluatedExprVisitor<DynamicThisUseChecker> super; 374 375 bool UsesThis; 376 377 DynamicThisUseChecker(ASTContext &C) : super(C), UsesThis(false) {} 378 379 // Black-list all explicit and implicit references to 'this'. 380 // 381 // Do we need to worry about external references to 'this' derived 382 // from arbitrary code? 
If so, then anything which runs arbitrary 383 // external code might potentially access the vtable. 384 void VisitCXXThisExpr(CXXThisExpr *E) { UsesThis = true; } 385 }; 386 } 387 388 static bool BaseInitializerUsesThis(ASTContext &C, const Expr *Init) { 389 DynamicThisUseChecker Checker(C); 390 Checker.Visit(const_cast<Expr*>(Init)); 391 return Checker.UsesThis; 392 } 393 394 static void EmitBaseInitializer(CodeGenFunction &CGF, 395 const CXXRecordDecl *ClassDecl, 396 CXXCtorInitializer *BaseInit, 397 CXXCtorType CtorType) { 398 assert(BaseInit->isBaseInitializer() && 399 "Must have base initializer!"); 400 401 llvm::Value *ThisPtr = CGF.LoadCXXThis(); 402 403 const Type *BaseType = BaseInit->getBaseClass(); 404 CXXRecordDecl *BaseClassDecl = 405 cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl()); 406 407 bool isBaseVirtual = BaseInit->isBaseVirtual(); 408 409 // The base constructor doesn't construct virtual bases. 410 if (CtorType == Ctor_Base && isBaseVirtual) 411 return; 412 413 // If the initializer for the base (other than the constructor 414 // itself) accesses 'this' in any way, we need to initialize the 415 // vtables. 416 if (BaseInitializerUsesThis(CGF.getContext(), BaseInit->getInit())) 417 CGF.InitializeVTablePointers(ClassDecl); 418 419 // We can pretend to be a complete class because it only matters for 420 // virtual bases, and we only do virtual bases for complete ctors. 421 llvm::Value *V = 422 CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl, 423 BaseClassDecl, 424 isBaseVirtual); 425 CharUnits Alignment = CGF.getContext().getTypeAlignInChars(BaseType); 426 AggValueSlot AggSlot = 427 AggValueSlot::forAddr(V, Alignment, Qualifiers(), 428 AggValueSlot::IsDestructed, 429 AggValueSlot::DoesNotNeedGCBarriers, 430 AggValueSlot::IsNotAliased); 431 432 CGF.EmitAggExpr(BaseInit->getInit(), AggSlot); 433 434 if (CGF.CGM.getLangOpts().Exceptions && 435 !BaseClassDecl->hasTrivialDestructor()) 436 CGF.EHStack.pushCleanup<CallBaseDtor>(EHCleanup, BaseClassDecl, 437 isBaseVirtual); 438 } 439 440 static void EmitAggMemberInitializer(CodeGenFunction &CGF, 441 LValue LHS, 442 Expr *Init, 443 llvm::Value *ArrayIndexVar, 444 QualType T, 445 ArrayRef<VarDecl *> ArrayIndexes, 446 unsigned Index) { 447 if (Index == ArrayIndexes.size()) { 448 LValue LV = LHS; 449 450 if (ArrayIndexVar) { 451 // If we have an array index variable, load it and use it as an offset. 452 // Then, increment the value. 453 llvm::Value *Dest = LHS.getAddress(); 454 llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar); 455 Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress"); 456 llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1); 457 Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc"); 458 CGF.Builder.CreateStore(Next, ArrayIndexVar); 459 460 // Update the LValue. 
461 LV.setAddress(Dest); 462 CharUnits Align = CGF.getContext().getTypeAlignInChars(T); 463 LV.setAlignment(std::min(Align, LV.getAlignment())); 464 } 465 466 switch (CGF.getEvaluationKind(T)) { 467 case TEK_Scalar: 468 CGF.EmitScalarInit(Init, /*decl*/ nullptr, LV, false); 469 break; 470 case TEK_Complex: 471 CGF.EmitComplexExprIntoLValue(Init, LV, /*isInit*/ true); 472 break; 473 case TEK_Aggregate: { 474 AggValueSlot Slot = 475 AggValueSlot::forLValue(LV, 476 AggValueSlot::IsDestructed, 477 AggValueSlot::DoesNotNeedGCBarriers, 478 AggValueSlot::IsNotAliased); 479 480 CGF.EmitAggExpr(Init, Slot); 481 break; 482 } 483 } 484 485 return; 486 } 487 488 const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(T); 489 assert(Array && "Array initialization without the array type?"); 490 llvm::Value *IndexVar 491 = CGF.GetAddrOfLocalVar(ArrayIndexes[Index]); 492 assert(IndexVar && "Array index variable not loaded"); 493 494 // Initialize this index variable to zero. 495 llvm::Value* Zero 496 = llvm::Constant::getNullValue( 497 CGF.ConvertType(CGF.getContext().getSizeType())); 498 CGF.Builder.CreateStore(Zero, IndexVar); 499 500 // Start the loop with a block that tests the condition. 501 llvm::BasicBlock *CondBlock = CGF.createBasicBlock("for.cond"); 502 llvm::BasicBlock *AfterFor = CGF.createBasicBlock("for.end"); 503 504 CGF.EmitBlock(CondBlock); 505 506 llvm::BasicBlock *ForBody = CGF.createBasicBlock("for.body"); 507 // Generate: if (loop-index < number-of-elements) fall to the loop body, 508 // otherwise, go to the block after the for-loop. 509 uint64_t NumElements = Array->getSize().getZExtValue(); 510 llvm::Value *Counter = CGF.Builder.CreateLoad(IndexVar); 511 llvm::Value *NumElementsPtr = 512 llvm::ConstantInt::get(Counter->getType(), NumElements); 513 llvm::Value *IsLess = CGF.Builder.CreateICmpULT(Counter, NumElementsPtr, 514 "isless"); 515 516 // If the condition is true, execute the body. 517 CGF.Builder.CreateCondBr(IsLess, ForBody, AfterFor); 518 519 CGF.EmitBlock(ForBody); 520 llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc"); 521 522 // Inside the loop body recurse to emit the inner loop or, eventually, the 523 // constructor call. 524 EmitAggMemberInitializer(CGF, LHS, Init, ArrayIndexVar, 525 Array->getElementType(), ArrayIndexes, Index + 1); 526 527 CGF.EmitBlock(ContinueBlock); 528 529 // Emit the increment of the loop counter. 530 llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1); 531 Counter = CGF.Builder.CreateLoad(IndexVar); 532 NextVal = CGF.Builder.CreateAdd(Counter, NextVal, "inc"); 533 CGF.Builder.CreateStore(NextVal, IndexVar); 534 535 // Finally, branch back up to the condition for the next iteration. 536 CGF.EmitBranch(CondBlock); 537 538 // Emit the fall-through block. 539 CGF.EmitBlock(AfterFor, true); 540 } 541 542 static void EmitMemberInitializer(CodeGenFunction &CGF, 543 const CXXRecordDecl *ClassDecl, 544 CXXCtorInitializer *MemberInit, 545 const CXXConstructorDecl *Constructor, 546 FunctionArgList &Args) { 547 assert(MemberInit->isAnyMemberInitializer() && 548 "Must have member initializer!"); 549 assert(MemberInit->getInit() && "Must have initializer!"); 550 551 // non-static data member initializers. 
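  // For illustration only, a CXXCtorInitializer handled here corresponds to
  // one entry of a constructor's initializer list, e.g.
  //   struct S {
  //     int x; double d;
  //     S(int n) : x(n), d(1.0) {}   // 'x(n)' and 'd(1.0)' reach this code
  //   };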
552 FieldDecl *Field = MemberInit->getAnyMember(); 553 QualType FieldType = Field->getType(); 554 555 llvm::Value *ThisPtr = CGF.LoadCXXThis(); 556 QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl); 557 LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy); 558 559 if (MemberInit->isIndirectMemberInitializer()) { 560 // If we are initializing an anonymous union field, drill down to 561 // the field. 562 IndirectFieldDecl *IndirectField = MemberInit->getIndirectMember(); 563 for (const auto *I : IndirectField->chain()) 564 LHS = CGF.EmitLValueForFieldInitialization(LHS, cast<FieldDecl>(I)); 565 FieldType = MemberInit->getIndirectMember()->getAnonField()->getType(); 566 } else { 567 LHS = CGF.EmitLValueForFieldInitialization(LHS, Field); 568 } 569 570 // Special case: if we are in a copy or move constructor, and we are copying 571 // an array of PODs or classes with trivial copy constructors, ignore the 572 // AST and perform the copy we know is equivalent. 573 // FIXME: This is hacky at best... if we had a bit more explicit information 574 // in the AST, we could generalize it more easily. 575 const ConstantArrayType *Array 576 = CGF.getContext().getAsConstantArrayType(FieldType); 577 if (Array && Constructor->isDefaulted() && 578 Constructor->isCopyOrMoveConstructor()) { 579 QualType BaseElementTy = CGF.getContext().getBaseElementType(Array); 580 CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit()); 581 if (BaseElementTy.isPODType(CGF.getContext()) || 582 (CE && CE->getConstructor()->isTrivial())) { 583 unsigned SrcArgIndex = 584 CGF.CGM.getCXXABI().getSrcArgforCopyCtor(Constructor, Args); 585 llvm::Value *SrcPtr 586 = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex])); 587 LValue ThisRHSLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy); 588 LValue Src = CGF.EmitLValueForFieldInitialization(ThisRHSLV, Field); 589 590 // Copy the aggregate. 591 CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType, 592 LHS.isVolatileQualified()); 593 return; 594 } 595 } 596 597 ArrayRef<VarDecl *> ArrayIndexes; 598 if (MemberInit->getNumArrayIndices()) 599 ArrayIndexes = MemberInit->getArrayIndexes(); 600 CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit(), ArrayIndexes, 601 MemberInit->getMemberLocation()); 602 } 603 604 void CodeGenFunction::EmitInitializerForField(FieldDecl *Field, LValue LHS, 605 Expr *Init, 606 ArrayRef<VarDecl *> ArrayIndexes, 607 SourceLocation DbgLoc) { 608 QualType FieldType = Field->getType(); 609 switch (getEvaluationKind(FieldType)) { 610 case TEK_Scalar: 611 if (LHS.isSimple()) { 612 EmitExprAsInit(Init, Field, LHS, false, DbgLoc); 613 } else { 614 RValue RHS = RValue::get(EmitScalarExpr(Init)); 615 EmitStoreThroughLValue(RHS, LHS); 616 } 617 break; 618 case TEK_Complex: 619 EmitComplexExprIntoLValue(Init, LHS, /*isInit*/ true, DbgLoc); 620 break; 621 case TEK_Aggregate: { 622 llvm::Value *ArrayIndexVar = nullptr; 623 if (ArrayIndexes.size()) { 624 llvm::Type *SizeTy = ConvertType(getContext().getSizeType()); 625 626 // The LHS is a pointer to the first object we'll be constructing, as 627 // a flat array. 
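    // Sketch of the strategy below (assuming FieldType is a constant array
    // such as 'T buf[4][8]'): treat the member as a flat T*, zero-initialize
    // a single size_t induction variable, and let EmitAggMemberInitializer
    // emit one nested loop per array dimension that constructs each element
    // and increments the index.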
628 QualType BaseElementTy = getContext().getBaseElementType(FieldType); 629 llvm::Type *BasePtr = ConvertType(BaseElementTy); 630 BasePtr = llvm::PointerType::getUnqual(BasePtr); 631 llvm::Value *BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(), 632 BasePtr); 633 LHS = MakeAddrLValue(BaseAddrPtr, BaseElementTy); 634 635 // Create an array index that will be used to walk over all of the 636 // objects we're constructing. 637 ArrayIndexVar = CreateTempAlloca(SizeTy, "object.index"); 638 llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy); 639 Builder.CreateStore(Zero, ArrayIndexVar); 640 641 642 // Emit the block variables for the array indices, if any. 643 for (unsigned I = 0, N = ArrayIndexes.size(); I != N; ++I) 644 EmitAutoVarDecl(*ArrayIndexes[I]); 645 } 646 647 EmitAggMemberInitializer(*this, LHS, Init, ArrayIndexVar, FieldType, 648 ArrayIndexes, 0); 649 } 650 } 651 652 // Ensure that we destroy this object if an exception is thrown 653 // later in the constructor. 654 QualType::DestructionKind dtorKind = FieldType.isDestructedType(); 655 if (needsEHCleanup(dtorKind)) 656 pushEHDestroy(dtorKind, LHS.getAddress(), FieldType); 657 } 658 659 /// Checks whether the given constructor is a valid subject for the 660 /// complete-to-base constructor delegation optimization, i.e. 661 /// emitting the complete constructor as a simple call to the base 662 /// constructor. 663 static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor) { 664 665 // Currently we disable the optimization for classes with virtual 666 // bases because (1) the addresses of parameter variables need to be 667 // consistent across all initializers but (2) the delegate function 668 // call necessarily creates a second copy of the parameter variable. 669 // 670 // The limiting example (purely theoretical AFAIK): 671 // struct A { A(int &c) { c++; } }; 672 // struct B : virtual A { 673 // B(int count) : A(count) { printf("%d\n", count); } 674 // }; 675 // ...although even this example could in principle be emitted as a 676 // delegation since the address of the parameter doesn't escape. 677 if (Ctor->getParent()->getNumVBases()) { 678 // TODO: white-list trivial vbase initializers. This case wouldn't 679 // be subject to the restrictions below. 680 681 // TODO: white-list cases where: 682 // - there are no non-reference parameters to the constructor 683 // - the initializers don't access any non-reference parameters 684 // - the initializers don't take the address of non-reference 685 // parameters 686 // - etc. 687 // If we ever add any of the above cases, remember that: 688 // - function-try-blocks will always blacklist this optimization 689 // - we need to perform the constructor prologue and cleanup in 690 // EmitConstructorBody. 691 692 return false; 693 } 694 695 // We also disable the optimization for variadic functions because 696 // it's impossible to "re-pass" varargs. 697 if (Ctor->getType()->getAs<FunctionProtoType>()->isVariadic()) 698 return false; 699 700 // FIXME: Decide if we can do a delegation of a delegating constructor. 701 if (Ctor->isDelegatingConstructor()) 702 return false; 703 704 return true; 705 } 706 707 // Emit code in ctor (Prologue==true) or dtor (Prologue==false) 708 // to poison the extra field paddings inserted under 709 // -fsanitize-address-field-padding=1|2. 710 void CodeGenFunction::EmitAsanPrologueOrEpilogue(bool Prologue) { 711 ASTContext &Context = getContext(); 712 const CXXRecordDecl *ClassDecl = 713 Prologue ? 
cast<CXXConstructorDecl>(CurGD.getDecl())->getParent() 714 : cast<CXXDestructorDecl>(CurGD.getDecl())->getParent(); 715 if (!ClassDecl->mayInsertExtraPadding()) return; 716 717 struct SizeAndOffset { 718 uint64_t Size; 719 uint64_t Offset; 720 }; 721 722 unsigned PtrSize = CGM.getDataLayout().getPointerSizeInBits(); 723 const ASTRecordLayout &Info = Context.getASTRecordLayout(ClassDecl); 724 725 // Populate sizes and offsets of fields. 726 SmallVector<SizeAndOffset, 16> SSV(Info.getFieldCount()); 727 for (unsigned i = 0, e = Info.getFieldCount(); i != e; ++i) 728 SSV[i].Offset = 729 Context.toCharUnitsFromBits(Info.getFieldOffset(i)).getQuantity(); 730 731 size_t NumFields = 0; 732 for (const auto *Field : ClassDecl->fields()) { 733 const FieldDecl *D = Field; 734 std::pair<CharUnits, CharUnits> FieldInfo = 735 Context.getTypeInfoInChars(D->getType()); 736 CharUnits FieldSize = FieldInfo.first; 737 assert(NumFields < SSV.size()); 738 SSV[NumFields].Size = D->isBitField() ? 0 : FieldSize.getQuantity(); 739 NumFields++; 740 } 741 assert(NumFields == SSV.size()); 742 if (SSV.size() <= 1) return; 743 744 // We will insert calls to __asan_* run-time functions. 745 // LLVM AddressSanitizer pass may decide to inline them later. 746 llvm::Type *Args[2] = {IntPtrTy, IntPtrTy}; 747 llvm::FunctionType *FTy = 748 llvm::FunctionType::get(CGM.VoidTy, Args, false); 749 llvm::Constant *F = CGM.CreateRuntimeFunction( 750 FTy, Prologue ? "__asan_poison_intra_object_redzone" 751 : "__asan_unpoison_intra_object_redzone"); 752 753 llvm::Value *ThisPtr = LoadCXXThis(); 754 ThisPtr = Builder.CreatePtrToInt(ThisPtr, IntPtrTy); 755 uint64_t TypeSize = Info.getNonVirtualSize().getQuantity(); 756 // For each field check if it has sufficient padding, 757 // if so (un)poison it with a call. 758 for (size_t i = 0; i < SSV.size(); i++) { 759 uint64_t AsanAlignment = 8; 760 uint64_t NextField = i == SSV.size() - 1 ? TypeSize : SSV[i + 1].Offset; 761 uint64_t PoisonSize = NextField - SSV[i].Offset - SSV[i].Size; 762 uint64_t EndOffset = SSV[i].Offset + SSV[i].Size; 763 if (PoisonSize < AsanAlignment || !SSV[i].Size || 764 (NextField % AsanAlignment) != 0) 765 continue; 766 Builder.CreateCall2( 767 F, Builder.CreateAdd(ThisPtr, Builder.getIntN(PtrSize, EndOffset)), 768 Builder.getIntN(PtrSize, PoisonSize)); 769 } 770 } 771 772 /// EmitConstructorBody - Emits the body of the current constructor. 773 void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) { 774 EmitAsanPrologueOrEpilogue(true); 775 const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(CurGD.getDecl()); 776 CXXCtorType CtorType = CurGD.getCtorType(); 777 778 assert((CGM.getTarget().getCXXABI().hasConstructorVariants() || 779 CtorType == Ctor_Complete) && 780 "can only generate complete ctor for this ABI"); 781 782 // Before we go any further, try the complete->base constructor 783 // delegation optimization. 784 if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor) && 785 CGM.getTarget().getCXXABI().hasConstructorVariants()) { 786 if (CGDebugInfo *DI = getDebugInfo()) 787 DI->EmitLocation(Builder, Ctor->getLocEnd()); 788 EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args, Ctor->getLocEnd()); 789 return; 790 } 791 792 const FunctionDecl *Definition = 0; 793 Stmt *Body = Ctor->getBody(Definition); 794 assert(Definition == Ctor && "emitting wrong constructor body"); 795 796 // Enter the function-try-block before the constructor prologue if 797 // applicable. 
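  // For example (illustrative only), a constructor written as a
  // function-try-block looks like:
  //   A::A(int n) try : member(n) {
  //     /* body */
  //   } catch (...) {
  //     /* entered if an initializer or the body throws */
  //   }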
798 bool IsTryBody = (Body && isa<CXXTryStmt>(Body)); 799 if (IsTryBody) 800 EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true); 801 802 RegionCounter Cnt = getPGORegionCounter(Body); 803 Cnt.beginRegion(Builder); 804 805 RunCleanupsScope RunCleanups(*this); 806 807 // TODO: in restricted cases, we can emit the vbase initializers of 808 // a complete ctor and then delegate to the base ctor. 809 810 // Emit the constructor prologue, i.e. the base and member 811 // initializers. 812 EmitCtorPrologue(Ctor, CtorType, Args); 813 814 // Emit the body of the statement. 815 if (IsTryBody) 816 EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock()); 817 else if (Body) 818 EmitStmt(Body); 819 820 // Emit any cleanup blocks associated with the member or base 821 // initializers, which includes (along the exceptional path) the 822 // destructors for those members and bases that were fully 823 // constructed. 824 RunCleanups.ForceCleanup(); 825 826 if (IsTryBody) 827 ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true); 828 } 829 830 namespace { 831 /// RAII object to indicate that codegen is copying the value representation 832 /// instead of the object representation. Useful when copying a struct or 833 /// class which has uninitialized members and we're only performing 834 /// lvalue-to-rvalue conversion on the object but not its members. 835 class CopyingValueRepresentation { 836 public: 837 explicit CopyingValueRepresentation(CodeGenFunction &CGF) 838 : CGF(CGF), OldSanOpts(CGF.SanOpts) { 839 CGF.SanOpts.set(SanitizerKind::Bool, false); 840 CGF.SanOpts.set(SanitizerKind::Enum, false); 841 } 842 ~CopyingValueRepresentation() { 843 CGF.SanOpts = OldSanOpts; 844 } 845 private: 846 CodeGenFunction &CGF; 847 SanitizerSet OldSanOpts; 848 }; 849 } 850 851 namespace { 852 class FieldMemcpyizer { 853 public: 854 FieldMemcpyizer(CodeGenFunction &CGF, const CXXRecordDecl *ClassDecl, 855 const VarDecl *SrcRec) 856 : CGF(CGF), ClassDecl(ClassDecl), SrcRec(SrcRec), 857 RecLayout(CGF.getContext().getASTRecordLayout(ClassDecl)), 858 FirstField(nullptr), LastField(nullptr), FirstFieldOffset(0), 859 LastFieldOffset(0), LastAddedFieldIndex(0) {} 860 861 bool isMemcpyableField(FieldDecl *F) const { 862 // Never memcpy fields when we are adding poisoned paddings. 863 if (CGF.getContext().getLangOpts().SanitizeAddressFieldPadding) 864 return false; 865 Qualifiers Qual = F->getType().getQualifiers(); 866 if (Qual.hasVolatile() || Qual.hasObjCLifetime()) 867 return false; 868 return true; 869 } 870 871 void addMemcpyableField(FieldDecl *F) { 872 if (!FirstField) 873 addInitialField(F); 874 else 875 addNextField(F); 876 } 877 878 CharUnits getMemcpySize(uint64_t FirstByteOffset) const { 879 unsigned LastFieldSize = 880 LastField->isBitField() ? 881 LastField->getBitWidthValue(CGF.getContext()) : 882 CGF.getContext().getTypeSize(LastField->getType()); 883 uint64_t MemcpySizeBits = 884 LastFieldOffset + LastFieldSize - FirstByteOffset + 885 CGF.getContext().getCharWidth() - 1; 886 CharUnits MemcpySize = 887 CGF.getContext().toCharUnitsFromBits(MemcpySizeBits); 888 return MemcpySize; 889 } 890 891 void emitMemcpy() { 892 // Give the subclass a chance to bail out if it feels the memcpy isn't 893 // worth it (e.g. Hasn't aggregated enough data). 
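    // reset() clears FirstField, so a caller that decides the aggregated run
    // is not worth a memcpy can simply call reset() first; the check below
    // then turns this into a no-op.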
894 if (!FirstField) { 895 return; 896 } 897 898 CharUnits Alignment; 899 900 uint64_t FirstByteOffset; 901 if (FirstField->isBitField()) { 902 const CGRecordLayout &RL = 903 CGF.getTypes().getCGRecordLayout(FirstField->getParent()); 904 const CGBitFieldInfo &BFInfo = RL.getBitFieldInfo(FirstField); 905 Alignment = CharUnits::fromQuantity(BFInfo.StorageAlignment); 906 // FirstFieldOffset is not appropriate for bitfields, 907 // it won't tell us what the storage offset should be and thus might not 908 // be properly aligned. 909 // 910 // Instead calculate the storage offset using the offset of the field in 911 // the struct type. 912 const llvm::DataLayout &DL = CGF.CGM.getDataLayout(); 913 FirstByteOffset = 914 DL.getStructLayout(RL.getLLVMType()) 915 ->getElementOffsetInBits(RL.getLLVMFieldNo(FirstField)); 916 } else { 917 Alignment = CGF.getContext().getDeclAlign(FirstField); 918 FirstByteOffset = FirstFieldOffset; 919 } 920 921 assert((CGF.getContext().toCharUnitsFromBits(FirstByteOffset) % 922 Alignment) == 0 && "Bad field alignment."); 923 924 CharUnits MemcpySize = getMemcpySize(FirstByteOffset); 925 QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl); 926 llvm::Value *ThisPtr = CGF.LoadCXXThis(); 927 LValue DestLV = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy); 928 LValue Dest = CGF.EmitLValueForFieldInitialization(DestLV, FirstField); 929 llvm::Value *SrcPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(SrcRec)); 930 LValue SrcLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy); 931 LValue Src = CGF.EmitLValueForFieldInitialization(SrcLV, FirstField); 932 933 emitMemcpyIR(Dest.isBitField() ? Dest.getBitFieldAddr() : Dest.getAddress(), 934 Src.isBitField() ? Src.getBitFieldAddr() : Src.getAddress(), 935 MemcpySize, Alignment); 936 reset(); 937 } 938 939 void reset() { 940 FirstField = nullptr; 941 } 942 943 protected: 944 CodeGenFunction &CGF; 945 const CXXRecordDecl *ClassDecl; 946 947 private: 948 949 void emitMemcpyIR(llvm::Value *DestPtr, llvm::Value *SrcPtr, 950 CharUnits Size, CharUnits Alignment) { 951 llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType()); 952 llvm::Type *DBP = 953 llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), DPT->getAddressSpace()); 954 DestPtr = CGF.Builder.CreateBitCast(DestPtr, DBP); 955 956 llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType()); 957 llvm::Type *SBP = 958 llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), SPT->getAddressSpace()); 959 SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, SBP); 960 961 CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, Size.getQuantity(), 962 Alignment.getQuantity()); 963 } 964 965 void addInitialField(FieldDecl *F) { 966 FirstField = F; 967 LastField = F; 968 FirstFieldOffset = RecLayout.getFieldOffset(F->getFieldIndex()); 969 LastFieldOffset = FirstFieldOffset; 970 LastAddedFieldIndex = F->getFieldIndex(); 971 return; 972 } 973 974 void addNextField(FieldDecl *F) { 975 // For the most part, the following invariant will hold: 976 // F->getFieldIndex() == LastAddedFieldIndex + 1 977 // The one exception is that Sema won't add a copy-initializer for an 978 // unnamed bitfield, which will show up here as a gap in the sequence. 979 assert(F->getFieldIndex() >= LastAddedFieldIndex + 1 && 980 "Cannot aggregate fields out of order."); 981 LastAddedFieldIndex = F->getFieldIndex(); 982 983 // The 'first' and 'last' fields are chosen by offset, rather than field 984 // index. This allows the code to support bitfields, as well as regular 985 // fields. 
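      // Illustrative example (not from the original source): for
      //   struct S { int a; unsigned b : 3; unsigned c : 29; int d; };
      // aggregating a, b, c and d leaves FirstField == a and LastField == d,
      // chosen by their bit offsets in the record layout rather than by
      // declaration order.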
      uint64_t FOffset = RecLayout.getFieldOffset(F->getFieldIndex());
      if (FOffset < FirstFieldOffset) {
        FirstField = F;
        FirstFieldOffset = FOffset;
      } else if (FOffset > LastFieldOffset) {
        LastField = F;
        LastFieldOffset = FOffset;
      }
    }

    const VarDecl *SrcRec;
    const ASTRecordLayout &RecLayout;
    FieldDecl *FirstField;
    FieldDecl *LastField;
    uint64_t FirstFieldOffset, LastFieldOffset;
    unsigned LastAddedFieldIndex;
  };

  class ConstructorMemcpyizer : public FieldMemcpyizer {
  private:

    /// Get the source argument for a copy or move constructor. Returns null
    /// if this is not a defaulted copy or move constructor.
    static const VarDecl *getTrivialCopySource(CodeGenFunction &CGF,
                                               const CXXConstructorDecl *CD,
                                               FunctionArgList &Args) {
      if (CD->isCopyOrMoveConstructor() && CD->isDefaulted())
        return Args[CGF.CGM.getCXXABI().getSrcArgforCopyCtor(CD, Args)];
      return nullptr;
    }

    // Returns true if a CXXCtorInitializer represents a member initialization
    // that can be rolled into a memcpy.
    bool isMemberInitMemcpyable(CXXCtorInitializer *MemberInit) const {
      if (!MemcpyableCtor)
        return false;
      FieldDecl *Field = MemberInit->getMember();
      assert(Field && "No field for member init.");
      QualType FieldType = Field->getType();
      CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());

      // Bail out on members that are neither trivially constructed nor of a
      // trivially copyable (or reference) type.
      if (!(CE && CE->getConstructor()->isTrivial()) &&
          !(FieldType.isTriviallyCopyableType(CGF.getContext()) ||
            FieldType->isReferenceType()))
        return false;

      // Bail out on volatile fields.
      if (!isMemcpyableField(Field))
        return false;

      // Otherwise we're good.
      return true;
    }

  public:
    ConstructorMemcpyizer(CodeGenFunction &CGF, const CXXConstructorDecl *CD,
                          FunctionArgList &Args)
      : FieldMemcpyizer(CGF, CD->getParent(), getTrivialCopySource(CGF, CD, Args)),
        ConstructorDecl(CD),
        MemcpyableCtor(CD->isDefaulted() &&
                       CD->isCopyOrMoveConstructor() &&
                       CGF.getLangOpts().getGC() == LangOptions::NonGC),
        Args(Args) { }

    void addMemberInitializer(CXXCtorInitializer *MemberInit) {
      if (isMemberInitMemcpyable(MemberInit)) {
        AggregatedInits.push_back(MemberInit);
        addMemcpyableField(MemberInit->getMember());
      } else {
        emitAggregatedInits();
        EmitMemberInitializer(CGF, ConstructorDecl->getParent(), MemberInit,
                              ConstructorDecl, Args);
      }
    }

    void emitAggregatedInits() {
      if (AggregatedInits.size() <= 1) {
        // This memcpy is too small to be worthwhile. Fall back on default
        // codegen.
        if (!AggregatedInits.empty()) {
          CopyingValueRepresentation CVR(CGF);
          EmitMemberInitializer(CGF, ConstructorDecl->getParent(),
                                AggregatedInits[0], ConstructorDecl, Args);
        }
        reset();
        return;
      }

      pushEHDestructors();
      emitMemcpy();
      AggregatedInits.clear();
    }

    void pushEHDestructors() {
      llvm::Value *ThisPtr = CGF.LoadCXXThis();
      QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
      LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);

      for (unsigned i = 0; i < AggregatedInits.size(); ++i) {
        QualType FieldType = AggregatedInits[i]->getMember()->getType();
        QualType::DestructionKind dtorKind = FieldType.isDestructedType();
        if (CGF.needsEHCleanup(dtorKind))
          CGF.pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
      }
    }

    void finish() {
      emitAggregatedInits();
    }

  private:
    const CXXConstructorDecl *ConstructorDecl;
    bool MemcpyableCtor;
    FunctionArgList &Args;
    SmallVector<CXXCtorInitializer*, 16> AggregatedInits;
  };

  class AssignmentMemcpyizer : public FieldMemcpyizer {
  private:

    // Returns the memcpyable field copied by the given statement, if one
    // exists. Otherwise returns null.
    FieldDecl *getMemcpyableField(Stmt *S) {
      if (!AssignmentsMemcpyable)
        return nullptr;
      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(S)) {
        // Recognise trivial assignments.
        if (BO->getOpcode() != BO_Assign)
          return nullptr;
        MemberExpr *ME = dyn_cast<MemberExpr>(BO->getLHS());
        if (!ME)
          return nullptr;
        FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
        if (!Field || !isMemcpyableField(Field))
          return nullptr;
        Stmt *RHS = BO->getRHS();
        if (ImplicitCastExpr *EC = dyn_cast<ImplicitCastExpr>(RHS))
          RHS = EC->getSubExpr();
        if (!RHS)
          return nullptr;
        MemberExpr *ME2 = dyn_cast<MemberExpr>(RHS);
        if (!ME2 || dyn_cast<FieldDecl>(ME2->getMemberDecl()) != Field)
          return nullptr;
        return Field;
      } else if (CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(S)) {
        CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MCE->getCalleeDecl());
        if (!(MD && (MD->isCopyAssignmentOperator() ||
                     MD->isMoveAssignmentOperator()) &&
              MD->isTrivial()))
          return nullptr;
        MemberExpr *IOA = dyn_cast<MemberExpr>(MCE->getImplicitObjectArgument());
        if (!IOA)
          return nullptr;
        FieldDecl *Field = dyn_cast<FieldDecl>(IOA->getMemberDecl());
        if (!Field || !isMemcpyableField(Field))
          return nullptr;
        MemberExpr *Arg0 = dyn_cast<MemberExpr>(MCE->getArg(0));
        if (!Arg0 || Field != dyn_cast<FieldDecl>(Arg0->getMemberDecl()))
          return nullptr;
        return Field;
      } else if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
        FunctionDecl *FD = dyn_cast<FunctionDecl>(CE->getCalleeDecl());
        if (!FD || FD->getBuiltinID() != Builtin::BI__builtin_memcpy)
          return nullptr;
        Expr *DstPtr = CE->getArg(0);
        if (ImplicitCastExpr *DC = dyn_cast<ImplicitCastExpr>(DstPtr))
          DstPtr = DC->getSubExpr();
        UnaryOperator *DUO = dyn_cast<UnaryOperator>(DstPtr);
        if (!DUO || DUO->getOpcode() != UO_AddrOf)
          return nullptr;
        MemberExpr *ME = dyn_cast<MemberExpr>(DUO->getSubExpr());
        if (!ME)
          return nullptr;
        FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
        if (!Field || !isMemcpyableField(Field))
          return nullptr;
        Expr *SrcPtr =
CE->getArg(1); 1164 if (ImplicitCastExpr *SC = dyn_cast<ImplicitCastExpr>(SrcPtr)) 1165 SrcPtr = SC->getSubExpr(); 1166 UnaryOperator *SUO = dyn_cast<UnaryOperator>(SrcPtr); 1167 if (!SUO || SUO->getOpcode() != UO_AddrOf) 1168 return nullptr; 1169 MemberExpr *ME2 = dyn_cast<MemberExpr>(SUO->getSubExpr()); 1170 if (!ME2 || Field != dyn_cast<FieldDecl>(ME2->getMemberDecl())) 1171 return nullptr; 1172 return Field; 1173 } 1174 1175 return nullptr; 1176 } 1177 1178 bool AssignmentsMemcpyable; 1179 SmallVector<Stmt*, 16> AggregatedStmts; 1180 1181 public: 1182 1183 AssignmentMemcpyizer(CodeGenFunction &CGF, const CXXMethodDecl *AD, 1184 FunctionArgList &Args) 1185 : FieldMemcpyizer(CGF, AD->getParent(), Args[Args.size() - 1]), 1186 AssignmentsMemcpyable(CGF.getLangOpts().getGC() == LangOptions::NonGC) { 1187 assert(Args.size() == 2); 1188 } 1189 1190 void emitAssignment(Stmt *S) { 1191 FieldDecl *F = getMemcpyableField(S); 1192 if (F) { 1193 addMemcpyableField(F); 1194 AggregatedStmts.push_back(S); 1195 } else { 1196 emitAggregatedStmts(); 1197 CGF.EmitStmt(S); 1198 } 1199 } 1200 1201 void emitAggregatedStmts() { 1202 if (AggregatedStmts.size() <= 1) { 1203 if (!AggregatedStmts.empty()) { 1204 CopyingValueRepresentation CVR(CGF); 1205 CGF.EmitStmt(AggregatedStmts[0]); 1206 } 1207 reset(); 1208 } 1209 1210 emitMemcpy(); 1211 AggregatedStmts.clear(); 1212 } 1213 1214 void finish() { 1215 emitAggregatedStmts(); 1216 } 1217 }; 1218 1219 } 1220 1221 /// EmitCtorPrologue - This routine generates necessary code to initialize 1222 /// base classes and non-static data members belonging to this constructor. 1223 void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD, 1224 CXXCtorType CtorType, 1225 FunctionArgList &Args) { 1226 if (CD->isDelegatingConstructor()) 1227 return EmitDelegatingCXXConstructorCall(CD, Args); 1228 1229 const CXXRecordDecl *ClassDecl = CD->getParent(); 1230 1231 CXXConstructorDecl::init_const_iterator B = CD->init_begin(), 1232 E = CD->init_end(); 1233 1234 llvm::BasicBlock *BaseCtorContinueBB = nullptr; 1235 if (ClassDecl->getNumVBases() && 1236 !CGM.getTarget().getCXXABI().hasConstructorVariants()) { 1237 // The ABIs that don't have constructor variants need to put a branch 1238 // before the virtual base initialization code. 1239 BaseCtorContinueBB = 1240 CGM.getCXXABI().EmitCtorCompleteObjectHandler(*this, ClassDecl); 1241 assert(BaseCtorContinueBB); 1242 } 1243 1244 // Virtual base initializers first. 1245 for (; B != E && (*B)->isBaseInitializer() && (*B)->isBaseVirtual(); B++) { 1246 EmitBaseInitializer(*this, ClassDecl, *B, CtorType); 1247 } 1248 1249 if (BaseCtorContinueBB) { 1250 // Complete object handler should continue to the remaining initializers. 1251 Builder.CreateBr(BaseCtorContinueBB); 1252 EmitBlock(BaseCtorContinueBB); 1253 } 1254 1255 // Then, non-virtual base initializers. 1256 for (; B != E && (*B)->isBaseInitializer(); B++) { 1257 assert(!(*B)->isBaseVirtual()); 1258 EmitBaseInitializer(*this, ClassDecl, *B, CtorType); 1259 } 1260 1261 InitializeVTablePointers(ClassDecl); 1262 1263 // And finally, initialize class members. 
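  // Informational summary of the ordering implemented in this function:
  //   1. virtual base initializers (skipped entirely by base-object ctors),
  //   2. non-virtual base initializers,
  //   3. vtable pointer initialization,
  //   4. non-static data member initializers, which ConstructorMemcpyizer
  //      below may coalesce into a single memcpy.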
1264 FieldConstructionScope FCS(*this, CXXThisValue); 1265 ConstructorMemcpyizer CM(*this, CD, Args); 1266 for (; B != E; B++) { 1267 CXXCtorInitializer *Member = (*B); 1268 assert(!Member->isBaseInitializer()); 1269 assert(Member->isAnyMemberInitializer() && 1270 "Delegating initializer on non-delegating constructor"); 1271 CM.addMemberInitializer(Member); 1272 } 1273 CM.finish(); 1274 } 1275 1276 static bool 1277 FieldHasTrivialDestructorBody(ASTContext &Context, const FieldDecl *Field); 1278 1279 static bool 1280 HasTrivialDestructorBody(ASTContext &Context, 1281 const CXXRecordDecl *BaseClassDecl, 1282 const CXXRecordDecl *MostDerivedClassDecl) 1283 { 1284 // If the destructor is trivial we don't have to check anything else. 1285 if (BaseClassDecl->hasTrivialDestructor()) 1286 return true; 1287 1288 if (!BaseClassDecl->getDestructor()->hasTrivialBody()) 1289 return false; 1290 1291 // Check fields. 1292 for (const auto *Field : BaseClassDecl->fields()) 1293 if (!FieldHasTrivialDestructorBody(Context, Field)) 1294 return false; 1295 1296 // Check non-virtual bases. 1297 for (const auto &I : BaseClassDecl->bases()) { 1298 if (I.isVirtual()) 1299 continue; 1300 1301 const CXXRecordDecl *NonVirtualBase = 1302 cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl()); 1303 if (!HasTrivialDestructorBody(Context, NonVirtualBase, 1304 MostDerivedClassDecl)) 1305 return false; 1306 } 1307 1308 if (BaseClassDecl == MostDerivedClassDecl) { 1309 // Check virtual bases. 1310 for (const auto &I : BaseClassDecl->vbases()) { 1311 const CXXRecordDecl *VirtualBase = 1312 cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl()); 1313 if (!HasTrivialDestructorBody(Context, VirtualBase, 1314 MostDerivedClassDecl)) 1315 return false; 1316 } 1317 } 1318 1319 return true; 1320 } 1321 1322 static bool 1323 FieldHasTrivialDestructorBody(ASTContext &Context, 1324 const FieldDecl *Field) 1325 { 1326 QualType FieldBaseElementType = Context.getBaseElementType(Field->getType()); 1327 1328 const RecordType *RT = FieldBaseElementType->getAs<RecordType>(); 1329 if (!RT) 1330 return true; 1331 1332 CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl()); 1333 return HasTrivialDestructorBody(Context, FieldClassDecl, FieldClassDecl); 1334 } 1335 1336 /// CanSkipVTablePointerInitialization - Check whether we need to initialize 1337 /// any vtable pointers before calling this destructor. 1338 static bool CanSkipVTablePointerInitialization(ASTContext &Context, 1339 const CXXDestructorDecl *Dtor) { 1340 if (!Dtor->hasTrivialBody()) 1341 return false; 1342 1343 // Check the fields. 1344 const CXXRecordDecl *ClassDecl = Dtor->getParent(); 1345 for (const auto *Field : ClassDecl->fields()) 1346 if (!FieldHasTrivialDestructorBody(Context, Field)) 1347 return false; 1348 1349 return true; 1350 } 1351 1352 /// EmitDestructorBody - Emits the body of the current destructor. 1353 void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) { 1354 const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CurGD.getDecl()); 1355 CXXDtorType DtorType = CurGD.getDtorType(); 1356 1357 // The call to operator delete in a deleting destructor happens 1358 // outside of the function-try-block, which means it's always 1359 // possible to delegate the destructor body to the complete 1360 // destructor. Do so. 
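  // Background (Itanium C++ ABI terminology): the base-object destructor (D2)
  // destroys members and non-virtual bases, the complete-object destructor
  // (D1) also destroys virtual bases, and the deleting destructor (D0) runs
  // D1 and then calls operator delete.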
1361 if (DtorType == Dtor_Deleting) { 1362 EnterDtorCleanups(Dtor, Dtor_Deleting); 1363 EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false, 1364 /*Delegating=*/false, LoadCXXThis()); 1365 PopCleanupBlock(); 1366 return; 1367 } 1368 1369 Stmt *Body = Dtor->getBody(); 1370 1371 // If the body is a function-try-block, enter the try before 1372 // anything else. 1373 bool isTryBody = (Body && isa<CXXTryStmt>(Body)); 1374 if (isTryBody) 1375 EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true); 1376 EmitAsanPrologueOrEpilogue(false); 1377 1378 // Enter the epilogue cleanups. 1379 RunCleanupsScope DtorEpilogue(*this); 1380 1381 // If this is the complete variant, just invoke the base variant; 1382 // the epilogue will destruct the virtual bases. But we can't do 1383 // this optimization if the body is a function-try-block, because 1384 // we'd introduce *two* handler blocks. In the Microsoft ABI, we 1385 // always delegate because we might not have a definition in this TU. 1386 switch (DtorType) { 1387 case Dtor_Comdat: 1388 llvm_unreachable("not expecting a COMDAT"); 1389 1390 case Dtor_Deleting: llvm_unreachable("already handled deleting case"); 1391 1392 case Dtor_Complete: 1393 assert((Body || getTarget().getCXXABI().isMicrosoft()) && 1394 "can't emit a dtor without a body for non-Microsoft ABIs"); 1395 1396 // Enter the cleanup scopes for virtual bases. 1397 EnterDtorCleanups(Dtor, Dtor_Complete); 1398 1399 if (!isTryBody) { 1400 EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false, 1401 /*Delegating=*/false, LoadCXXThis()); 1402 break; 1403 } 1404 // Fallthrough: act like we're in the base variant. 1405 1406 case Dtor_Base: 1407 assert(Body); 1408 1409 RegionCounter Cnt = getPGORegionCounter(Body); 1410 Cnt.beginRegion(Builder); 1411 1412 // Enter the cleanup scopes for fields and non-virtual bases. 1413 EnterDtorCleanups(Dtor, Dtor_Base); 1414 1415 // Initialize the vtable pointers before entering the body. 1416 if (!CanSkipVTablePointerInitialization(getContext(), Dtor)) 1417 InitializeVTablePointers(Dtor->getParent()); 1418 1419 if (isTryBody) 1420 EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock()); 1421 else if (Body) 1422 EmitStmt(Body); 1423 else { 1424 assert(Dtor->isImplicit() && "bodyless dtor not implicit"); 1425 // nothing to do besides what's in the epilogue 1426 } 1427 // -fapple-kext must inline any call to this dtor into 1428 // the caller's body. 1429 if (getLangOpts().AppleKext) 1430 CurFn->addFnAttr(llvm::Attribute::AlwaysInline); 1431 break; 1432 } 1433 1434 // Jump out through the epilogue cleanups. 1435 DtorEpilogue.ForceCleanup(); 1436 1437 // Exit the try if applicable. 1438 if (isTryBody) 1439 ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true); 1440 } 1441 1442 void CodeGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &Args) { 1443 const CXXMethodDecl *AssignOp = cast<CXXMethodDecl>(CurGD.getDecl()); 1444 const Stmt *RootS = AssignOp->getBody(); 1445 assert(isa<CompoundStmt>(RootS) && 1446 "Body of an implicit assignment operator should be compound stmt."); 1447 const CompoundStmt *RootCS = cast<CompoundStmt>(RootS); 1448 1449 LexicalScope Scope(*this, RootCS->getSourceRange()); 1450 1451 AssignmentMemcpyizer AM(*this, AssignOp, Args); 1452 for (auto *I : RootCS->body()) 1453 AM.emitAssignment(I); 1454 AM.finish(); 1455 } 1456 1457 namespace { 1458 /// Call the operator delete associated with the current destructor. 
1459 struct CallDtorDelete : EHScopeStack::Cleanup { 1460 CallDtorDelete() {} 1461 1462 void Emit(CodeGenFunction &CGF, Flags flags) override { 1463 const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl); 1464 const CXXRecordDecl *ClassDecl = Dtor->getParent(); 1465 CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(), 1466 CGF.getContext().getTagDeclType(ClassDecl)); 1467 } 1468 }; 1469 1470 struct CallDtorDeleteConditional : EHScopeStack::Cleanup { 1471 llvm::Value *ShouldDeleteCondition; 1472 public: 1473 CallDtorDeleteConditional(llvm::Value *ShouldDeleteCondition) 1474 : ShouldDeleteCondition(ShouldDeleteCondition) { 1475 assert(ShouldDeleteCondition != nullptr); 1476 } 1477 1478 void Emit(CodeGenFunction &CGF, Flags flags) override { 1479 llvm::BasicBlock *callDeleteBB = CGF.createBasicBlock("dtor.call_delete"); 1480 llvm::BasicBlock *continueBB = CGF.createBasicBlock("dtor.continue"); 1481 llvm::Value *ShouldCallDelete 1482 = CGF.Builder.CreateIsNull(ShouldDeleteCondition); 1483 CGF.Builder.CreateCondBr(ShouldCallDelete, continueBB, callDeleteBB); 1484 1485 CGF.EmitBlock(callDeleteBB); 1486 const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl); 1487 const CXXRecordDecl *ClassDecl = Dtor->getParent(); 1488 CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(), 1489 CGF.getContext().getTagDeclType(ClassDecl)); 1490 CGF.Builder.CreateBr(continueBB); 1491 1492 CGF.EmitBlock(continueBB); 1493 } 1494 }; 1495 1496 class DestroyField : public EHScopeStack::Cleanup { 1497 const FieldDecl *field; 1498 CodeGenFunction::Destroyer *destroyer; 1499 bool useEHCleanupForArray; 1500 1501 public: 1502 DestroyField(const FieldDecl *field, CodeGenFunction::Destroyer *destroyer, 1503 bool useEHCleanupForArray) 1504 : field(field), destroyer(destroyer), 1505 useEHCleanupForArray(useEHCleanupForArray) {} 1506 1507 void Emit(CodeGenFunction &CGF, Flags flags) override { 1508 // Find the address of the field. 1509 llvm::Value *thisValue = CGF.LoadCXXThis(); 1510 QualType RecordTy = CGF.getContext().getTagDeclType(field->getParent()); 1511 LValue ThisLV = CGF.MakeAddrLValue(thisValue, RecordTy); 1512 LValue LV = CGF.EmitLValueForField(ThisLV, field); 1513 assert(LV.isSimple()); 1514 1515 CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer, 1516 flags.isForNormalCleanup() && useEHCleanupForArray); 1517 } 1518 }; 1519 } 1520 1521 /// \brief Emit all code that comes at the end of class's 1522 /// destructor. This is to call destructors on members and base classes 1523 /// in reverse order of their construction. 1524 void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD, 1525 CXXDtorType DtorType) { 1526 assert((!DD->isTrivial() || DD->hasAttr<DLLExportAttr>()) && 1527 "Should not emit dtor epilogue for non-exported trivial dtor!"); 1528 1529 // The deleting-destructor phase just needs to call the appropriate 1530 // operator delete that Sema picked up. 1531 if (DtorType == Dtor_Deleting) { 1532 assert(DD->getOperatorDelete() && 1533 "operator delete missing - EnterDtorCleanups"); 1534 if (CXXStructorImplicitParamValue) { 1535 // If there is an implicit param to the deleting dtor, it's a boolean 1536 // telling whether we should call delete at the end of the dtor. 
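    // (One such case, as an aside, is the Microsoft ABI's deleting destructor,
    // whose caller passes an implicit flag selecting whether the storage
    // should actually be freed.)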
1537 EHStack.pushCleanup<CallDtorDeleteConditional>( 1538 NormalAndEHCleanup, CXXStructorImplicitParamValue); 1539 } else { 1540 EHStack.pushCleanup<CallDtorDelete>(NormalAndEHCleanup); 1541 } 1542 return; 1543 } 1544 1545 const CXXRecordDecl *ClassDecl = DD->getParent(); 1546 1547 // Unions have no bases and do not call field destructors. 1548 if (ClassDecl->isUnion()) 1549 return; 1550 1551 // The complete-destructor phase just destructs all the virtual bases. 1552 if (DtorType == Dtor_Complete) { 1553 1554 // We push them in the forward order so that they'll be popped in 1555 // the reverse order. 1556 for (const auto &Base : ClassDecl->vbases()) { 1557 CXXRecordDecl *BaseClassDecl 1558 = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl()); 1559 1560 // Ignore trivial destructors. 1561 if (BaseClassDecl->hasTrivialDestructor()) 1562 continue; 1563 1564 EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup, 1565 BaseClassDecl, 1566 /*BaseIsVirtual*/ true); 1567 } 1568 1569 return; 1570 } 1571 1572 assert(DtorType == Dtor_Base); 1573 1574 // Destroy non-virtual bases. 1575 for (const auto &Base : ClassDecl->bases()) { 1576 // Ignore virtual bases. 1577 if (Base.isVirtual()) 1578 continue; 1579 1580 CXXRecordDecl *BaseClassDecl = Base.getType()->getAsCXXRecordDecl(); 1581 1582 // Ignore trivial destructors. 1583 if (BaseClassDecl->hasTrivialDestructor()) 1584 continue; 1585 1586 EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup, 1587 BaseClassDecl, 1588 /*BaseIsVirtual*/ false); 1589 } 1590 1591 // Destroy direct fields. 1592 for (const auto *Field : ClassDecl->fields()) { 1593 QualType type = Field->getType(); 1594 QualType::DestructionKind dtorKind = type.isDestructedType(); 1595 if (!dtorKind) continue; 1596 1597 // Anonymous union members do not have their destructors called. 1598 const RecordType *RT = type->getAsUnionType(); 1599 if (RT && RT->getDecl()->isAnonymousStructOrUnion()) continue; 1600 1601 CleanupKind cleanupKind = getCleanupKind(dtorKind); 1602 EHStack.pushCleanup<DestroyField>(cleanupKind, Field, 1603 getDestroyer(dtorKind), 1604 cleanupKind & EHCleanup); 1605 } 1606 } 1607 1608 /// EmitCXXAggrConstructorCall - Emit a loop to call a particular 1609 /// constructor for each of several members of an array. 1610 /// 1611 /// \param ctor the constructor to call for each element 1612 /// \param arrayType the type of the array to initialize 1613 /// \param arrayBegin an arrayType* 1614 /// \param zeroInitialize true if each element should be 1615 /// zero-initialized before it is constructed 1616 void CodeGenFunction::EmitCXXAggrConstructorCall( 1617 const CXXConstructorDecl *ctor, const ConstantArrayType *arrayType, 1618 llvm::Value *arrayBegin, const CXXConstructExpr *E, bool zeroInitialize) { 1619 QualType elementType; 1620 llvm::Value *numElements = 1621 emitArrayLength(arrayType, elementType, arrayBegin); 1622 1623 EmitCXXAggrConstructorCall(ctor, numElements, arrayBegin, E, zeroInitialize); 1624 } 1625 1626 /// EmitCXXAggrConstructorCall - Emit a loop to call a particular 1627 /// constructor for each of several members of an array. 

/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
/// constructor for each of several members of an array.
///
/// \param ctor the constructor to call for each element
/// \param arrayType the type of the array to initialize
/// \param arrayBegin an arrayType*
/// \param zeroInitialize true if each element should be
///   zero-initialized before it is constructed
void CodeGenFunction::EmitCXXAggrConstructorCall(
    const CXXConstructorDecl *ctor, const ConstantArrayType *arrayType,
    llvm::Value *arrayBegin, const CXXConstructExpr *E, bool zeroInitialize) {
  QualType elementType;
  llvm::Value *numElements =
    emitArrayLength(arrayType, elementType, arrayBegin);

  EmitCXXAggrConstructorCall(ctor, numElements, arrayBegin, E, zeroInitialize);
}

/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
/// constructor for each of several members of an array.
///
/// \param ctor the constructor to call for each element
/// \param numElements the number of elements in the array;
///   may be zero
/// \param arrayBegin a T*, where T is the type constructed by ctor
/// \param zeroInitialize true if each element should be
///   zero-initialized before it is constructed
void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
                                                 llvm::Value *numElements,
                                                 llvm::Value *arrayBegin,
                                                 const CXXConstructExpr *E,
                                                 bool zeroInitialize) {

  // It's legal for numElements to be zero. This can happen both
  // dynamically, because x can be zero in 'new A[x]', and statically,
  // because of GCC extensions that permit zero-length arrays. There
  // are probably legitimate places where we could assume that this
  // doesn't happen, but it's not clear that it's worth it.
  llvm::BranchInst *zeroCheckBranch = nullptr;

  // Optimize for a constant count.
  llvm::ConstantInt *constantCount
    = dyn_cast<llvm::ConstantInt>(numElements);
  if (constantCount) {
    // Just skip out if the constant count is zero.
    if (constantCount->isZero()) return;

    // Otherwise, emit the check.
  } else {
    llvm::BasicBlock *loopBB = createBasicBlock("new.ctorloop");
    llvm::Value *iszero = Builder.CreateIsNull(numElements, "isempty");
    zeroCheckBranch = Builder.CreateCondBr(iszero, loopBB, loopBB);
    EmitBlock(loopBB);
  }

  // Find the end of the array.
  llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(arrayBegin, numElements,
                                                    "arrayctor.end");

  // Enter the loop, setting up a phi for the current location to initialize.
  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = createBasicBlock("arrayctor.loop");
  EmitBlock(loopBB);
  llvm::PHINode *cur = Builder.CreatePHI(arrayBegin->getType(), 2,
                                         "arrayctor.cur");
  cur->addIncoming(arrayBegin, entryBB);

  // Inside the loop body, emit the constructor call on the array element.

  QualType type = getContext().getTypeDeclType(ctor->getParent());

  // Zero initialize the storage, if requested.
  if (zeroInitialize)
    EmitNullInitialization(cur, type);

  // C++ [class.temporary]p4:
  //   There are two contexts in which temporaries are destroyed at a different
  //   point than the end of the full-expression. The first context is when a
  //   default constructor is called to initialize an element of an array.
  //   If the constructor has one or more default arguments, the destruction of
  //   every temporary created in a default argument expression is sequenced
  //   before the construction of the next array element, if any.

  {
    RunCleanupsScope Scope(*this);

    // Evaluate the constructor and its arguments in a regular
    // partial-destroy cleanup.
    if (getLangOpts().Exceptions &&
        !ctor->getParent()->hasTrivialDestructor()) {
      Destroyer *destroyer = destroyCXXObject;
      pushRegularPartialArrayCleanup(arrayBegin, cur, type, *destroyer);
    }

    EmitCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/false,
                           /*Delegating=*/false, cur, E);
  }

  // Go to the next element.
  llvm::Value *next =
    Builder.CreateInBoundsGEP(cur, llvm::ConstantInt::get(SizeTy, 1),
                              "arrayctor.next");
  cur->addIncoming(next, Builder.GetInsertBlock());

  // Check whether that's the end of the loop.
  llvm::Value *done = Builder.CreateICmpEQ(next, arrayEnd, "arrayctor.done");
  llvm::BasicBlock *contBB = createBasicBlock("arrayctor.cont");
  Builder.CreateCondBr(done, contBB, loopBB);

  // Patch the earlier check to skip over the loop.
  if (zeroCheckBranch) zeroCheckBranch->setSuccessor(0, contBB);

  EmitBlock(contBB);
}
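// Illustrative example (added for exposition; not from the original source):
// the loop emitted above corresponds to array new-expressions such as
//
//   struct A { A(); ~A(); };
//
//   A *make(unsigned n) {
//     return new A[n];   // n may be zero; each element is constructed in turn
//   }
//
// If A() throws while constructing some element, the partial-array cleanup
// pushed above destroys the elements that were already constructed.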

void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF,
                                       llvm::Value *addr,
                                       QualType type) {
  const RecordType *rtype = type->castAs<RecordType>();
  const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
  const CXXDestructorDecl *dtor = record->getDestructor();
  assert(!dtor->isTrivial());
  CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false,
                            /*Delegating=*/false, addr);
}

void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
                                             CXXCtorType Type,
                                             bool ForVirtualBase,
                                             bool Delegating, llvm::Value *This,
                                             const CXXConstructExpr *E) {
  // If this is a trivial constructor, just emit what's needed.
  if (D->isTrivial() && !D->getParent()->mayInsertExtraPadding()) {
    if (E->getNumArgs() == 0) {
      // Trivial default constructor, no codegen required.
      assert(D->isDefaultConstructor() &&
             "trivial 0-arg ctor not a default ctor");
      return;
    }

    assert(E->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
    assert(D->isCopyOrMoveConstructor() &&
           "trivial 1-arg ctor not a copy/move ctor");

    const Expr *Arg = E->getArg(0);
    QualType Ty = Arg->getType();
    llvm::Value *Src = EmitLValue(Arg).getAddress();
    EmitAggregateCopy(This, Src, Ty);
    return;
  }

  // C++11 [class.mfct.non-static]p2:
  //   If a non-static member function of a class X is called for an object
  //   that is not of type X, or of a type derived from X, the behavior is
  //   undefined.
  // FIXME: Provide a source location here.
  EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, SourceLocation(), This,
                getContext().getRecordType(D->getParent()));

  CallArgList Args;

  // Push the this ptr.
  Args.add(RValue::get(This), D->getThisType(getContext()));

  // Add the rest of the user-supplied arguments.
  const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();
  EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end(), E->getConstructor());

  // Insert any ABI-specific implicit constructor arguments.
  unsigned ExtraArgs = CGM.getCXXABI().addImplicitConstructorArgs(
      *this, D, Type, ForVirtualBase, Delegating, Args);

  // Emit the call.
  llvm::Value *Callee = CGM.getAddrOfCXXStructor(D, getFromCtorType(Type));
  const CGFunctionInfo &Info =
      CGM.getTypes().arrangeCXXConstructorCall(Args, D, Type, ExtraArgs);
  EmitCall(Info, Callee, ReturnValueSlot(), Args, D);
}
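// Illustrative example (added for exposition; not from the original source):
// given
//
//   struct P { int x, y; };
//
//   void f(const P &src) {
//     P local(src);   // trivial copy constructor
//   }
//
// the trivial copy construction above is emitted as an aggregate copy of the
// underlying storage rather than as a call to a constructor function.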

void
CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
                                                llvm::Value *This,
                                                llvm::Value *Src,
                                                const CXXConstructExpr *E) {
  if (D->isTrivial() &&
      !D->getParent()->mayInsertExtraPadding()) {
    assert(E->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
    assert(D->isCopyOrMoveConstructor() &&
           "trivial 1-arg ctor not a copy/move ctor");
    EmitAggregateCopy(This, Src, E->arg_begin()->getType());
    return;
  }
  llvm::Value *Callee = CGM.getAddrOfCXXStructor(D, StructorType::Complete);
  assert(D->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();

  CallArgList Args;

  // Push the this ptr.
  Args.add(RValue::get(This), D->getThisType(getContext()));

  // Push the src ptr.
  QualType QT = *(FPT->param_type_begin());
  llvm::Type *t = CGM.getTypes().ConvertType(QT);
  Src = Builder.CreateBitCast(Src, t);
  Args.add(RValue::get(Src), QT);

  // Skip over first argument (Src).
  EmitCallArgs(Args, FPT, E->arg_begin() + 1, E->arg_end(), E->getConstructor(),
               /*ParamsToSkip*/ 1);

  EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, RequiredArgs::All),
           Callee, ReturnValueSlot(), Args, D);
}

void
CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                                CXXCtorType CtorType,
                                                const FunctionArgList &Args,
                                                SourceLocation Loc) {
  CallArgList DelegateArgs;

  FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
  assert(I != E && "no parameters to constructor");

  // this
  DelegateArgs.add(RValue::get(LoadCXXThis()), (*I)->getType());
  ++I;

  // vtt
  if (llvm::Value *VTT = GetVTTParameter(GlobalDecl(Ctor, CtorType),
                                         /*ForVirtualBase=*/false,
                                         /*Delegating=*/true)) {
    QualType VoidPP = getContext().getPointerType(getContext().VoidPtrTy);
    DelegateArgs.add(RValue::get(VTT), VoidPP);

    if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
      assert(I != E && "cannot skip vtt parameter, already done with args");
      assert((*I)->getType() == VoidPP && "skipping parameter not of vtt type");
      ++I;
    }
  }

  // Explicit arguments.
  for (; I != E; ++I) {
    const VarDecl *param = *I;
    // FIXME: per-argument source location
    EmitDelegateCallArg(DelegateArgs, param, Loc);
  }

  llvm::Value *Callee =
      CGM.getAddrOfCXXStructor(Ctor, getFromCtorType(CtorType));
  EmitCall(CGM.getTypes()
               .arrangeCXXStructorDeclaration(Ctor, getFromCtorType(CtorType)),
           Callee, ReturnValueSlot(), DelegateArgs, Ctor);
}
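// Illustrative note (added for exposition; not from the original source):
// a single source-level constructor such as
//
//   struct A { A(int n); };
//
// is emitted under the Itanium C++ ABI as two symbols, the complete-object
// variant (_ZN1AC1Ei) and the base-object variant (_ZN1AC2Ei). When one
// variant is emitted simply by calling the other with the same arguments,
// the forwarding call is built by EmitDelegateCXXConstructorCall above.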

namespace {
  struct CallDelegatingCtorDtor : EHScopeStack::Cleanup {
    const CXXDestructorDecl *Dtor;
    llvm::Value *Addr;
    CXXDtorType Type;

    CallDelegatingCtorDtor(const CXXDestructorDecl *D, llvm::Value *Addr,
                           CXXDtorType Type)
      : Dtor(D), Addr(Addr), Type(Type) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false,
                                /*Delegating=*/true, Addr);
    }
  };
}

void
CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                                  const FunctionArgList &Args) {
  assert(Ctor->isDelegatingConstructor());

  llvm::Value *ThisPtr = LoadCXXThis();

  QualType Ty = getContext().getTagDeclType(Ctor->getParent());
  CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
  AggValueSlot AggSlot =
    AggValueSlot::forAddr(ThisPtr, Alignment, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased);

  EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot);

  const CXXRecordDecl *ClassDecl = Ctor->getParent();
  if (CGM.getLangOpts().Exceptions && !ClassDecl->hasTrivialDestructor()) {
    CXXDtorType Type =
      CurGD.getCtorType() == Ctor_Complete ? Dtor_Complete : Dtor_Base;

    EHStack.pushCleanup<CallDelegatingCtorDtor>(EHCleanup,
                                                ClassDecl->getDestructor(),
                                                ThisPtr, Type);
  }
}
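// Illustrative example (added for exposition; not from the original source):
// the cleanup pushed above covers C++11 delegating constructors such as
//
//   struct T {
//     T(int n);
//     T() : T(42) {   // delegates to T(int)
//       mayThrow();   // if this throws, the already-constructed object is
//     }               // destroyed before the exception propagates
//     ~T();
//   };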

void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
                                            CXXDtorType Type,
                                            bool ForVirtualBase,
                                            bool Delegating,
                                            llvm::Value *This) {
  CGM.getCXXABI().EmitDestructorCall(*this, DD, Type, ForVirtualBase,
                                     Delegating, This);
}

namespace {
  struct CallLocalDtor : EHScopeStack::Cleanup {
    const CXXDestructorDecl *Dtor;
    llvm::Value *Addr;

    CallLocalDtor(const CXXDestructorDecl *D, llvm::Value *Addr)
      : Dtor(D), Addr(Addr) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                                /*ForVirtualBase=*/false,
                                /*Delegating=*/false, Addr);
    }
  };
}

void CodeGenFunction::PushDestructorCleanup(const CXXDestructorDecl *D,
                                            llvm::Value *Addr) {
  EHStack.pushCleanup<CallLocalDtor>(NormalAndEHCleanup, D, Addr);
}

void CodeGenFunction::PushDestructorCleanup(QualType T, llvm::Value *Addr) {
  CXXRecordDecl *ClassDecl = T->getAsCXXRecordDecl();
  if (!ClassDecl) return;
  if (ClassDecl->hasTrivialDestructor()) return;

  const CXXDestructorDecl *D = ClassDecl->getDestructor();
  assert(D && D->isUsed() && "destructor not marked as used!");
  PushDestructorCleanup(D, Addr);
}

void
CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
                                         const CXXRecordDecl *NearestVBase,
                                         CharUnits OffsetFromNearestVBase,
                                         const CXXRecordDecl *VTableClass) {
  // Compute the address point.
  bool NeedsVirtualOffset;
  llvm::Value *VTableAddressPoint =
      CGM.getCXXABI().getVTableAddressPointInStructor(
          *this, VTableClass, Base, NearestVBase, NeedsVirtualOffset);
  if (!VTableAddressPoint)
    return;

  // Compute where to store the address point.
  llvm::Value *VirtualOffset = nullptr;
  CharUnits NonVirtualOffset = CharUnits::Zero();

  if (NeedsVirtualOffset) {
    // We need to use the virtual base offset offset because the virtual base
    // might have a different offset in the most derived class.
    VirtualOffset = CGM.getCXXABI().GetVirtualBaseClassOffset(*this,
                                                              LoadCXXThis(),
                                                              VTableClass,
                                                              NearestVBase);
    NonVirtualOffset = OffsetFromNearestVBase;
  } else {
    // We can just use the base offset in the complete class.
    NonVirtualOffset = Base.getBaseOffset();
  }

  // Apply the offsets.
  llvm::Value *VTableField = LoadCXXThis();

  if (!NonVirtualOffset.isZero() || VirtualOffset)
    VTableField = ApplyNonVirtualAndVirtualOffset(*this, VTableField,
                                                  NonVirtualOffset,
                                                  VirtualOffset);

  // Finally, store the address point. Use the same LLVM types as the field to
  // support optimization.
  llvm::Type *VTablePtrTy =
      llvm::FunctionType::get(CGM.Int32Ty, /*isVarArg=*/true)
          ->getPointerTo()
          ->getPointerTo();
  VTableField = Builder.CreateBitCast(VTableField, VTablePtrTy->getPointerTo());
  VTableAddressPoint = Builder.CreateBitCast(VTableAddressPoint, VTablePtrTy);
  llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField);
  CGM.DecorateInstruction(Store, CGM.getTBAAInfoForVTablePtr());
}
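// Illustrative example (added for exposition; not from the original source):
// the dynamic (virtual) offset path above is needed for hierarchies like
//
//   struct V { virtual void f(); };
//   struct A : virtual V { virtual void g(); };
//   struct B : A { };
//
// When A's constructor runs as part of constructing a B, the V subobject may
// sit at a different offset than in a complete A, so the offset to the vptr
// being initialized has to be loaded through the vtable rather than computed
// statically.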

void
CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
                                          const CXXRecordDecl *NearestVBase,
                                          CharUnits OffsetFromNearestVBase,
                                          bool BaseIsNonVirtualPrimaryBase,
                                          const CXXRecordDecl *VTableClass,
                                          VisitedVirtualBasesSetTy& VBases) {
  // If this base is a non-virtual primary base the address point has already
  // been set.
  if (!BaseIsNonVirtualPrimaryBase) {
    // Initialize the vtable pointer for this base.
    InitializeVTablePointer(Base, NearestVBase, OffsetFromNearestVBase,
                            VTableClass);
  }

  const CXXRecordDecl *RD = Base.getBase();

  // Traverse bases.
  for (const auto &I : RD->bases()) {
    CXXRecordDecl *BaseDecl
      = cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());

    // Ignore classes without a vtable.
    if (!BaseDecl->isDynamicClass())
      continue;

    CharUnits BaseOffset;
    CharUnits BaseOffsetFromNearestVBase;
    bool BaseDeclIsNonVirtualPrimaryBase;

    if (I.isVirtual()) {
      // Check if we've visited this virtual base before.
      if (!VBases.insert(BaseDecl).second)
        continue;

      const ASTRecordLayout &Layout =
        getContext().getASTRecordLayout(VTableClass);

      BaseOffset = Layout.getVBaseClassOffset(BaseDecl);
      BaseOffsetFromNearestVBase = CharUnits::Zero();
      BaseDeclIsNonVirtualPrimaryBase = false;
    } else {
      const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

      BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
      BaseOffsetFromNearestVBase =
        OffsetFromNearestVBase + Layout.getBaseClassOffset(BaseDecl);
      BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl;
    }

    InitializeVTablePointers(BaseSubobject(BaseDecl, BaseOffset),
                             I.isVirtual() ? BaseDecl : NearestVBase,
                             BaseOffsetFromNearestVBase,
                             BaseDeclIsNonVirtualPrimaryBase,
                             VTableClass, VBases);
  }
}

void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
  // Ignore classes without a vtable.
  if (!RD->isDynamicClass())
    return;

  // Initialize the vtable pointers for this class and all of its bases.
  VisitedVirtualBasesSetTy VBases;
  InitializeVTablePointers(BaseSubobject(RD, CharUnits::Zero()),
                           /*NearestVBase=*/nullptr,
                           /*OffsetFromNearestVBase=*/CharUnits::Zero(),
                           /*BaseIsNonVirtualPrimaryBase=*/false, RD, VBases);

  if (RD->getNumVBases())
    CGM.getCXXABI().initializeHiddenVirtualInheritanceMembers(*this, RD);
}
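// Illustrative example (added for exposition; not from the original source):
// for a hierarchy such as
//
//   struct A { virtual void f(); };
//   struct B { virtual void g(); };
//   struct C : A, B { void f() override; void g() override; };
//
// C's constructor initializes two vtable pointers: the one shared with the
// primary base A (visited above with BaseIsNonVirtualPrimaryBase == true, so
// it is not stored a second time) and the distinct vptr of the B subobject.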

llvm::Value *CodeGenFunction::GetVTablePtr(llvm::Value *This,
                                           llvm::Type *Ty) {
  llvm::Value *VTablePtrSrc = Builder.CreateBitCast(This, Ty->getPointerTo());
  llvm::Instruction *VTable = Builder.CreateLoad(VTablePtrSrc, "vtable");
  CGM.DecorateInstruction(VTable, CGM.getTBAAInfoForVTablePtr());
  return VTable;
}


// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
// quite what we want.
static const Expr *skipNoOpCastsAndParens(const Expr *E) {
  while (true) {
    if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
      E = PE->getSubExpr();
      continue;
    }

    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
      if (CE->getCastKind() == CK_NoOp) {
        E = CE->getSubExpr();
        continue;
      }
    }
    if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
      if (UO->getOpcode() == UO_Extension) {
        E = UO->getSubExpr();
        continue;
      }
    }
    return E;
  }
}

bool
CodeGenFunction::CanDevirtualizeMemberFunctionCall(const Expr *Base,
                                                   const CXXMethodDecl *MD) {
  // When building with -fapple-kext, all calls must go through the vtable since
  // the kernel linker can do runtime patching of vtables.
  if (getLangOpts().AppleKext)
    return false;

  // If the most derived class is marked final, we know that no subclass can
  // override this member function and so we can devirtualize it. For example:
  //
  //   struct A { virtual void f(); };
  //   struct B final : A { };
  //
  //   void f(B *b) {
  //     b->f();
  //   }
  //
  const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType();
  if (MostDerivedClassDecl->hasAttr<FinalAttr>())
    return true;

  // If the member function is marked 'final', we know that it can't be
  // overridden and can therefore devirtualize it.
  if (MD->hasAttr<FinalAttr>())
    return true;

  // Similarly, if the class itself is marked 'final' it can't be derived
  // from and we can therefore devirtualize the member function call.
  if (MD->getParent()->hasAttr<FinalAttr>())
    return true;

  Base = skipNoOpCastsAndParens(Base);
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
      // This is a record decl. We know the type and can devirtualize it.
      return VD->getType()->isRecordType();
    }

    return false;
  }

  // We can devirtualize calls on an object accessed by a class member access
  // expression, since by C++11 [basic.life]p6 we know that it can't refer to
  // a derived class object constructed in the same location.
  if (const MemberExpr *ME = dyn_cast<MemberExpr>(Base))
    if (const ValueDecl *VD = dyn_cast<ValueDecl>(ME->getMemberDecl()))
      return VD->getType()->isRecordType();

  // We can always devirtualize calls on temporary object expressions.
  if (isa<CXXConstructExpr>(Base))
    return true;

  // And calls on bound temporaries.
  if (isa<CXXBindTemporaryExpr>(Base))
    return true;

  // Check if this is a call expr that returns a record type.
  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
    return CE->getCallReturnType()->isRecordType();

  // We can't devirtualize the call.
  return false;
}
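// Illustrative example (added for exposition; not from the original source):
// the DeclRefExpr and MemberExpr cases above cover calls through an object
// whose dynamic type is statically known, e.g.
//
//   struct A { virtual void f(); };
//
//   void g() {
//     A a;
//     a.f();   // 'a' is a complete A, so the call can be made directly
//   }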

void CodeGenFunction::EmitForwardingCallToLambda(
                                      const CXXMethodDecl *callOperator,
                                      CallArgList &callArgs) {
  // Get the address of the call operator.
  const CGFunctionInfo &calleeFnInfo =
    CGM.getTypes().arrangeCXXMethodDeclaration(callOperator);
  llvm::Value *callee =
    CGM.GetAddrOfFunction(GlobalDecl(callOperator),
                          CGM.getTypes().GetFunctionType(calleeFnInfo));

  // Prepare the return slot.
  const FunctionProtoType *FPT =
    callOperator->getType()->castAs<FunctionProtoType>();
  QualType resultType = FPT->getReturnType();
  ReturnValueSlot returnSlot;
  if (!resultType->isVoidType() &&
      calleeFnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
      !hasScalarEvaluationKind(calleeFnInfo.getReturnType()))
    returnSlot = ReturnValueSlot(ReturnValue, resultType.isVolatileQualified());

  // We don't need to separately arrange the call arguments because
  // the call can't be variadic anyway --- it's impossible to forward
  // variadic arguments.

  // Now emit our call.
  RValue RV = EmitCall(calleeFnInfo, callee, returnSlot,
                       callArgs, callOperator);

  // If necessary, copy the returned value into the slot.
  if (!resultType->isVoidType() && returnSlot.isNull())
    EmitReturnOfRValue(RV, resultType);
  else
    EmitBranchThroughCleanup(ReturnBlock);
}

void CodeGenFunction::EmitLambdaBlockInvokeBody() {
  const BlockDecl *BD = BlockInfo->getBlockDecl();
  const VarDecl *variable = BD->capture_begin()->getVariable();
  const CXXRecordDecl *Lambda = variable->getType()->getAsCXXRecordDecl();

  // Start building arguments for the forwarding call.
  CallArgList CallArgs;

  QualType ThisType =
    getContext().getPointerType(getContext().getRecordType(Lambda));
  llvm::Value *ThisPtr = GetAddrOfBlockDecl(variable, false);
  CallArgs.add(RValue::get(ThisPtr), ThisType);

  // Add the rest of the parameters.
  for (auto param : BD->params())
    EmitDelegateCallArg(CallArgs, param, param->getLocStart());

  assert(!Lambda->isGenericLambda() &&
         "generic lambda interconversion to block not implemented");
  EmitForwardingCallToLambda(Lambda->getLambdaCallOperator(), CallArgs);
}

void CodeGenFunction::EmitLambdaToBlockPointerBody(FunctionArgList &Args) {
  if (cast<CXXMethodDecl>(CurCodeDecl)->isVariadic()) {
    // FIXME: Making this work correctly is nasty because it requires either
    // cloning the body of the call operator or making the call operator
    // forward.
    CGM.ErrorUnsupported(CurCodeDecl, "lambda conversion to variadic function");
    return;
  }

  EmitFunctionBody(Args, cast<FunctionDecl>(CurGD.getDecl())->getBody());
}

void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
  const CXXRecordDecl *Lambda = MD->getParent();

  // Start building arguments for the forwarding call.
  CallArgList CallArgs;

  QualType ThisType =
    getContext().getPointerType(getContext().getRecordType(Lambda));
  llvm::Value *ThisPtr = llvm::UndefValue::get(getTypes().ConvertType(ThisType));
  CallArgs.add(RValue::get(ThisPtr), ThisType);

  // Add the rest of the parameters.
  for (auto Param : MD->params())
    EmitDelegateCallArg(CallArgs, Param, Param->getLocStart());

  const CXXMethodDecl *CallOp = Lambda->getLambdaCallOperator();
  // For a generic lambda, find the corresponding call operator specialization
  // to which the call to the static-invoker shall be forwarded.
  if (Lambda->isGenericLambda()) {
    assert(MD->isFunctionTemplateSpecialization());
    const TemplateArgumentList *TAL = MD->getTemplateSpecializationArgs();
    FunctionTemplateDecl *CallOpTemplate =
        CallOp->getDescribedFunctionTemplate();
    void *InsertPos = nullptr;
    FunctionDecl *CorrespondingCallOpSpecialization =
        CallOpTemplate->findSpecialization(TAL->asArray(), InsertPos);
    assert(CorrespondingCallOpSpecialization);
    CallOp = cast<CXXMethodDecl>(CorrespondingCallOpSpecialization);
  }
  EmitForwardingCallToLambda(CallOp, CallArgs);
}

void CodeGenFunction::EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD) {
  if (MD->isVariadic()) {
    // FIXME: Making this work correctly is nasty because it requires either
    // cloning the body of the call operator or making the call operator
    // forward.
    CGM.ErrorUnsupported(MD, "lambda conversion to variadic function");
    return;
  }

  EmitLambdaDelegatingInvokeBody(MD);
}
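// Illustrative example (added for exposition; not from the original source):
// the static invoker emitted above is what backs conversion of a capture-less
// lambda to a function pointer:
//
//   int (*fp)(int) = [](int x) { return x + 1; };
//
// The resulting function has the function-pointer signature and simply
// forwards its arguments to the lambda's operator(), passing an undef 'this'
// since a capture-less lambda has no state.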