//===--- CGClass.cpp - Emit LLVM Code for C++ classes --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with C++ code generation of classes
//
//===----------------------------------------------------------------------===//

#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/Intrinsics.h"

using namespace clang;
using namespace CodeGen;

CharUnits CodeGenModule::computeNonVirtualBaseClassOffset(
    const CXXRecordDecl *DerivedClass, CastExpr::path_const_iterator Start,
    CastExpr::path_const_iterator End) {
  CharUnits Offset = CharUnits::Zero();

  const ASTContext &Context = getContext();
  const CXXRecordDecl *RD = DerivedClass;

  for (CastExpr::path_const_iterator I = Start; I != End; ++I) {
    const CXXBaseSpecifier *Base = *I;
    assert(!Base->isVirtual() && "Should not see virtual bases here!");

    // Get the layout.
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    const CXXRecordDecl *BaseDecl =
        cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());

    // Add the offset.
    Offset += Layout.getBaseClassOffset(BaseDecl);

    RD = BaseDecl;
  }

  return Offset;
}

llvm::Constant *
CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
                                   CastExpr::path_const_iterator PathBegin,
                                   CastExpr::path_const_iterator PathEnd) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  CharUnits Offset =
      computeNonVirtualBaseClassOffset(ClassDecl, PathBegin, PathEnd);
  if (Offset.isZero())
    return nullptr;

  llvm::Type *PtrDiffTy =
      Types.ConvertType(getContext().getPointerDiffType());

  return llvm::ConstantInt::get(PtrDiffTy, Offset.getQuantity());
}

/// Gets the address of a direct base class within a complete object.
/// This should only be used for (1) non-virtual bases or (2) virtual bases
/// when the type is known to be complete (e.g. in complete destructors).
///
/// The object pointed to by 'This' is assumed to be non-null.
llvm::Value *
CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
                                                  const CXXRecordDecl *Derived,
                                                  const CXXRecordDecl *Base,
                                                  bool BaseIsVirtual) {
  // 'this' must be a pointer (in some address space) to Derived.
  assert(This->getType()->isPointerTy() &&
         cast<llvm::PointerType>(This->getType())->getElementType()
           == ConvertType(Derived));

  // Compute the offset of the virtual base.
  CharUnits Offset;
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived);
  if (BaseIsVirtual)
    Offset = Layout.getVBaseClassOffset(Base);
  else
    Offset = Layout.getBaseClassOffset(Base);

  // Shift and cast down to the base type.
  // TODO: for complete types, this should be possible with a GEP.
  llvm::Value *V = This;
  if (Offset.isPositive()) {
    V = Builder.CreateBitCast(V, Int8PtrTy);
    V = Builder.CreateConstInBoundsGEP1_64(V, Offset.getQuantity());
  }
  V = Builder.CreateBitCast(V, ConvertType(Base)->getPointerTo());

  return V;
}

static llvm::Value *
ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ptr,
                                CharUnits nonVirtualOffset,
                                llvm::Value *virtualOffset) {
  // Assert that we have something to do.
  assert(!nonVirtualOffset.isZero() || virtualOffset != nullptr);

  // Compute the offset from the static and dynamic components.
  llvm::Value *baseOffset;
  if (!nonVirtualOffset.isZero()) {
    baseOffset = llvm::ConstantInt::get(CGF.PtrDiffTy,
                                        nonVirtualOffset.getQuantity());
    if (virtualOffset) {
      baseOffset = CGF.Builder.CreateAdd(virtualOffset, baseOffset);
    }
  } else {
    baseOffset = virtualOffset;
  }

  // Apply the base offset.
  ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8PtrTy);
  ptr = CGF.Builder.CreateInBoundsGEP(ptr, baseOffset, "add.ptr");
  return ptr;
}

llvm::Value *CodeGenFunction::GetAddressOfBaseClass(
    llvm::Value *Value, const CXXRecordDecl *Derived,
    CastExpr::path_const_iterator PathBegin,
    CastExpr::path_const_iterator PathEnd, bool NullCheckValue,
    SourceLocation Loc) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  CastExpr::path_const_iterator Start = PathBegin;
  const CXXRecordDecl *VBase = nullptr;

  // Sema has done some convenient canonicalization here: if the
  // access path involved any virtual steps, the conversion path will
  // *start* with a step down to the correct virtual base subobject,
  // and hence will not require any further steps.
  if ((*Start)->isVirtual()) {
    VBase =
      cast<CXXRecordDecl>((*Start)->getType()->getAs<RecordType>()->getDecl());
    ++Start;
  }

  // Compute the static offset of the ultimate destination within its
  // allocating subobject (the virtual base, if there is one, or else
  // the "complete" object that we see).
  CharUnits NonVirtualOffset = CGM.computeNonVirtualBaseClassOffset(
      VBase ? VBase : Derived, Start, PathEnd);

  // If there's a virtual step, we can sometimes "devirtualize" it.
  // For now, that's limited to when the derived type is final.
  // TODO: "devirtualize" this for accesses to known-complete objects.
  if (VBase && Derived->hasAttr<FinalAttr>()) {
    const ASTRecordLayout &layout = getContext().getASTRecordLayout(Derived);
    CharUnits vBaseOffset = layout.getVBaseClassOffset(VBase);
    NonVirtualOffset += vBaseOffset;
    VBase = nullptr; // we no longer have a virtual step
  }

  // Get the base pointer type.
  llvm::Type *BasePtrTy =
      ConvertType((PathEnd[-1])->getType())->getPointerTo();

  QualType DerivedTy = getContext().getRecordType(Derived);
  CharUnits DerivedAlign = getContext().getTypeAlignInChars(DerivedTy);

  // If the static offset is zero and we don't have a virtual step,
  // just do a bitcast; null checks are unnecessary.
  if (NonVirtualOffset.isZero() && !VBase) {
    if (sanitizePerformTypeCheck()) {
      EmitTypeCheck(TCK_Upcast, Loc, Value, DerivedTy, DerivedAlign,
                    !NullCheckValue);
    }
    return Builder.CreateBitCast(Value, BasePtrTy);
  }

  llvm::BasicBlock *origBB = nullptr;
  llvm::BasicBlock *endBB = nullptr;

  // Skip over the offset (and the vtable load) if we're supposed to
  // null-check the pointer.
  if (NullCheckValue) {
    origBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("cast.notnull");
    endBB = createBasicBlock("cast.end");

    llvm::Value *isNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(isNull, endBB, notNullBB);
    EmitBlock(notNullBB);
  }

  if (sanitizePerformTypeCheck()) {
    EmitTypeCheck(VBase ? TCK_UpcastToVirtualBase : TCK_Upcast, Loc, Value,
                  DerivedTy, DerivedAlign, true);
  }

  // Compute the virtual offset.
  llvm::Value *VirtualOffset = nullptr;
  if (VBase) {
    VirtualOffset =
      CGM.getCXXABI().GetVirtualBaseClassOffset(*this, Value, Derived, VBase);
  }

  // Apply both offsets.
  Value = ApplyNonVirtualAndVirtualOffset(*this, Value,
                                          NonVirtualOffset,
                                          VirtualOffset);

  // Cast to the destination type.
  Value = Builder.CreateBitCast(Value, BasePtrTy);

  // Build a phi if we needed a null check.
  if (NullCheckValue) {
    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    Builder.CreateBr(endBB);
    EmitBlock(endBB);

    llvm::PHINode *PHI = Builder.CreatePHI(BasePtrTy, 2, "cast.result");
    PHI->addIncoming(Value, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(BasePtrTy), origBB);
    Value = PHI;
  }

  return Value;
}

llvm::Value *
CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
                                          const CXXRecordDecl *Derived,
                                      CastExpr::path_const_iterator PathBegin,
                                      CastExpr::path_const_iterator PathEnd,
                                          bool NullCheckValue) {
  assert(PathBegin != PathEnd && "Base path should not be empty!");

  QualType DerivedTy =
      getContext().getCanonicalType(getContext().getTagDeclType(Derived));
  llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo();

  llvm::Value *NonVirtualOffset =
      CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd);

  if (!NonVirtualOffset) {
    // No offset, we can just cast back.
    return Builder.CreateBitCast(Value, DerivedPtrTy);
  }

  llvm::BasicBlock *CastNull = nullptr;
  llvm::BasicBlock *CastNotNull = nullptr;
  llvm::BasicBlock *CastEnd = nullptr;

  if (NullCheckValue) {
    CastNull = createBasicBlock("cast.null");
    CastNotNull = createBasicBlock("cast.notnull");
    CastEnd = createBasicBlock("cast.end");

    llvm::Value *IsNull = Builder.CreateIsNull(Value);
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  // Apply the offset.
  Value = Builder.CreateBitCast(Value, Int8PtrTy);
  Value = Builder.CreateGEP(Value, Builder.CreateNeg(NonVirtualOffset),
                            "sub.ptr");

  // Just cast.
  Value = Builder.CreateBitCast(Value, DerivedPtrTy);

  if (NullCheckValue) {
    Builder.CreateBr(CastEnd);
    EmitBlock(CastNull);
    Builder.CreateBr(CastEnd);
    EmitBlock(CastEnd);

    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()),
                     CastNull);
    Value = PHI;
  }

  return Value;
}

llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD,
                                              bool ForVirtualBase,
                                              bool Delegating) {
  if (!CGM.getCXXABI().NeedsVTTParameter(GD)) {
    // This constructor/destructor does not need a VTT parameter.
    return nullptr;
  }

  const CXXRecordDecl *RD = cast<CXXMethodDecl>(CurCodeDecl)->getParent();
  const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent();

  llvm::Value *VTT;

  uint64_t SubVTTIndex;

  if (Delegating) {
    // If this is a delegating constructor call, just load the VTT.
    return LoadCXXVTT();
  } else if (RD == Base) {
    // If the record matches the base, this is the complete ctor/dtor
    // variant calling the base variant in a class with virtual bases.
    assert(!CGM.getCXXABI().NeedsVTTParameter(CurGD) &&
           "doing no-op VTT offset in base dtor/ctor?");
    assert(!ForVirtualBase && "Can't have same class as virtual base!");
    SubVTTIndex = 0;
  } else {
    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
    CharUnits BaseOffset = ForVirtualBase ?
      Layout.getVBaseClassOffset(Base) :
      Layout.getBaseClassOffset(Base);

    SubVTTIndex =
      CGM.getVTables().getSubVTTIndex(RD, BaseSubobject(Base, BaseOffset));
    assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!");
  }

  if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
    // A VTT parameter was passed to the constructor, use it.
    VTT = LoadCXXVTT();
    VTT = Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex);
  } else {
    // We're the complete constructor, so get the VTT by name.
    VTT = CGM.getVTables().GetAddrOfVTT(RD);
    VTT = Builder.CreateConstInBoundsGEP2_64(VTT, 0, SubVTTIndex);
  }

  return VTT;
}

namespace {
/// Call the destructor for a direct base class.
struct CallBaseDtor : EHScopeStack::Cleanup {
  const CXXRecordDecl *BaseClass;
  bool BaseIsVirtual;
  CallBaseDtor(const CXXRecordDecl *Base, bool BaseIsVirtual)
      : BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {}

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    const CXXRecordDecl *DerivedClass =
        cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent();

    const CXXDestructorDecl *D = BaseClass->getDestructor();
    llvm::Value *Addr =
        CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThis(),
                                                  DerivedClass, BaseClass,
                                                  BaseIsVirtual);
    CGF.EmitCXXDestructorCall(D, Dtor_Base, BaseIsVirtual,
                              /*Delegating=*/false, Addr);
  }
};

/// A visitor which checks whether an initializer uses 'this' in a
/// way which requires the vtable to be properly set.
struct DynamicThisUseChecker
    : ConstEvaluatedExprVisitor<DynamicThisUseChecker> {
  typedef ConstEvaluatedExprVisitor<DynamicThisUseChecker> super;

  bool UsesThis;

  DynamicThisUseChecker(const ASTContext &C) : super(C), UsesThis(false) {}

  // Black-list all explicit and implicit references to 'this'.
  //
  // Do we need to worry about external references to 'this' derived
  // from arbitrary code?  If so, then anything which runs arbitrary
  // external code might potentially access the vtable.
  void VisitCXXThisExpr(const CXXThisExpr *E) { UsesThis = true; }
};
}

static bool BaseInitializerUsesThis(ASTContext &C, const Expr *Init) {
  DynamicThisUseChecker Checker(C);
  Checker.Visit(Init);
  return Checker.UsesThis;
}

static void EmitBaseInitializer(CodeGenFunction &CGF,
                                const CXXRecordDecl *ClassDecl,
                                CXXCtorInitializer *BaseInit,
                                CXXCtorType CtorType) {
  assert(BaseInit->isBaseInitializer() &&
         "Must have base initializer!");

  llvm::Value *ThisPtr = CGF.LoadCXXThis();

  const Type *BaseType = BaseInit->getBaseClass();
  CXXRecordDecl *BaseClassDecl =
      cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());

  bool isBaseVirtual = BaseInit->isBaseVirtual();

  // The base constructor doesn't construct virtual bases.
  if (CtorType == Ctor_Base && isBaseVirtual)
    return;

  // If the initializer for the base (other than the constructor
  // itself) accesses 'this' in any way, we need to initialize the
  // vtables.
  if (BaseInitializerUsesThis(CGF.getContext(), BaseInit->getInit()))
    CGF.InitializeVTablePointers(ClassDecl);

  // We can pretend to be a complete class because it only matters for
  // virtual bases, and we only do virtual bases for complete ctors.
  llvm::Value *V =
      CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl,
                                                BaseClassDecl,
                                                isBaseVirtual);
  CharUnits Alignment = CGF.getContext().getTypeAlignInChars(BaseType);
  AggValueSlot AggSlot =
      AggValueSlot::forAddr(V, Alignment, Qualifiers(),
                            AggValueSlot::IsDestructed,
                            AggValueSlot::DoesNotNeedGCBarriers,
                            AggValueSlot::IsNotAliased);

  CGF.EmitAggExpr(BaseInit->getInit(), AggSlot);

  if (CGF.CGM.getLangOpts().Exceptions &&
      !BaseClassDecl->hasTrivialDestructor())
    CGF.EHStack.pushCleanup<CallBaseDtor>(EHCleanup, BaseClassDecl,
                                          isBaseVirtual);
}

static void EmitAggMemberInitializer(CodeGenFunction &CGF,
                                     LValue LHS,
                                     Expr *Init,
                                     llvm::Value *ArrayIndexVar,
                                     QualType T,
                                     ArrayRef<VarDecl *> ArrayIndexes,
                                     unsigned Index) {
  if (Index == ArrayIndexes.size()) {
    LValue LV = LHS;

    if (ArrayIndexVar) {
      // If we have an array index variable, load it and use it as an offset.
      // Then, increment the value.
      llvm::Value *Dest = LHS.getAddress();
      llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar);
      Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress");
      llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1);
      Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc");
      CGF.Builder.CreateStore(Next, ArrayIndexVar);

      // Update the LValue.
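      // The GEP'd element cannot be assumed to be better aligned than either
      // the element type or the original lvalue, so record the minimum of
      // the two alignments below.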
      LV.setAddress(Dest);
      CharUnits Align = CGF.getContext().getTypeAlignInChars(T);
      LV.setAlignment(std::min(Align, LV.getAlignment()));
    }

    switch (CGF.getEvaluationKind(T)) {
    case TEK_Scalar:
      CGF.EmitScalarInit(Init, /*decl*/ nullptr, LV, false);
      break;
    case TEK_Complex:
      CGF.EmitComplexExprIntoLValue(Init, LV, /*isInit*/ true);
      break;
    case TEK_Aggregate: {
      AggValueSlot Slot =
          AggValueSlot::forLValue(LV,
                                  AggValueSlot::IsDestructed,
                                  AggValueSlot::DoesNotNeedGCBarriers,
                                  AggValueSlot::IsNotAliased);

      CGF.EmitAggExpr(Init, Slot);
      break;
    }
    }

    return;
  }

  const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(T);
  assert(Array && "Array initialization without the array type?");
  llvm::Value *IndexVar
    = CGF.GetAddrOfLocalVar(ArrayIndexes[Index]);
  assert(IndexVar && "Array index variable not loaded");

  // Initialize this index variable to zero.
  llvm::Value* Zero
    = llvm::Constant::getNullValue(
        CGF.ConvertType(CGF.getContext().getSizeType()));
  CGF.Builder.CreateStore(Zero, IndexVar);

  // Start the loop with a block that tests the condition.
  llvm::BasicBlock *CondBlock = CGF.createBasicBlock("for.cond");
  llvm::BasicBlock *AfterFor = CGF.createBasicBlock("for.end");

  CGF.EmitBlock(CondBlock);

  llvm::BasicBlock *ForBody = CGF.createBasicBlock("for.body");
  // Generate: if (loop-index < number-of-elements) fall to the loop body,
  // otherwise, go to the block after the for-loop.
  uint64_t NumElements = Array->getSize().getZExtValue();
  llvm::Value *Counter = CGF.Builder.CreateLoad(IndexVar);
  llvm::Value *NumElementsPtr =
    llvm::ConstantInt::get(Counter->getType(), NumElements);
  llvm::Value *IsLess = CGF.Builder.CreateICmpULT(Counter, NumElementsPtr,
                                                  "isless");

  // If the condition is true, execute the body.
  CGF.Builder.CreateCondBr(IsLess, ForBody, AfterFor);

  CGF.EmitBlock(ForBody);
  llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc");

  // Inside the loop body recurse to emit the inner loop or, eventually, the
  // constructor call.
  EmitAggMemberInitializer(CGF, LHS, Init, ArrayIndexVar,
                           Array->getElementType(), ArrayIndexes, Index + 1);

  CGF.EmitBlock(ContinueBlock);

  // Emit the increment of the loop counter.
  llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1);
  Counter = CGF.Builder.CreateLoad(IndexVar);
  NextVal = CGF.Builder.CreateAdd(Counter, NextVal, "inc");
  CGF.Builder.CreateStore(NextVal, IndexVar);

  // Finally, branch back up to the condition for the next iteration.
  CGF.EmitBranch(CondBlock);

  // Emit the fall-through block.
  CGF.EmitBlock(AfterFor, true);
}

static bool isMemcpyEquivalentSpecialMember(const CXXMethodDecl *D) {
  auto *CD = dyn_cast<CXXConstructorDecl>(D);
  if (!(CD && CD->isCopyOrMoveConstructor()) &&
      !D->isCopyAssignmentOperator() && !D->isMoveAssignmentOperator())
    return false;

  // We can emit a memcpy for a trivial copy or move constructor/assignment.
  if (D->isTrivial() && !D->getParent()->mayInsertExtraPadding())
    return true;

  // We *must* emit a memcpy for a defaulted union copy or move op.
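  // (A defaulted union copy or move operation copies the object
  // representation as a whole, which is exactly what a memcpy does.)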
  if (D->getParent()->isUnion() && D->isDefaulted())
    return true;

  return false;
}

static void EmitLValueForAnyFieldInitialization(CodeGenFunction &CGF,
                                                CXXCtorInitializer *MemberInit,
                                                LValue &LHS) {
  FieldDecl *Field = MemberInit->getAnyMember();
  if (MemberInit->isIndirectMemberInitializer()) {
    // If we are initializing an anonymous union field, drill down to the
    // field.
    IndirectFieldDecl *IndirectField = MemberInit->getIndirectMember();
    for (const auto *I : IndirectField->chain())
      LHS = CGF.EmitLValueForFieldInitialization(LHS, cast<FieldDecl>(I));
  } else {
    LHS = CGF.EmitLValueForFieldInitialization(LHS, Field);
  }
}

static void EmitMemberInitializer(CodeGenFunction &CGF,
                                  const CXXRecordDecl *ClassDecl,
                                  CXXCtorInitializer *MemberInit,
                                  const CXXConstructorDecl *Constructor,
                                  FunctionArgList &Args) {
  ApplyDebugLocation Loc(CGF, MemberInit->getSourceLocation());
  assert(MemberInit->isAnyMemberInitializer() &&
         "Must have member initializer!");
  assert(MemberInit->getInit() && "Must have initializer!");

  // non-static data member initializers.
  FieldDecl *Field = MemberInit->getAnyMember();
  QualType FieldType = Field->getType();

  llvm::Value *ThisPtr = CGF.LoadCXXThis();
  QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
  LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);

  EmitLValueForAnyFieldInitialization(CGF, MemberInit, LHS);

  // Special case: if we are in a copy or move constructor, and we are copying
  // an array of PODs or classes with trivial copy constructors, ignore the
  // AST and perform the copy we know is equivalent.
  // FIXME: This is hacky at best... if we had a bit more explicit information
  // in the AST, we could generalize it more easily.
  const ConstantArrayType *Array
    = CGF.getContext().getAsConstantArrayType(FieldType);
  if (Array && Constructor->isDefaulted() &&
      Constructor->isCopyOrMoveConstructor()) {
    QualType BaseElementTy = CGF.getContext().getBaseElementType(Array);
    CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());
    if (BaseElementTy.isPODType(CGF.getContext()) ||
        (CE && isMemcpyEquivalentSpecialMember(CE->getConstructor()))) {
      unsigned SrcArgIndex =
          CGF.CGM.getCXXABI().getSrcArgforCopyCtor(Constructor, Args);
      llvm::Value *SrcPtr
        = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex]));
      LValue ThisRHSLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
      LValue Src = CGF.EmitLValueForFieldInitialization(ThisRHSLV, Field);

      // Copy the aggregate.
      CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType,
                            LHS.isVolatileQualified());
      // Ensure that we destroy the objects if an exception is thrown later in
      // the constructor.
      QualType::DestructionKind dtorKind = FieldType.isDestructedType();
      if (CGF.needsEHCleanup(dtorKind))
        CGF.pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
      return;
    }
  }

  ArrayRef<VarDecl *> ArrayIndexes;
  if (MemberInit->getNumArrayIndices())
    ArrayIndexes = MemberInit->getArrayIndexes();
  CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit(), ArrayIndexes);
}

void CodeGenFunction::EmitInitializerForField(
    FieldDecl *Field, LValue LHS, Expr *Init,
    ArrayRef<VarDecl *> ArrayIndexes) {
  QualType FieldType = Field->getType();
  switch (getEvaluationKind(FieldType)) {
  case TEK_Scalar:
    if (LHS.isSimple()) {
      EmitExprAsInit(Init, Field, LHS, false);
    } else {
      RValue RHS = RValue::get(EmitScalarExpr(Init));
      EmitStoreThroughLValue(RHS, LHS);
    }
    break;
  case TEK_Complex:
    EmitComplexExprIntoLValue(Init, LHS, /*isInit*/ true);
    break;
  case TEK_Aggregate: {
    llvm::Value *ArrayIndexVar = nullptr;
    if (ArrayIndexes.size()) {
      llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

      // The LHS is a pointer to the first object we'll be constructing, as
      // a flat array.
      QualType BaseElementTy = getContext().getBaseElementType(FieldType);
      llvm::Type *BasePtr = ConvertType(BaseElementTy);
      BasePtr = llvm::PointerType::getUnqual(BasePtr);
      llvm::Value *BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(),
                                                       BasePtr);
      LHS = MakeAddrLValue(BaseAddrPtr, BaseElementTy);

      // Create an array index that will be used to walk over all of the
      // objects we're constructing.
      ArrayIndexVar = CreateTempAlloca(SizeTy, "object.index");
      llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
      Builder.CreateStore(Zero, ArrayIndexVar);

      // Emit the block variables for the array indices, if any.
      for (unsigned I = 0, N = ArrayIndexes.size(); I != N; ++I)
        EmitAutoVarDecl(*ArrayIndexes[I]);
    }

    EmitAggMemberInitializer(*this, LHS, Init, ArrayIndexVar, FieldType,
                             ArrayIndexes, 0);
  }
  }

  // Ensure that we destroy this object if an exception is thrown
  // later in the constructor.
  QualType::DestructionKind dtorKind = FieldType.isDestructedType();
  if (needsEHCleanup(dtorKind))
    pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
}

/// Checks whether the given constructor is a valid subject for the
/// complete-to-base constructor delegation optimization, i.e.
/// emitting the complete constructor as a simple call to the base
/// constructor.
static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor) {

  // Currently we disable the optimization for classes with virtual
  // bases because (1) the addresses of parameter variables need to be
  // consistent across all initializers but (2) the delegate function
  // call necessarily creates a second copy of the parameter variable.
  //
  // The limiting example (purely theoretical AFAIK):
  //   struct A { A(int &c) { c++; } };
  //   struct B : virtual A {
  //     B(int count) : A(count) { printf("%d\n", count); }
  //   };
  // ...although even this example could in principle be emitted as a
  // delegation since the address of the parameter doesn't escape.
  if (Ctor->getParent()->getNumVBases()) {
    // TODO: white-list trivial vbase initializers.  This case wouldn't
    // be subject to the restrictions below.

    // TODO: white-list cases where:
    //  - there are no non-reference parameters to the constructor
    //  - the initializers don't access any non-reference parameters
    //  - the initializers don't take the address of non-reference
    //    parameters
    //  - etc.
    // If we ever add any of the above cases, remember that:
    //  - function-try-blocks will always blacklist this optimization
    //  - we need to perform the constructor prologue and cleanup in
    //    EmitConstructorBody.

    return false;
  }

  // We also disable the optimization for variadic functions because
  // it's impossible to "re-pass" varargs.
  if (Ctor->getType()->getAs<FunctionProtoType>()->isVariadic())
    return false;

  // FIXME: Decide if we can do a delegation of a delegating constructor.
  if (Ctor->isDelegatingConstructor())
    return false;

  return true;
}

// Emit code in ctor (Prologue==true) or dtor (Prologue==false)
// to poison the extra field paddings inserted under
// -fsanitize-address-field-padding=1|2.
void CodeGenFunction::EmitAsanPrologueOrEpilogue(bool Prologue) {
  ASTContext &Context = getContext();
  const CXXRecordDecl *ClassDecl =
      Prologue ? cast<CXXConstructorDecl>(CurGD.getDecl())->getParent()
               : cast<CXXDestructorDecl>(CurGD.getDecl())->getParent();
  if (!ClassDecl->mayInsertExtraPadding()) return;

  struct SizeAndOffset {
    uint64_t Size;
    uint64_t Offset;
  };

  unsigned PtrSize = CGM.getDataLayout().getPointerSizeInBits();
  const ASTRecordLayout &Info = Context.getASTRecordLayout(ClassDecl);

  // Populate sizes and offsets of fields.
  SmallVector<SizeAndOffset, 16> SSV(Info.getFieldCount());
  for (unsigned i = 0, e = Info.getFieldCount(); i != e; ++i)
    SSV[i].Offset =
        Context.toCharUnitsFromBits(Info.getFieldOffset(i)).getQuantity();

  size_t NumFields = 0;
  for (const auto *Field : ClassDecl->fields()) {
    const FieldDecl *D = Field;
    std::pair<CharUnits, CharUnits> FieldInfo =
        Context.getTypeInfoInChars(D->getType());
    CharUnits FieldSize = FieldInfo.first;
    assert(NumFields < SSV.size());
    SSV[NumFields].Size = D->isBitField() ? 0 : FieldSize.getQuantity();
    NumFields++;
  }
  assert(NumFields == SSV.size());
  if (SSV.size() <= 1) return;

  // We will insert calls to __asan_* run-time functions.
  // LLVM AddressSanitizer pass may decide to inline them later.
  llvm::Type *Args[2] = {IntPtrTy, IntPtrTy};
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.VoidTy, Args, false);
  llvm::Constant *F = CGM.CreateRuntimeFunction(
      FTy, Prologue ? "__asan_poison_intra_object_redzone"
                    : "__asan_unpoison_intra_object_redzone");

  llvm::Value *ThisPtr = LoadCXXThis();
  ThisPtr = Builder.CreatePtrToInt(ThisPtr, IntPtrTy);
  uint64_t TypeSize = Info.getNonVirtualSize().getQuantity();
  // For each field check if it has sufficient padding,
  // if so (un)poison it with a call.
  for (size_t i = 0; i < SSV.size(); i++) {
    uint64_t AsanAlignment = 8;
    uint64_t NextField = i == SSV.size() - 1 ? TypeSize : SSV[i + 1].Offset;
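    // PoisonSize is the padding between the end of this field and the start
    // of the next field (or the end of the non-virtual part of the object).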
    uint64_t PoisonSize = NextField - SSV[i].Offset - SSV[i].Size;
    uint64_t EndOffset = SSV[i].Offset + SSV[i].Size;
    if (PoisonSize < AsanAlignment || !SSV[i].Size ||
        (NextField % AsanAlignment) != 0)
      continue;
    Builder.CreateCall(
        F, {Builder.CreateAdd(ThisPtr, Builder.getIntN(PtrSize, EndOffset)),
            Builder.getIntN(PtrSize, PoisonSize)});
  }
}

/// EmitConstructorBody - Emits the body of the current constructor.
void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
  EmitAsanPrologueOrEpilogue(true);
  const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(CurGD.getDecl());
  CXXCtorType CtorType = CurGD.getCtorType();

  assert((CGM.getTarget().getCXXABI().hasConstructorVariants() ||
          CtorType == Ctor_Complete) &&
         "can only generate complete ctor for this ABI");

  // Before we go any further, try the complete->base constructor
  // delegation optimization.
  if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor) &&
      CGM.getTarget().getCXXABI().hasConstructorVariants()) {
    EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args, Ctor->getLocEnd());
    return;
  }

  const FunctionDecl *Definition = nullptr;
  Stmt *Body = Ctor->getBody(Definition);
  assert(Definition == Ctor && "emitting wrong constructor body");

  // Enter the function-try-block before the constructor prologue if
  // applicable.
  bool IsTryBody = (Body && isa<CXXTryStmt>(Body));
  if (IsTryBody)
    EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);

  incrementProfileCounter(Body);

  RunCleanupsScope RunCleanups(*this);

  // TODO: in restricted cases, we can emit the vbase initializers of
  // a complete ctor and then delegate to the base ctor.

  // Emit the constructor prologue, i.e. the base and member
  // initializers.
  EmitCtorPrologue(Ctor, CtorType, Args);

  // Emit the body of the statement.
  if (IsTryBody)
    EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
  else if (Body)
    EmitStmt(Body);

  // Emit any cleanup blocks associated with the member or base
  // initializers, which includes (along the exceptional path) the
  // destructors for those members and bases that were fully
  // constructed.
  RunCleanups.ForceCleanup();

  if (IsTryBody)
    ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
}

namespace {
/// RAII object to indicate that codegen is copying the value representation
/// instead of the object representation. Useful when copying a struct or
/// class which has uninitialized members and we're only performing
/// lvalue-to-rvalue conversion on the object but not its members.
class CopyingValueRepresentation {
public:
  explicit CopyingValueRepresentation(CodeGenFunction &CGF)
      : CGF(CGF), OldSanOpts(CGF.SanOpts) {
    CGF.SanOpts.set(SanitizerKind::Bool, false);
    CGF.SanOpts.set(SanitizerKind::Enum, false);
  }
  ~CopyingValueRepresentation() {
    CGF.SanOpts = OldSanOpts;
  }
private:
  CodeGenFunction &CGF;
  SanitizerSet OldSanOpts;
};
}

namespace {
class FieldMemcpyizer {
public:
  FieldMemcpyizer(CodeGenFunction &CGF, const CXXRecordDecl *ClassDecl,
                  const VarDecl *SrcRec)
      : CGF(CGF), ClassDecl(ClassDecl), SrcRec(SrcRec),
        RecLayout(CGF.getContext().getASTRecordLayout(ClassDecl)),
        FirstField(nullptr), LastField(nullptr), FirstFieldOffset(0),
        LastFieldOffset(0), LastAddedFieldIndex(0) {}

  bool isMemcpyableField(FieldDecl *F) const {
    // Never memcpy fields when we are adding poisoned paddings.
    if (CGF.getContext().getLangOpts().SanitizeAddressFieldPadding)
      return false;
    Qualifiers Qual = F->getType().getQualifiers();
    if (Qual.hasVolatile() || Qual.hasObjCLifetime())
      return false;
    return true;
  }

  void addMemcpyableField(FieldDecl *F) {
    if (!FirstField)
      addInitialField(F);
    else
      addNextField(F);
  }

  CharUnits getMemcpySize(uint64_t FirstByteOffset) const {
    unsigned LastFieldSize =
        LastField->isBitField() ?
          LastField->getBitWidthValue(CGF.getContext()) :
          CGF.getContext().getTypeSize(LastField->getType());
    uint64_t MemcpySizeBits =
        LastFieldOffset + LastFieldSize - FirstByteOffset +
        CGF.getContext().getCharWidth() - 1;
    CharUnits MemcpySize =
        CGF.getContext().toCharUnitsFromBits(MemcpySizeBits);
    return MemcpySize;
  }

  void emitMemcpy() {
    // Give the subclass a chance to bail out if it feels the memcpy isn't
    // worth it (e.g. hasn't aggregated enough data).
    if (!FirstField) {
      return;
    }

    uint64_t FirstByteOffset;
    if (FirstField->isBitField()) {
      const CGRecordLayout &RL =
          CGF.getTypes().getCGRecordLayout(FirstField->getParent());
      const CGBitFieldInfo &BFInfo = RL.getBitFieldInfo(FirstField);
      // FirstFieldOffset is not appropriate for bitfields,
      // we need to use the storage offset instead.
      FirstByteOffset = CGF.getContext().toBits(BFInfo.StorageOffset);
    } else {
      FirstByteOffset = FirstFieldOffset;
    }

    CharUnits MemcpySize = getMemcpySize(FirstByteOffset);
    QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
    llvm::Value *ThisPtr = CGF.LoadCXXThis();
    LValue DestLV = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
    LValue Dest = CGF.EmitLValueForFieldInitialization(DestLV, FirstField);
    llvm::Value *SrcPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(SrcRec));
    LValue SrcLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
    LValue Src = CGF.EmitLValueForFieldInitialization(SrcLV, FirstField);

    CharUnits Offset = CGF.getContext().toCharUnitsFromBits(FirstByteOffset);
    CharUnits Alignment = DestLV.getAlignment().alignmentAtOffset(Offset);

    emitMemcpyIR(Dest.isBitField() ? Dest.getBitFieldAddr() : Dest.getAddress(),
                 Src.isBitField() ? Src.getBitFieldAddr() : Src.getAddress(),
                 MemcpySize, Alignment);
    reset();
  }

  void reset() {
    FirstField = nullptr;
  }

protected:
  CodeGenFunction &CGF;
  const CXXRecordDecl *ClassDecl;

private:

  void emitMemcpyIR(llvm::Value *DestPtr, llvm::Value *SrcPtr,
                    CharUnits Size, CharUnits Alignment) {
    llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
    llvm::Type *DBP =
        llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), DPT->getAddressSpace());
    DestPtr = CGF.Builder.CreateBitCast(DestPtr, DBP);

    llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
    llvm::Type *SBP =
        llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), SPT->getAddressSpace());
    SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, SBP);

    CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, Size.getQuantity(),
                             Alignment.getQuantity());
  }

  void addInitialField(FieldDecl *F) {
    FirstField = F;
    LastField = F;
    FirstFieldOffset = RecLayout.getFieldOffset(F->getFieldIndex());
    LastFieldOffset = FirstFieldOffset;
    LastAddedFieldIndex = F->getFieldIndex();
    return;
  }

  void addNextField(FieldDecl *F) {
    // For the most part, the following invariant will hold:
    //   F->getFieldIndex() == LastAddedFieldIndex + 1
    // The one exception is that Sema won't add a copy-initializer for an
    // unnamed bitfield, which will show up here as a gap in the sequence.
    assert(F->getFieldIndex() >= LastAddedFieldIndex + 1 &&
           "Cannot aggregate fields out of order.");
    LastAddedFieldIndex = F->getFieldIndex();

    // The 'first' and 'last' fields are chosen by offset, rather than field
    // index. This allows the code to support bitfields, as well as regular
    // fields.
    uint64_t FOffset = RecLayout.getFieldOffset(F->getFieldIndex());
    if (FOffset < FirstFieldOffset) {
      FirstField = F;
      FirstFieldOffset = FOffset;
    } else if (FOffset > LastFieldOffset) {
      LastField = F;
      LastFieldOffset = FOffset;
    }
  }

  const VarDecl *SrcRec;
  const ASTRecordLayout &RecLayout;
  FieldDecl *FirstField;
  FieldDecl *LastField;
  uint64_t FirstFieldOffset, LastFieldOffset;
  unsigned LastAddedFieldIndex;
};

class ConstructorMemcpyizer : public FieldMemcpyizer {
private:

  /// Get source argument for copy constructor. Returns null if not a copy
  /// constructor.
  static const VarDecl *getTrivialCopySource(CodeGenFunction &CGF,
                                             const CXXConstructorDecl *CD,
                                             FunctionArgList &Args) {
    if (CD->isCopyOrMoveConstructor() && CD->isDefaulted())
      return Args[CGF.CGM.getCXXABI().getSrcArgforCopyCtor(CD, Args)];
    return nullptr;
  }

  // Returns true if a CXXCtorInitializer represents a member initialization
  // that can be rolled into a memcpy.
  bool isMemberInitMemcpyable(CXXCtorInitializer *MemberInit) const {
    if (!MemcpyableCtor)
      return false;
    FieldDecl *Field = MemberInit->getMember();
    assert(Field && "No field for member init.");
    QualType FieldType = Field->getType();
    CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());

    // Bail out on non-memcpyable, not-trivially-copyable members.
    if (!(CE && isMemcpyEquivalentSpecialMember(CE->getConstructor())) &&
        !(FieldType.isTriviallyCopyableType(CGF.getContext()) ||
          FieldType->isReferenceType()))
      return false;

    // Bail out on volatile fields.
    if (!isMemcpyableField(Field))
      return false;

    // Otherwise we're good.
    return true;
  }

public:
  ConstructorMemcpyizer(CodeGenFunction &CGF, const CXXConstructorDecl *CD,
                        FunctionArgList &Args)
      : FieldMemcpyizer(CGF, CD->getParent(),
                        getTrivialCopySource(CGF, CD, Args)),
        ConstructorDecl(CD),
        MemcpyableCtor(CD->isDefaulted() &&
                       CD->isCopyOrMoveConstructor() &&
                       CGF.getLangOpts().getGC() == LangOptions::NonGC),
        Args(Args) { }

  void addMemberInitializer(CXXCtorInitializer *MemberInit) {
    if (isMemberInitMemcpyable(MemberInit)) {
      AggregatedInits.push_back(MemberInit);
      addMemcpyableField(MemberInit->getMember());
    } else {
      emitAggregatedInits();
      EmitMemberInitializer(CGF, ConstructorDecl->getParent(), MemberInit,
                            ConstructorDecl, Args);
    }
  }

  void emitAggregatedInits() {
    if (AggregatedInits.size() <= 1) {
      // This memcpy is too small to be worthwhile. Fall back on default
      // codegen.
      if (!AggregatedInits.empty()) {
        CopyingValueRepresentation CVR(CGF);
        EmitMemberInitializer(CGF, ConstructorDecl->getParent(),
                              AggregatedInits[0], ConstructorDecl, Args);
        AggregatedInits.clear();
      }
      reset();
      return;
    }

    pushEHDestructors();
    emitMemcpy();
    AggregatedInits.clear();
  }

  void pushEHDestructors() {
    llvm::Value *ThisPtr = CGF.LoadCXXThis();
    QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
    LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);

    for (unsigned i = 0; i < AggregatedInits.size(); ++i) {
      CXXCtorInitializer *MemberInit = AggregatedInits[i];
      QualType FieldType = MemberInit->getAnyMember()->getType();
      QualType::DestructionKind dtorKind = FieldType.isDestructedType();
      if (!CGF.needsEHCleanup(dtorKind))
        continue;
      LValue FieldLHS = LHS;
      EmitLValueForAnyFieldInitialization(CGF, MemberInit, FieldLHS);
      CGF.pushEHDestroy(dtorKind, FieldLHS.getAddress(), FieldType);
    }
  }

  void finish() {
    emitAggregatedInits();
  }

private:
  const CXXConstructorDecl *ConstructorDecl;
  bool MemcpyableCtor;
  FunctionArgList &Args;
  SmallVector<CXXCtorInitializer*, 16> AggregatedInits;
};

class AssignmentMemcpyizer : public FieldMemcpyizer {
private:

  // Returns the memcpyable field copied by the given statement, if one
  // exists. Otherwise returns null.
  FieldDecl *getMemcpyableField(Stmt *S) {
    if (!AssignmentsMemcpyable)
      return nullptr;
    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(S)) {
      // Recognise trivial assignments.
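      // These are assignments of the form 'x = src.x', where both sides
      // refer to the same memcpyable field.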
      if (BO->getOpcode() != BO_Assign)
        return nullptr;
      MemberExpr *ME = dyn_cast<MemberExpr>(BO->getLHS());
      if (!ME)
        return nullptr;
      FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
      if (!Field || !isMemcpyableField(Field))
        return nullptr;
      Stmt *RHS = BO->getRHS();
      if (ImplicitCastExpr *EC = dyn_cast<ImplicitCastExpr>(RHS))
        RHS = EC->getSubExpr();
      if (!RHS)
        return nullptr;
      MemberExpr *ME2 = dyn_cast<MemberExpr>(RHS);
      if (!ME2 || dyn_cast<FieldDecl>(ME2->getMemberDecl()) != Field)
        return nullptr;
      return Field;
    } else if (CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(S)) {
      CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MCE->getCalleeDecl());
      if (!(MD && isMemcpyEquivalentSpecialMember(MD)))
        return nullptr;
      MemberExpr *IOA = dyn_cast<MemberExpr>(MCE->getImplicitObjectArgument());
      if (!IOA)
        return nullptr;
      FieldDecl *Field = dyn_cast<FieldDecl>(IOA->getMemberDecl());
      if (!Field || !isMemcpyableField(Field))
        return nullptr;
      MemberExpr *Arg0 = dyn_cast<MemberExpr>(MCE->getArg(0));
      if (!Arg0 || Field != dyn_cast<FieldDecl>(Arg0->getMemberDecl()))
        return nullptr;
      return Field;
    } else if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
      FunctionDecl *FD = dyn_cast<FunctionDecl>(CE->getCalleeDecl());
      if (!FD || FD->getBuiltinID() != Builtin::BI__builtin_memcpy)
        return nullptr;
      Expr *DstPtr = CE->getArg(0);
      if (ImplicitCastExpr *DC = dyn_cast<ImplicitCastExpr>(DstPtr))
        DstPtr = DC->getSubExpr();
      UnaryOperator *DUO = dyn_cast<UnaryOperator>(DstPtr);
      if (!DUO || DUO->getOpcode() != UO_AddrOf)
        return nullptr;
      MemberExpr *ME = dyn_cast<MemberExpr>(DUO->getSubExpr());
      if (!ME)
        return nullptr;
      FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl());
      if (!Field || !isMemcpyableField(Field))
        return nullptr;
      Expr *SrcPtr = CE->getArg(1);
      if (ImplicitCastExpr *SC = dyn_cast<ImplicitCastExpr>(SrcPtr))
        SrcPtr = SC->getSubExpr();
      UnaryOperator *SUO = dyn_cast<UnaryOperator>(SrcPtr);
      if (!SUO || SUO->getOpcode() != UO_AddrOf)
        return nullptr;
      MemberExpr *ME2 = dyn_cast<MemberExpr>(SUO->getSubExpr());
      if (!ME2 || Field != dyn_cast<FieldDecl>(ME2->getMemberDecl()))
        return nullptr;
      return Field;
    }

    return nullptr;
  }

  bool AssignmentsMemcpyable;
  SmallVector<Stmt*, 16> AggregatedStmts;

public:

  AssignmentMemcpyizer(CodeGenFunction &CGF, const CXXMethodDecl *AD,
                       FunctionArgList &Args)
      : FieldMemcpyizer(CGF, AD->getParent(), Args[Args.size() - 1]),
        AssignmentsMemcpyable(CGF.getLangOpts().getGC() == LangOptions::NonGC) {
    assert(Args.size() == 2);
  }

  void emitAssignment(Stmt *S) {
    FieldDecl *F = getMemcpyableField(S);
    if (F) {
      addMemcpyableField(F);
      AggregatedStmts.push_back(S);
    } else {
      emitAggregatedStmts();
      CGF.EmitStmt(S);
    }
  }

  void emitAggregatedStmts() {
    if (AggregatedStmts.size() <= 1) {
      if (!AggregatedStmts.empty()) {
        CopyingValueRepresentation CVR(CGF);
        CGF.EmitStmt(AggregatedStmts[0]);
      }
      reset();
    }

    emitMemcpy();
    AggregatedStmts.clear();
  }

  void finish() {
    emitAggregatedStmts();
  }
};

}

/// EmitCtorPrologue - This routine generates necessary code to initialize
/// base classes and non-static data members belonging to this constructor.
members belonging to this constructor. 1236 void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD, 1237 CXXCtorType CtorType, 1238 FunctionArgList &Args) { 1239 if (CD->isDelegatingConstructor()) 1240 return EmitDelegatingCXXConstructorCall(CD, Args); 1241 1242 const CXXRecordDecl *ClassDecl = CD->getParent(); 1243 1244 CXXConstructorDecl::init_const_iterator B = CD->init_begin(), 1245 E = CD->init_end(); 1246 1247 llvm::BasicBlock *BaseCtorContinueBB = nullptr; 1248 if (ClassDecl->getNumVBases() && 1249 !CGM.getTarget().getCXXABI().hasConstructorVariants()) { 1250 // The ABIs that don't have constructor variants need to put a branch 1251 // before the virtual base initialization code. 1252 BaseCtorContinueBB = 1253 CGM.getCXXABI().EmitCtorCompleteObjectHandler(*this, ClassDecl); 1254 assert(BaseCtorContinueBB); 1255 } 1256 1257 // Virtual base initializers first. 1258 for (; B != E && (*B)->isBaseInitializer() && (*B)->isBaseVirtual(); B++) { 1259 EmitBaseInitializer(*this, ClassDecl, *B, CtorType); 1260 } 1261 1262 if (BaseCtorContinueBB) { 1263 // Complete object handler should continue to the remaining initializers. 1264 Builder.CreateBr(BaseCtorContinueBB); 1265 EmitBlock(BaseCtorContinueBB); 1266 } 1267 1268 // Then, non-virtual base initializers. 1269 for (; B != E && (*B)->isBaseInitializer(); B++) { 1270 assert(!(*B)->isBaseVirtual()); 1271 EmitBaseInitializer(*this, ClassDecl, *B, CtorType); 1272 } 1273 1274 InitializeVTablePointers(ClassDecl); 1275 1276 // And finally, initialize class members. 1277 FieldConstructionScope FCS(*this, CXXThisValue); 1278 ConstructorMemcpyizer CM(*this, CD, Args); 1279 for (; B != E; B++) { 1280 CXXCtorInitializer *Member = (*B); 1281 assert(!Member->isBaseInitializer()); 1282 assert(Member->isAnyMemberInitializer() && 1283 "Delegating initializer on non-delegating constructor"); 1284 CM.addMemberInitializer(Member); 1285 } 1286 CM.finish(); 1287 } 1288 1289 static bool 1290 FieldHasTrivialDestructorBody(ASTContext &Context, const FieldDecl *Field); 1291 1292 static bool 1293 HasTrivialDestructorBody(ASTContext &Context, 1294 const CXXRecordDecl *BaseClassDecl, 1295 const CXXRecordDecl *MostDerivedClassDecl) 1296 { 1297 // If the destructor is trivial we don't have to check anything else. 1298 if (BaseClassDecl->hasTrivialDestructor()) 1299 return true; 1300 1301 if (!BaseClassDecl->getDestructor()->hasTrivialBody()) 1302 return false; 1303 1304 // Check fields. 1305 for (const auto *Field : BaseClassDecl->fields()) 1306 if (!FieldHasTrivialDestructorBody(Context, Field)) 1307 return false; 1308 1309 // Check non-virtual bases. 1310 for (const auto &I : BaseClassDecl->bases()) { 1311 if (I.isVirtual()) 1312 continue; 1313 1314 const CXXRecordDecl *NonVirtualBase = 1315 cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl()); 1316 if (!HasTrivialDestructorBody(Context, NonVirtualBase, 1317 MostDerivedClassDecl)) 1318 return false; 1319 } 1320 1321 if (BaseClassDecl == MostDerivedClassDecl) { 1322 // Check virtual bases. 
    for (const auto &I : BaseClassDecl->vbases()) {
      const CXXRecordDecl *VirtualBase =
          cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
      if (!HasTrivialDestructorBody(Context, VirtualBase,
                                    MostDerivedClassDecl))
        return false;
    }
  }

  return true;
}

static bool
FieldHasTrivialDestructorBody(ASTContext &Context,
                              const FieldDecl *Field)
{
  QualType FieldBaseElementType = Context.getBaseElementType(Field->getType());

  const RecordType *RT = FieldBaseElementType->getAs<RecordType>();
  if (!RT)
    return true;

  CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());

  // The destructor for an implicit anonymous union member is never invoked.
  if (FieldClassDecl->isUnion() && FieldClassDecl->isAnonymousStructOrUnion())
    return false;

  return HasTrivialDestructorBody(Context, FieldClassDecl, FieldClassDecl);
}

/// CanSkipVTablePointerInitialization - Check whether we need to initialize
/// any vtable pointers before calling this destructor.
static bool CanSkipVTablePointerInitialization(ASTContext &Context,
                                               const CXXDestructorDecl *Dtor) {
  if (!Dtor->hasTrivialBody())
    return false;

  // Check the fields.
  const CXXRecordDecl *ClassDecl = Dtor->getParent();
  for (const auto *Field : ClassDecl->fields())
    if (!FieldHasTrivialDestructorBody(Context, Field))
      return false;

  return true;
}

// Generates function call for handling object poisoning, passing in
// references to 'this' and its size as arguments.
// Disables tail call elimination, to prevent the current stack frame from
// disappearing from the stack trace.
static void EmitDtorSanitizerCallback(CodeGenFunction &CGF,
                                      const CXXDestructorDecl *Dtor) {
  const ASTRecordLayout &Layout =
      CGF.getContext().getASTRecordLayout(Dtor->getParent());

  // Nothing to poison.
  if (Layout.getFieldCount() == 0)
    return;

  // Construct pointer to region to begin poisoning, and calculate poison
  // size, so that only members declared in this class are poisoned.
  llvm::Value *OffsetPtr;
  CharUnits::QuantityType PoisonSize;
  ASTContext &Context = CGF.getContext();

  llvm::ConstantInt *OffsetSizePtr = llvm::ConstantInt::get(
      CGF.SizeTy, Context.toCharUnitsFromBits(Layout.getFieldOffset(0)).
                      getQuantity());

  OffsetPtr = CGF.Builder.CreateGEP(CGF.Builder.CreateBitCast(
      CGF.LoadCXXThis(), CGF.Int8PtrTy), OffsetSizePtr);

  PoisonSize = Layout.getSize().getQuantity() -
      Context.toCharUnitsFromBits(Layout.getFieldOffset(0)).getQuantity();

  llvm::Value *Args[] = {
      CGF.Builder.CreateBitCast(OffsetPtr, CGF.VoidPtrTy),
      llvm::ConstantInt::get(CGF.SizeTy, PoisonSize)};

  llvm::Type *ArgTypes[] = {CGF.VoidPtrTy, CGF.SizeTy};

  llvm::FunctionType *FnType =
      llvm::FunctionType::get(CGF.VoidTy, ArgTypes, false);
  llvm::Value *Fn =
      CGF.CGM.CreateRuntimeFunction(FnType, "__sanitizer_dtor_callback");

  // Disables tail call elimination, to prevent the current stack frame from
  // disappearing from the stack trace.
  CGF.CurFn->addFnAttr("disable-tail-calls", "true");
  CGF.EmitNounwindRuntimeCall(Fn, Args);
}

/// EmitDestructorBody - Emits the body of the current destructor.
void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
  const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CurGD.getDecl());
  CXXDtorType DtorType = CurGD.getDtorType();

  Stmt *Body = Dtor->getBody();
  if (Body)
    incrementProfileCounter(Body);

  // The call to operator delete in a deleting destructor happens
  // outside of the function-try-block, which means it's always
  // possible to delegate the destructor body to the complete
  // destructor.  Do so.
  if (DtorType == Dtor_Deleting) {
    EnterDtorCleanups(Dtor, Dtor_Deleting);
    EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
                          /*Delegating=*/false, LoadCXXThis());
    PopCleanupBlock();
    return;
  }

  // If the body is a function-try-block, enter the try before
  // anything else.
  bool isTryBody = (Body && isa<CXXTryStmt>(Body));
  if (isTryBody)
    EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);
  EmitAsanPrologueOrEpilogue(false);

  // Enter the epilogue cleanups.
  RunCleanupsScope DtorEpilogue(*this);

  // If this is the complete variant, just invoke the base variant;
  // the epilogue will destruct the virtual bases.  But we can't do
  // this optimization if the body is a function-try-block, because
  // we'd introduce *two* handler blocks.  In the Microsoft ABI, we
  // always delegate because we might not have a definition in this TU.
  switch (DtorType) {
  case Dtor_Comdat:
    llvm_unreachable("not expecting a COMDAT");

  case Dtor_Deleting: llvm_unreachable("already handled deleting case");

  case Dtor_Complete:
    assert((Body || getTarget().getCXXABI().isMicrosoft()) &&
           "can't emit a dtor without a body for non-Microsoft ABIs");

    // Enter the cleanup scopes for virtual bases.
    EnterDtorCleanups(Dtor, Dtor_Complete);

    if (!isTryBody) {
      EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
                            /*Delegating=*/false, LoadCXXThis());
      break;
    }
    // Fallthrough: act like we're in the base variant.

  case Dtor_Base:
    assert(Body);

    // Enter the cleanup scopes for fields and non-virtual bases.
    EnterDtorCleanups(Dtor, Dtor_Base);

    // Initialize the vtable pointers before entering the body.
    if (!CanSkipVTablePointerInitialization(getContext(), Dtor))
      InitializeVTablePointers(Dtor->getParent());

    if (isTryBody)
      EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
    else if (Body)
      EmitStmt(Body);
    else {
      assert(Dtor->isImplicit() && "bodyless dtor not implicit");
      // nothing to do besides what's in the epilogue
    }
    // -fapple-kext must inline any call to this dtor into
    // the caller's body.
    if (getLangOpts().AppleKext)
      CurFn->addFnAttr(llvm::Attribute::AlwaysInline);

    // Insert memory-poisoning instrumentation, before final clean ups,
    // to ensure this class's members are protected from invalid access.
    if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
        SanOpts.has(SanitizerKind::Memory))
      EmitDtorSanitizerCallback(*this, Dtor);

    break;
  }

  // Jump out through the epilogue cleanups.
  DtorEpilogue.ForceCleanup();

  // Exit the try if applicable.
  if (isTryBody)
    ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
}

void CodeGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &Args) {
  const CXXMethodDecl *AssignOp = cast<CXXMethodDecl>(CurGD.getDecl());
  const Stmt *RootS = AssignOp->getBody();
  assert(isa<CompoundStmt>(RootS) &&
         "Body of an implicit assignment operator should be compound stmt.");
  const CompoundStmt *RootCS = cast<CompoundStmt>(RootS);

  LexicalScope Scope(*this, RootCS->getSourceRange());

  AssignmentMemcpyizer AM(*this, AssignOp, Args);
  for (auto *I : RootCS->body())
    AM.emitAssignment(I);
  AM.finish();
}

namespace {
/// Call the operator delete associated with the current destructor.
struct CallDtorDelete : EHScopeStack::Cleanup {
  CallDtorDelete() {}

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
    const CXXRecordDecl *ClassDecl = Dtor->getParent();
    CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
                       CGF.getContext().getTagDeclType(ClassDecl));
  }
};

struct CallDtorDeleteConditional : EHScopeStack::Cleanup {
  llvm::Value *ShouldDeleteCondition;
public:
  CallDtorDeleteConditional(llvm::Value *ShouldDeleteCondition)
      : ShouldDeleteCondition(ShouldDeleteCondition) {
    assert(ShouldDeleteCondition != nullptr);
  }

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    llvm::BasicBlock *callDeleteBB = CGF.createBasicBlock("dtor.call_delete");
    llvm::BasicBlock *continueBB = CGF.createBasicBlock("dtor.continue");
    llvm::Value *ShouldCallDelete
      = CGF.Builder.CreateIsNull(ShouldDeleteCondition);
    CGF.Builder.CreateCondBr(ShouldCallDelete, continueBB, callDeleteBB);

    CGF.EmitBlock(callDeleteBB);
    const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
    const CXXRecordDecl *ClassDecl = Dtor->getParent();
    CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
                       CGF.getContext().getTagDeclType(ClassDecl));
    CGF.Builder.CreateBr(continueBB);

    CGF.EmitBlock(continueBB);
  }
};

class DestroyField : public EHScopeStack::Cleanup {
  const FieldDecl *field;
  CodeGenFunction::Destroyer *destroyer;
  bool useEHCleanupForArray;

public:
  DestroyField(const FieldDecl *field, CodeGenFunction::Destroyer *destroyer,
               bool useEHCleanupForArray)
      : field(field), destroyer(destroyer),
        useEHCleanupForArray(useEHCleanupForArray) {}

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    // Find the address of the field.
    llvm::Value *thisValue = CGF.LoadCXXThis();
    QualType RecordTy = CGF.getContext().getTagDeclType(field->getParent());
    LValue ThisLV = CGF.MakeAddrLValue(thisValue, RecordTy);
    LValue LV = CGF.EmitLValueForField(ThisLV, field);
    assert(LV.isSimple());

    CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer,
                    flags.isForNormalCleanup() && useEHCleanupForArray);
  }
};
}

/// \brief Emit all code that comes at the end of class's
/// destructor. This is to call destructors on members and base classes
/// in reverse order of their construction.
void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
                                        CXXDtorType DtorType) {
  assert((!DD->isTrivial() || DD->hasAttr<DLLExportAttr>()) &&
         "Should not emit dtor epilogue for non-exported trivial dtor!");

  // The deleting-destructor phase just needs to call the appropriate
  // operator delete that Sema picked up.
  if (DtorType == Dtor_Deleting) {
    assert(DD->getOperatorDelete() &&
           "operator delete missing - EnterDtorCleanups");
    if (CXXStructorImplicitParamValue) {
      // If there is an implicit param to the deleting dtor, it's a boolean
      // telling whether we should call delete at the end of the dtor.
      EHStack.pushCleanup<CallDtorDeleteConditional>(
          NormalAndEHCleanup, CXXStructorImplicitParamValue);
    } else {
      EHStack.pushCleanup<CallDtorDelete>(NormalAndEHCleanup);
    }
    return;
  }

  const CXXRecordDecl *ClassDecl = DD->getParent();

  // Unions have no bases and do not call field destructors.
  if (ClassDecl->isUnion())
    return;

  // The complete-destructor phase just destructs all the virtual bases.
  if (DtorType == Dtor_Complete) {

    // We push them in the forward order so that they'll be popped in
    // the reverse order.
    for (const auto &Base : ClassDecl->vbases()) {
      CXXRecordDecl *BaseClassDecl
        = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());

      // Ignore trivial destructors.
      if (BaseClassDecl->hasTrivialDestructor())
        continue;

      EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
                                        BaseClassDecl,
                                        /*BaseIsVirtual*/ true);
    }

    return;
  }

  assert(DtorType == Dtor_Base);

  // Destroy non-virtual bases.
  for (const auto &Base : ClassDecl->bases()) {
    // Ignore virtual bases.
    if (Base.isVirtual())
      continue;

    CXXRecordDecl *BaseClassDecl = Base.getType()->getAsCXXRecordDecl();

    // Ignore trivial destructors.
    if (BaseClassDecl->hasTrivialDestructor())
      continue;

    EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
                                      BaseClassDecl,
                                      /*BaseIsVirtual*/ false);
  }

  // Destroy direct fields.
  for (const auto *Field : ClassDecl->fields()) {
    QualType type = Field->getType();
    QualType::DestructionKind dtorKind = type.isDestructedType();
    if (!dtorKind) continue;

    // Anonymous union members do not have their destructors called.
    const RecordType *RT = type->getAsUnionType();
    if (RT && RT->getDecl()->isAnonymousStructOrUnion()) continue;

    CleanupKind cleanupKind = getCleanupKind(dtorKind);
    EHStack.pushCleanup<DestroyField>(cleanupKind, Field,
                                      getDestroyer(dtorKind),
                                      cleanupKind & EHCleanup);
  }
}

/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
/// constructor for each of several members of an array.
///
/// \param ctor the constructor to call for each element
/// \param arrayType the type of the array to initialize
/// \param arrayBegin an arrayType*
/// \param zeroInitialize true if each element should be
///   zero-initialized before it is constructed
void CodeGenFunction::EmitCXXAggrConstructorCall(
    const CXXConstructorDecl *ctor, const ConstantArrayType *arrayType,
    llvm::Value *arrayBegin, const CXXConstructExpr *E, bool zeroInitialize) {
  QualType elementType;
  llvm::Value *numElements =
    emitArrayLength(arrayType, elementType, arrayBegin);

  EmitCXXAggrConstructorCall(ctor, numElements, arrayBegin, E, zeroInitialize);
}

/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
/// constructor for each of several members of an array.
///
/// \param ctor the constructor to call for each element
/// \param numElements the number of elements in the array;
///   may be zero
/// \param arrayBegin a T*, where T is the type constructed by ctor
/// \param zeroInitialize true if each element should be
///   zero-initialized before it is constructed
void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
                                                 llvm::Value *numElements,
                                                 llvm::Value *arrayBegin,
                                                 const CXXConstructExpr *E,
                                                 bool zeroInitialize) {

  // It's legal for numElements to be zero. This can happen both
  // dynamically, because x can be zero in 'new A[x]', and statically,
  // because of GCC extensions that permit zero-length arrays. There
  // are probably legitimate places where we could assume that this
  // doesn't happen, but it's not clear that it's worth it.
  llvm::BranchInst *zeroCheckBranch = nullptr;

  // Optimize for a constant count.
  llvm::ConstantInt *constantCount
    = dyn_cast<llvm::ConstantInt>(numElements);
  if (constantCount) {
    // Just skip out if the constant count is zero.
    if (constantCount->isZero()) return;

  // Otherwise, emit the check.
  } else {
    llvm::BasicBlock *loopBB = createBasicBlock("new.ctorloop");
    llvm::Value *iszero = Builder.CreateIsNull(numElements, "isempty");
    zeroCheckBranch = Builder.CreateCondBr(iszero, loopBB, loopBB);
    EmitBlock(loopBB);
  }

  // Find the end of the array.
  llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(arrayBegin, numElements,
                                                    "arrayctor.end");

  // Enter the loop, setting up a phi for the current location to initialize.
  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = createBasicBlock("arrayctor.loop");
  EmitBlock(loopBB);
  llvm::PHINode *cur = Builder.CreatePHI(arrayBegin->getType(), 2,
                                         "arrayctor.cur");
  cur->addIncoming(arrayBegin, entryBB);

  // Inside the loop body, emit the constructor call on the array element.

  QualType type = getContext().getTypeDeclType(ctor->getParent());

  // Zero initialize the storage, if requested.
  if (zeroInitialize)
    EmitNullInitialization(cur, type);

  // C++ [class.temporary]p4:
  // There are two contexts in which temporaries are destroyed at a different
  // point than the end of the full-expression. The first context is when a
  // default constructor is called to initialize an element of an array.
  // If the constructor has one or more default arguments, the destruction of
  // every temporary created in a default argument expression is sequenced
  // before the construction of the next array element, if any.

  {
    RunCleanupsScope Scope(*this);

    // Evaluate the constructor and its arguments in a regular
    // partial-destroy cleanup.
    if (getLangOpts().Exceptions &&
        !ctor->getParent()->hasTrivialDestructor()) {
      Destroyer *destroyer = destroyCXXObject;
      pushRegularPartialArrayCleanup(arrayBegin, cur, type, *destroyer);
    }

    EmitCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/false,
                           /*Delegating=*/false, cur, E);
  }

  // Go to the next element.
  llvm::Value *next =
    Builder.CreateInBoundsGEP(cur, llvm::ConstantInt::get(SizeTy, 1),
                              "arrayctor.next");
  cur->addIncoming(next, Builder.GetInsertBlock());

  // Check whether that's the end of the loop.
  llvm::Value *done = Builder.CreateICmpEQ(next, arrayEnd, "arrayctor.done");
  llvm::BasicBlock *contBB = createBasicBlock("arrayctor.cont");
  Builder.CreateCondBr(done, contBB, loopBB);

  // Patch the earlier check to skip over the loop.
  if (zeroCheckBranch) zeroCheckBranch->setSuccessor(0, contBB);

  EmitBlock(contBB);
}

void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF,
                                       llvm::Value *addr,
                                       QualType type) {
  const RecordType *rtype = type->castAs<RecordType>();
  const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
  const CXXDestructorDecl *dtor = record->getDestructor();
  assert(!dtor->isTrivial());
  CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false,
                            /*Delegating=*/false, addr);
}

void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
                                             CXXCtorType Type,
                                             bool ForVirtualBase,
                                             bool Delegating, llvm::Value *This,
                                             const CXXConstructExpr *E) {
  // C++11 [class.mfct.non-static]p2:
  // If a non-static member function of a class X is called for an object that
  // is not of type X, or of a type derived from X, the behavior is undefined.
  // FIXME: Provide a source location here.
  EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, SourceLocation(), This,
                getContext().getRecordType(D->getParent()));

  if (D->isTrivial() && D->isDefaultConstructor()) {
    assert(E->getNumArgs() == 0 && "trivial default ctor with args");
    return;
  }

  // If this is a trivial constructor, just emit what's needed. If this is a
  // union copy constructor, we must emit a memcpy, because the AST does not
  // model that copy.
  if (isMemcpyEquivalentSpecialMember(D)) {
    assert(E->getNumArgs() == 1 && "unexpected argcount for trivial ctor");

    const Expr *Arg = E->getArg(0);
    QualType SrcTy = Arg->getType();
    llvm::Value *Src = EmitLValue(Arg).getAddress();
    QualType DestTy = getContext().getTypeDeclType(D->getParent());
    EmitAggregateCopyCtor(This, Src, DestTy, SrcTy);
    return;
  }

  CallArgList Args;

  // Push the this ptr.
  Args.add(RValue::get(This), D->getThisType(getContext()));

  // Add the rest of the user-supplied arguments.
  const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();
  EmitCallArgs(Args, FPT, E->arguments(), E->getConstructor());

  // Insert any ABI-specific implicit constructor arguments.
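  // For instance, the Itanium ABI may add a VTT argument for constructors of
  // classes with virtual bases, and the Microsoft ABI may add a
  // 'most derived' flag; addImplicitConstructorArgs hides those details.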
  unsigned ExtraArgs = CGM.getCXXABI().addImplicitConstructorArgs(
      *this, D, Type, ForVirtualBase, Delegating, Args);

  // Emit the call.
  llvm::Value *Callee = CGM.getAddrOfCXXStructor(D, getFromCtorType(Type));
  const CGFunctionInfo &Info =
      CGM.getTypes().arrangeCXXConstructorCall(Args, D, Type, ExtraArgs);
  EmitCall(Info, Callee, ReturnValueSlot(), Args, D);
}

void
CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
                                                llvm::Value *This,
                                                llvm::Value *Src,
                                                const CXXConstructExpr *E) {
  if (isMemcpyEquivalentSpecialMember(D)) {
    assert(E->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
    assert(D->isCopyOrMoveConstructor() &&
           "trivial 1-arg ctor not a copy/move ctor");
    EmitAggregateCopyCtor(This, Src,
                          getContext().getTypeDeclType(D->getParent()),
                          (*E->arg_begin())->getType());
    return;
  }
  llvm::Value *Callee = CGM.getAddrOfCXXStructor(D, StructorType::Complete);
  assert(D->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();

  CallArgList Args;

  // Push the this ptr.
  Args.add(RValue::get(This), D->getThisType(getContext()));

  // Push the src ptr.
  QualType QT = *(FPT->param_type_begin());
  llvm::Type *t = CGM.getTypes().ConvertType(QT);
  Src = Builder.CreateBitCast(Src, t);
  Args.add(RValue::get(Src), QT);

  // Skip over first argument (Src).
  EmitCallArgs(Args, FPT, drop_begin(E->arguments(), 1), E->getConstructor(),
               /*ParamsToSkip*/ 1);

  EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, RequiredArgs::All),
           Callee, ReturnValueSlot(), Args, D);
}

void
CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                                CXXCtorType CtorType,
                                                const FunctionArgList &Args,
                                                SourceLocation Loc) {
  CallArgList DelegateArgs;

  FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
  assert(I != E && "no parameters to constructor");

  // this
  DelegateArgs.add(RValue::get(LoadCXXThis()), (*I)->getType());
  ++I;

  // vtt
  if (llvm::Value *VTT = GetVTTParameter(GlobalDecl(Ctor, CtorType),
                                         /*ForVirtualBase=*/false,
                                         /*Delegating=*/true)) {
    QualType VoidPP = getContext().getPointerType(getContext().VoidPtrTy);
    DelegateArgs.add(RValue::get(VTT), VoidPP);

    if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
      assert(I != E && "cannot skip vtt parameter, already done with args");
      assert((*I)->getType() == VoidPP && "skipping parameter not of vtt type");
      ++I;
    }
  }

  // Explicit arguments.
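  // The remaining entries in Args are the user-written constructor
  // parameters; each one is re-emitted below as an argument to the
  // delegated-to constructor via EmitDelegateCallArg.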
  for (; I != E; ++I) {
    const VarDecl *param = *I;
    // FIXME: per-argument source location
    EmitDelegateCallArg(DelegateArgs, param, Loc);
  }

  llvm::Value *Callee =
      CGM.getAddrOfCXXStructor(Ctor, getFromCtorType(CtorType));
  EmitCall(CGM.getTypes()
               .arrangeCXXStructorDeclaration(Ctor, getFromCtorType(CtorType)),
           Callee, ReturnValueSlot(), DelegateArgs, Ctor);
}

namespace {
  struct CallDelegatingCtorDtor : EHScopeStack::Cleanup {
    const CXXDestructorDecl *Dtor;
    llvm::Value *Addr;
    CXXDtorType Type;

    CallDelegatingCtorDtor(const CXXDestructorDecl *D, llvm::Value *Addr,
                           CXXDtorType Type)
      : Dtor(D), Addr(Addr), Type(Type) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false,
                                /*Delegating=*/true, Addr);
    }
  };
}

void
CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                                  const FunctionArgList &Args) {
  assert(Ctor->isDelegatingConstructor());

  llvm::Value *ThisPtr = LoadCXXThis();

  QualType Ty = getContext().getTagDeclType(Ctor->getParent());
  CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
  AggValueSlot AggSlot =
    AggValueSlot::forAddr(ThisPtr, Alignment, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased);

  EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot);

  const CXXRecordDecl *ClassDecl = Ctor->getParent();
  if (CGM.getLangOpts().Exceptions && !ClassDecl->hasTrivialDestructor()) {
    CXXDtorType Type =
      CurGD.getCtorType() == Ctor_Complete ? Dtor_Complete : Dtor_Base;

    EHStack.pushCleanup<CallDelegatingCtorDtor>(EHCleanup,
                                                ClassDecl->getDestructor(),
                                                ThisPtr, Type);
  }
}

void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
                                            CXXDtorType Type,
                                            bool ForVirtualBase,
                                            bool Delegating,
                                            llvm::Value *This) {
  CGM.getCXXABI().EmitDestructorCall(*this, DD, Type, ForVirtualBase,
                                     Delegating, This);
}

namespace {
  struct CallLocalDtor : EHScopeStack::Cleanup {
    const CXXDestructorDecl *Dtor;
    llvm::Value *Addr;

    CallLocalDtor(const CXXDestructorDecl *D, llvm::Value *Addr)
      : Dtor(D), Addr(Addr) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                                /*ForVirtualBase=*/false,
                                /*Delegating=*/false, Addr);
    }
  };
}

void CodeGenFunction::PushDestructorCleanup(const CXXDestructorDecl *D,
                                            llvm::Value *Addr) {
  EHStack.pushCleanup<CallLocalDtor>(NormalAndEHCleanup, D, Addr);
}

void CodeGenFunction::PushDestructorCleanup(QualType T, llvm::Value *Addr) {
  CXXRecordDecl *ClassDecl = T->getAsCXXRecordDecl();
  if (!ClassDecl) return;
  if (ClassDecl->hasTrivialDestructor()) return;

  const CXXDestructorDecl *D = ClassDecl->getDestructor();
  assert(D && D->isUsed() && "destructor not marked as used!");
  PushDestructorCleanup(D, Addr);
}

void
CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
                                         const CXXRecordDecl *NearestVBase,
                                         CharUnits OffsetFromNearestVBase,
                                         const CXXRecordDecl *VTableClass) {
  const CXXRecordDecl *RD = Base.getBase();

  // Don't initialize the vtable pointer if the class is marked with the
  // 'novtable' attribute.
  if ((RD == VTableClass || RD == NearestVBase) &&
      VTableClass->hasAttr<MSNoVTableAttr>())
    return;

  // Compute the address point.
  bool NeedsVirtualOffset;
  llvm::Value *VTableAddressPoint =
      CGM.getCXXABI().getVTableAddressPointInStructor(
          *this, VTableClass, Base, NearestVBase, NeedsVirtualOffset);
  if (!VTableAddressPoint)
    return;

  // Compute where to store the address point.
  llvm::Value *VirtualOffset = nullptr;
  CharUnits NonVirtualOffset = CharUnits::Zero();

  if (NeedsVirtualOffset) {
    // We need to use the virtual base offset offset because the virtual base
    // might have a different offset in the most derived class.
    VirtualOffset = CGM.getCXXABI().GetVirtualBaseClassOffset(*this,
                                                              LoadCXXThis(),
                                                              VTableClass,
                                                              NearestVBase);
    NonVirtualOffset = OffsetFromNearestVBase;
  } else {
    // We can just use the base offset in the complete class.
    NonVirtualOffset = Base.getBaseOffset();
  }

  // Apply the offsets.
  llvm::Value *VTableField = LoadCXXThis();

  if (!NonVirtualOffset.isZero() || VirtualOffset)
    VTableField = ApplyNonVirtualAndVirtualOffset(*this, VTableField,
                                                  NonVirtualOffset,
                                                  VirtualOffset);

  // Finally, store the address point. Use the same LLVM types as the field to
  // support optimization.
  llvm::Type *VTablePtrTy =
      llvm::FunctionType::get(CGM.Int32Ty, /*isVarArg=*/true)
          ->getPointerTo()
          ->getPointerTo();
  VTableField = Builder.CreateBitCast(VTableField, VTablePtrTy->getPointerTo());
  VTableAddressPoint = Builder.CreateBitCast(VTableAddressPoint, VTablePtrTy);
  llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField);
  CGM.DecorateInstruction(Store, CGM.getTBAAInfoForVTablePtr());
}

void
CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
                                          const CXXRecordDecl *NearestVBase,
                                          CharUnits OffsetFromNearestVBase,
                                          bool BaseIsNonVirtualPrimaryBase,
                                          const CXXRecordDecl *VTableClass,
                                          VisitedVirtualBasesSetTy& VBases) {
  // If this base is a non-virtual primary base the address point has already
  // been set.
  if (!BaseIsNonVirtualPrimaryBase) {
    // Initialize the vtable pointer for this base.
    InitializeVTablePointer(Base, NearestVBase, OffsetFromNearestVBase,
                            VTableClass);
  }

  const CXXRecordDecl *RD = Base.getBase();

  // Traverse bases.
  for (const auto &I : RD->bases()) {
    CXXRecordDecl *BaseDecl
      = cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());

    // Ignore classes without a vtable.
    if (!BaseDecl->isDynamicClass())
      continue;

    CharUnits BaseOffset;
    CharUnits BaseOffsetFromNearestVBase;
    bool BaseDeclIsNonVirtualPrimaryBase;

    if (I.isVirtual()) {
      // Check if we've visited this virtual base before.
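      // A virtual base subobject is shared by every path that reaches it, so
      // its vtable pointer must only be initialized once; VBases records the
      // virtual bases that have already been handled.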
      if (!VBases.insert(BaseDecl).second)
        continue;

      const ASTRecordLayout &Layout =
        getContext().getASTRecordLayout(VTableClass);

      BaseOffset = Layout.getVBaseClassOffset(BaseDecl);
      BaseOffsetFromNearestVBase = CharUnits::Zero();
      BaseDeclIsNonVirtualPrimaryBase = false;
    } else {
      const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

      BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
      BaseOffsetFromNearestVBase =
        OffsetFromNearestVBase + Layout.getBaseClassOffset(BaseDecl);
      BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl;
    }

    InitializeVTablePointers(BaseSubobject(BaseDecl, BaseOffset),
                             I.isVirtual() ? BaseDecl : NearestVBase,
                             BaseOffsetFromNearestVBase,
                             BaseDeclIsNonVirtualPrimaryBase,
                             VTableClass, VBases);
  }
}

void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
  // Ignore classes without a vtable.
  if (!RD->isDynamicClass())
    return;

  // Initialize the vtable pointers for this class and all of its bases.
  VisitedVirtualBasesSetTy VBases;
  InitializeVTablePointers(BaseSubobject(RD, CharUnits::Zero()),
                           /*NearestVBase=*/nullptr,
                           /*OffsetFromNearestVBase=*/CharUnits::Zero(),
                           /*BaseIsNonVirtualPrimaryBase=*/false, RD, VBases);

  if (RD->getNumVBases())
    CGM.getCXXABI().initializeHiddenVirtualInheritanceMembers(*this, RD);
}

llvm::Value *CodeGenFunction::GetVTablePtr(llvm::Value *This,
                                           llvm::Type *Ty) {
  llvm::Value *VTablePtrSrc = Builder.CreateBitCast(This, Ty->getPointerTo());
  llvm::Instruction *VTable = Builder.CreateLoad(VTablePtrSrc, "vtable");
  CGM.DecorateInstruction(VTable, CGM.getTBAAInfoForVTablePtr());
  return VTable;
}

// If a class has a single non-virtual base and does not introduce or override
// virtual member functions or fields, it will have the same layout as its
// base. This function returns the least derived such class.
//
// Casting an instance of a base class to such a derived class is technically
// undefined behavior, but it is a relatively common hack for introducing
// member functions on class instances with specific properties
// (e.g. llvm::Operator) that works under most compilers and should not have
// security implications, so we allow it by default. It can be disabled with
// -fsanitize=cfi-cast-strict.
static const CXXRecordDecl *
LeastDerivedClassWithSameLayout(const CXXRecordDecl *RD) {
  if (!RD->field_empty())
    return RD;

  if (RD->getNumVBases() != 0)
    return RD;

  if (RD->getNumBases() != 1)
    return RD;

  for (const CXXMethodDecl *MD : RD->methods()) {
    if (MD->isVirtual()) {
      // Virtual member functions are only ok if they are implicit destructors
      // because the implicit destructor will have the same semantics as the
      // base class's destructor if no fields are added.
      if (isa<CXXDestructorDecl>(MD) && MD->isImplicit())
        continue;
      return RD;
    }
  }

  return LeastDerivedClassWithSameLayout(
      RD->bases_begin()->getType()->getAsCXXRecordDecl());
}

void CodeGenFunction::EmitVTablePtrCheckForCall(const CXXMethodDecl *MD,
                                                llvm::Value *VTable,
                                                CFITypeCheckKind TCK,
                                                SourceLocation Loc) {
  const CXXRecordDecl *ClassDecl = MD->getParent();
  if (!SanOpts.has(SanitizerKind::CFICastStrict))
    ClassDecl = LeastDerivedClassWithSameLayout(ClassDecl);

  EmitVTablePtrCheck(ClassDecl, VTable, TCK, Loc);
}

void CodeGenFunction::EmitVTablePtrCheckForCast(QualType T,
                                                llvm::Value *Derived,
                                                bool MayBeNull,
                                                CFITypeCheckKind TCK,
                                                SourceLocation Loc) {
  if (!getLangOpts().CPlusPlus)
    return;

  auto *ClassTy = T->getAs<RecordType>();
  if (!ClassTy)
    return;

  const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(ClassTy->getDecl());

  if (!ClassDecl->isCompleteDefinition() || !ClassDecl->isDynamicClass())
    return;

  if (!SanOpts.has(SanitizerKind::CFICastStrict))
    ClassDecl = LeastDerivedClassWithSameLayout(ClassDecl);

  llvm::BasicBlock *ContBlock = nullptr;

  if (MayBeNull) {
    llvm::Value *DerivedNotNull =
        Builder.CreateIsNotNull(Derived, "cast.nonnull");

    llvm::BasicBlock *CheckBlock = createBasicBlock("cast.check");
    ContBlock = createBasicBlock("cast.cont");

    Builder.CreateCondBr(DerivedNotNull, CheckBlock, ContBlock);

    EmitBlock(CheckBlock);
  }

  llvm::Value *VTable = GetVTablePtr(Derived, Int8PtrTy);
  EmitVTablePtrCheck(ClassDecl, VTable, TCK, Loc);

  if (MayBeNull) {
    Builder.CreateBr(ContBlock);
    EmitBlock(ContBlock);
  }
}

void CodeGenFunction::EmitVTablePtrCheck(const CXXRecordDecl *RD,
                                         llvm::Value *VTable,
                                         CFITypeCheckKind TCK,
                                         SourceLocation Loc) {
  if (CGM.IsCFIBlacklistedRecord(RD))
    return;

  SanitizerScope SanScope(this);

  std::string OutName;
  llvm::raw_string_ostream Out(OutName);
  CGM.getCXXABI().getMangleContext().mangleCXXVTableBitSet(RD, Out);

  llvm::Value *BitSetName = llvm::MetadataAsValue::get(
      getLLVMContext(), llvm::MDString::get(getLLVMContext(), Out.str()));

  llvm::Value *CastedVTable = Builder.CreateBitCast(VTable, Int8PtrTy);
  llvm::Value *BitSetTest =
      Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::bitset_test),
                         {CastedVTable, BitSetName});

  SanitizerMask M;
  switch (TCK) {
  case CFITCK_VCall:
    M = SanitizerKind::CFIVCall;
    break;
  case CFITCK_NVCall:
    M = SanitizerKind::CFINVCall;
    break;
  case CFITCK_DerivedCast:
    M = SanitizerKind::CFIDerivedCast;
    break;
  case CFITCK_UnrelatedCast:
    M = SanitizerKind::CFIUnrelatedCast;
    break;
  }

  llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(Loc),
      EmitCheckTypeDescriptor(QualType(RD->getTypeForDecl(), 0)),
      llvm::ConstantInt::get(Int8Ty, TCK),
  };
  EmitCheck(std::make_pair(BitSetTest, M), "cfi_bad_type", StaticData,
            CastedVTable);
}

// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
// quite what we want.
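//
// skipNoOpCastsAndParens peels off parentheses, no-op casts, and
// __extension__ markers so that, e.g. (illustrative only),
//   (__extension__ (b))->f()
// is considered for devirtualization the same way 'b->f()' is.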
static const Expr *skipNoOpCastsAndParens(const Expr *E) {
  while (true) {
    if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
      E = PE->getSubExpr();
      continue;
    }

    if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
      if (CE->getCastKind() == CK_NoOp) {
        E = CE->getSubExpr();
        continue;
      }
    }
    if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
      if (UO->getOpcode() == UO_Extension) {
        E = UO->getSubExpr();
        continue;
      }
    }
    return E;
  }
}

bool
CodeGenFunction::CanDevirtualizeMemberFunctionCall(const Expr *Base,
                                                   const CXXMethodDecl *MD) {
  // When building with -fapple-kext, all calls must go through the vtable
  // since the kernel linker can do runtime patching of vtables.
  if (getLangOpts().AppleKext)
    return false;

  // If the most derived class is marked final, we know that no subclass can
  // override this member function and so we can devirtualize it. For example:
  //
  // struct A { virtual void f(); };
  // struct B final : A { };
  //
  // void f(B *b) {
  //   b->f();
  // }
  //
  const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType();
  if (MostDerivedClassDecl->hasAttr<FinalAttr>())
    return true;

  // If the member function is marked 'final', we know that it can't be
  // overridden and can therefore devirtualize it.
  if (MD->hasAttr<FinalAttr>())
    return true;

  // Similarly, if the class itself is marked 'final', nothing can derive from
  // it, so none of its member functions can be overridden and we can therefore
  // devirtualize the member function call.
  if (MD->getParent()->hasAttr<FinalAttr>())
    return true;

  Base = skipNoOpCastsAndParens(Base);
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
      // This is a record decl. We know the type and can devirtualize it.
      return VD->getType()->isRecordType();
    }

    return false;
  }

  // We can devirtualize calls on an object accessed by a class member access
  // expression, since by C++11 [basic.life]p6 we know that it can't refer to
  // a derived class object constructed in the same location.
  if (const MemberExpr *ME = dyn_cast<MemberExpr>(Base))
    if (const ValueDecl *VD = dyn_cast<ValueDecl>(ME->getMemberDecl()))
      return VD->getType()->isRecordType();

  // We can always devirtualize calls on temporary object expressions.
  if (isa<CXXConstructExpr>(Base))
    return true;

  // And calls on bound temporaries.
  if (isa<CXXBindTemporaryExpr>(Base))
    return true;

  // Check if this is a call expr that returns a record type.
  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
    return CE->getCallReturnType(getContext())->isRecordType();

  // We can't devirtualize the call.
  return false;
}

void CodeGenFunction::EmitForwardingCallToLambda(
                                      const CXXMethodDecl *callOperator,
                                      CallArgList &callArgs) {
  // Get the address of the call operator.
  const CGFunctionInfo &calleeFnInfo =
    CGM.getTypes().arrangeCXXMethodDeclaration(callOperator);
  llvm::Value *callee =
    CGM.GetAddrOfFunction(GlobalDecl(callOperator),
                          CGM.getTypes().GetFunctionType(calleeFnInfo));

  // Prepare the return slot.
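  // If the ABI returns the result indirectly (sret), reuse this function's own
  // return slot so the forwarded call writes its aggregate result in place;
  // otherwise the scalar result is propagated with EmitReturnOfRValue below.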
  const FunctionProtoType *FPT =
      callOperator->getType()->castAs<FunctionProtoType>();
  QualType resultType = FPT->getReturnType();
  ReturnValueSlot returnSlot;
  if (!resultType->isVoidType() &&
      calleeFnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
      !hasScalarEvaluationKind(calleeFnInfo.getReturnType()))
    returnSlot = ReturnValueSlot(ReturnValue, resultType.isVolatileQualified());

  // We don't need to separately arrange the call arguments because
  // the call can't be variadic anyway --- it's impossible to forward
  // variadic arguments.

  // Now emit our call.
  RValue RV = EmitCall(calleeFnInfo, callee, returnSlot,
                       callArgs, callOperator);

  // If necessary, copy the returned value into the slot.
  if (!resultType->isVoidType() && returnSlot.isNull())
    EmitReturnOfRValue(RV, resultType);
  else
    EmitBranchThroughCleanup(ReturnBlock);
}

void CodeGenFunction::EmitLambdaBlockInvokeBody() {
  const BlockDecl *BD = BlockInfo->getBlockDecl();
  const VarDecl *variable = BD->capture_begin()->getVariable();
  const CXXRecordDecl *Lambda = variable->getType()->getAsCXXRecordDecl();

  // Start building arguments for forwarding call
  CallArgList CallArgs;

  QualType ThisType =
      getContext().getPointerType(getContext().getRecordType(Lambda));
  llvm::Value *ThisPtr = GetAddrOfBlockDecl(variable, false);
  CallArgs.add(RValue::get(ThisPtr), ThisType);

  // Add the rest of the parameters.
  for (auto param : BD->params())
    EmitDelegateCallArg(CallArgs, param, param->getLocStart());

  assert(!Lambda->isGenericLambda() &&
         "generic lambda interconversion to block not implemented");
  EmitForwardingCallToLambda(Lambda->getLambdaCallOperator(), CallArgs);
}

void CodeGenFunction::EmitLambdaToBlockPointerBody(FunctionArgList &Args) {
  if (cast<CXXMethodDecl>(CurCodeDecl)->isVariadic()) {
    // FIXME: Making this work correctly is nasty because it requires either
    // cloning the body of the call operator or making the call operator
    // forward.
    CGM.ErrorUnsupported(CurCodeDecl, "lambda conversion to variadic function");
    return;
  }

  EmitFunctionBody(Args, cast<FunctionDecl>(CurGD.getDecl())->getBody());
}

void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
  const CXXRecordDecl *Lambda = MD->getParent();

  // Start building arguments for forwarding call
  CallArgList CallArgs;

  QualType ThisType =
      getContext().getPointerType(getContext().getRecordType(Lambda));
  llvm::Value *ThisPtr = llvm::UndefValue::get(getTypes().ConvertType(ThisType));
  CallArgs.add(RValue::get(ThisPtr), ThisType);

  // Add the rest of the parameters.
  for (auto Param : MD->params())
    EmitDelegateCallArg(CallArgs, Param, Param->getLocStart());

  const CXXMethodDecl *CallOp = Lambda->getLambdaCallOperator();
  // For a generic lambda, find the corresponding call operator specialization
  // to which the call to the static-invoker shall be forwarded.
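  // For example (illustrative only):
  //   auto id = [](auto x) { return x; };
  //   int (*fp)(int) = id;
  // The static invoker behind 'fp' must forward to operator()<int>, which is
  // looked up from the call operator's function template below.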
  if (Lambda->isGenericLambda()) {
    assert(MD->isFunctionTemplateSpecialization());
    const TemplateArgumentList *TAL = MD->getTemplateSpecializationArgs();
    FunctionTemplateDecl *CallOpTemplate =
        CallOp->getDescribedFunctionTemplate();
    void *InsertPos = nullptr;
    FunctionDecl *CorrespondingCallOpSpecialization =
        CallOpTemplate->findSpecialization(TAL->asArray(), InsertPos);
    assert(CorrespondingCallOpSpecialization);
    CallOp = cast<CXXMethodDecl>(CorrespondingCallOpSpecialization);
  }
  EmitForwardingCallToLambda(CallOp, CallArgs);
}

void CodeGenFunction::EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD) {
  if (MD->isVariadic()) {
    // FIXME: Making this work correctly is nasty because it requires either
    // cloning the body of the call operator or making the call operator
    // forward.
    CGM.ErrorUnsupported(MD, "lambda conversion to variadic function");
    return;
  }

  EmitLambdaDelegatingInvokeBody(MD);
}
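
// For example (illustrative only), the conversion
//   void (*fp)() = []{};
// is emitted through a static invoker like the one above, which simply
// delegates to the lambda's call operator via EmitLambdaDelegatingInvokeBody.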