1 //===--- CGClass.cpp - Emit LLVM Code for C++ classes ---------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This contains code dealing with C++ code generation of classes 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "CGBlocks.h" 15 #include "CGCXXABI.h" 16 #include "CGDebugInfo.h" 17 #include "CGRecordLayout.h" 18 #include "CodeGenFunction.h" 19 #include "clang/AST/CXXInheritance.h" 20 #include "clang/AST/DeclTemplate.h" 21 #include "clang/AST/EvaluatedExprVisitor.h" 22 #include "clang/AST/RecordLayout.h" 23 #include "clang/AST/StmtCXX.h" 24 #include "clang/Basic/TargetBuiltins.h" 25 #include "clang/CodeGen/CGFunctionInfo.h" 26 #include "clang/Frontend/CodeGenOptions.h" 27 #include "llvm/IR/Intrinsics.h" 28 29 using namespace clang; 30 using namespace CodeGen; 31 32 /// Return the best known alignment for an unknown pointer to a 33 /// particular class. 34 CharUnits CodeGenModule::getClassPointerAlignment(const CXXRecordDecl *RD) { 35 if (!RD->isCompleteDefinition()) 36 return CharUnits::One(); // Hopefully won't be used anywhere. 37 38 auto &layout = getContext().getASTRecordLayout(RD); 39 40 // If the class is final, then we know that the pointer points to an 41 // object of that type and can use the full alignment. 42 if (RD->hasAttr<FinalAttr>()) { 43 return layout.getAlignment(); 44 45 // Otherwise, we have to assume it could be a subclass. 46 } else { 47 return layout.getNonVirtualAlignment(); 48 } 49 } 50 51 /// Return the best known alignment for a pointer to a virtual base, 52 /// given the alignment of a pointer to the derived class. 53 CharUnits CodeGenModule::getVBaseAlignment(CharUnits actualDerivedAlign, 54 const CXXRecordDecl *derivedClass, 55 const CXXRecordDecl *vbaseClass) { 56 // The basic idea here is that an underaligned derived pointer might 57 // indicate an underaligned base pointer. 58 59 assert(vbaseClass->isCompleteDefinition()); 60 auto &baseLayout = getContext().getASTRecordLayout(vbaseClass); 61 CharUnits expectedVBaseAlign = baseLayout.getNonVirtualAlignment(); 62 63 return getDynamicOffsetAlignment(actualDerivedAlign, derivedClass, 64 expectedVBaseAlign); 65 } 66 67 CharUnits 68 CodeGenModule::getDynamicOffsetAlignment(CharUnits actualBaseAlign, 69 const CXXRecordDecl *baseDecl, 70 CharUnits expectedTargetAlign) { 71 // If the base is an incomplete type (which is, alas, possible with 72 // member pointers), be pessimistic. 73 if (!baseDecl->isCompleteDefinition()) 74 return std::min(actualBaseAlign, expectedTargetAlign); 75 76 auto &baseLayout = getContext().getASTRecordLayout(baseDecl); 77 CharUnits expectedBaseAlign = baseLayout.getNonVirtualAlignment(); 78 79 // If the class is properly aligned, assume the target offset is, too. 80 // 81 // This actually isn't necessarily the right thing to do --- if the 82 // class is a complete object, but it's only properly aligned for a 83 // base subobject, then the alignments of things relative to it are 84 // probably off as well. (Note that this requires the alignment of 85 // the target to be greater than the NV alignment of the derived 86 // class.) 
87 // 88 // However, our approach to this kind of under-alignment can only 89 // ever be best effort; after all, we're never going to propagate 90 // alignments through variables or parameters. Note, in particular, 91 // that constructing a polymorphic type in an address that's less 92 // than pointer-aligned will generally trap in the constructor, 93 // unless we someday add some sort of attribute to change the 94 // assumed alignment of 'this'. So our goal here is pretty much 95 // just to allow the user to explicitly say that a pointer is 96 // under-aligned and then safely access its fields and v-tables. 97 if (actualBaseAlign >= expectedBaseAlign) { 98 return expectedTargetAlign; 99 } 100 101 // Otherwise, we might be offset by an arbitrary multiple of the 102 // actual alignment. The correct adjustment is to take the min of 103 // the two alignments. 104 return std::min(actualBaseAlign, expectedTargetAlign); 105 } 106 107 Address CodeGenFunction::LoadCXXThisAddress() { 108 assert(CurFuncDecl && "loading 'this' without a func declaration?"); 109 assert(isa<CXXMethodDecl>(CurFuncDecl)); 110 111 // Lazily compute CXXThisAlignment. 112 if (CXXThisAlignment.isZero()) { 113 // Just use the best known alignment for the parent. 114 // TODO: if we're currently emitting a complete-object ctor/dtor, 115 // we can always use the complete-object alignment. 116 auto RD = cast<CXXMethodDecl>(CurFuncDecl)->getParent(); 117 CXXThisAlignment = CGM.getClassPointerAlignment(RD); 118 } 119 120 return Address(LoadCXXThis(), CXXThisAlignment); 121 } 122 123 /// Emit the address of a field using a member data pointer. 124 /// 125 /// \param E Only used for emergency diagnostics 126 Address 127 CodeGenFunction::EmitCXXMemberDataPointerAddress(const Expr *E, Address base, 128 llvm::Value *memberPtr, 129 const MemberPointerType *memberPtrType, 130 AlignmentSource *alignSource) { 131 // Ask the ABI to compute the actual address. 132 llvm::Value *ptr = 133 CGM.getCXXABI().EmitMemberDataPointerAddress(*this, E, base, 134 memberPtr, memberPtrType); 135 136 QualType memberType = memberPtrType->getPointeeType(); 137 CharUnits memberAlign = getNaturalTypeAlignment(memberType, alignSource); 138 memberAlign = 139 CGM.getDynamicOffsetAlignment(base.getAlignment(), 140 memberPtrType->getClass()->getAsCXXRecordDecl(), 141 memberAlign); 142 return Address(ptr, memberAlign); 143 } 144 145 CharUnits CodeGenModule::computeNonVirtualBaseClassOffset( 146 const CXXRecordDecl *DerivedClass, CastExpr::path_const_iterator Start, 147 CastExpr::path_const_iterator End) { 148 CharUnits Offset = CharUnits::Zero(); 149 150 const ASTContext &Context = getContext(); 151 const CXXRecordDecl *RD = DerivedClass; 152 153 for (CastExpr::path_const_iterator I = Start; I != End; ++I) { 154 const CXXBaseSpecifier *Base = *I; 155 assert(!Base->isVirtual() && "Should not see virtual bases here!"); 156 157 // Get the layout. 158 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 159 160 const CXXRecordDecl *BaseDecl = 161 cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl()); 162 163 // Add the offset. 
164 Offset += Layout.getBaseClassOffset(BaseDecl); 165 166 RD = BaseDecl; 167 } 168 169 return Offset; 170 } 171 172 llvm::Constant * 173 CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl, 174 CastExpr::path_const_iterator PathBegin, 175 CastExpr::path_const_iterator PathEnd) { 176 assert(PathBegin != PathEnd && "Base path should not be empty!"); 177 178 CharUnits Offset = 179 computeNonVirtualBaseClassOffset(ClassDecl, PathBegin, PathEnd); 180 if (Offset.isZero()) 181 return nullptr; 182 183 llvm::Type *PtrDiffTy = 184 Types.ConvertType(getContext().getPointerDiffType()); 185 186 return llvm::ConstantInt::get(PtrDiffTy, Offset.getQuantity()); 187 } 188 189 /// Gets the address of a direct base class within a complete object. 190 /// This should only be used for (1) non-virtual bases or (2) virtual bases 191 /// when the type is known to be complete (e.g. in complete destructors). 192 /// 193 /// The object pointed to by 'This' is assumed to be non-null. 194 Address 195 CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(Address This, 196 const CXXRecordDecl *Derived, 197 const CXXRecordDecl *Base, 198 bool BaseIsVirtual) { 199 // 'this' must be a pointer (in some address space) to Derived. 200 assert(This.getElementType() == ConvertType(Derived)); 201 202 // Compute the offset of the virtual base. 203 CharUnits Offset; 204 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived); 205 if (BaseIsVirtual) 206 Offset = Layout.getVBaseClassOffset(Base); 207 else 208 Offset = Layout.getBaseClassOffset(Base); 209 210 // Shift and cast down to the base type. 211 // TODO: for complete types, this should be possible with a GEP. 212 Address V = This; 213 if (!Offset.isZero()) { 214 V = Builder.CreateElementBitCast(V, Int8Ty); 215 V = Builder.CreateConstInBoundsByteGEP(V, Offset); 216 } 217 V = Builder.CreateElementBitCast(V, ConvertType(Base)); 218 219 return V; 220 } 221 222 static Address 223 ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, Address addr, 224 CharUnits nonVirtualOffset, 225 llvm::Value *virtualOffset, 226 const CXXRecordDecl *derivedClass, 227 const CXXRecordDecl *nearestVBase) { 228 // Assert that we have something to do. 229 assert(!nonVirtualOffset.isZero() || virtualOffset != nullptr); 230 231 // Compute the offset from the static and dynamic components. 232 llvm::Value *baseOffset; 233 if (!nonVirtualOffset.isZero()) { 234 baseOffset = llvm::ConstantInt::get(CGF.PtrDiffTy, 235 nonVirtualOffset.getQuantity()); 236 if (virtualOffset) { 237 baseOffset = CGF.Builder.CreateAdd(virtualOffset, baseOffset); 238 } 239 } else { 240 baseOffset = virtualOffset; 241 } 242 243 // Apply the base offset. 244 llvm::Value *ptr = addr.getPointer(); 245 ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8PtrTy); 246 ptr = CGF.Builder.CreateInBoundsGEP(ptr, baseOffset, "add.ptr"); 247 248 // If we have a virtual component, the alignment of the result will 249 // be relative only to the known alignment of that vbase. 
250 CharUnits alignment; 251 if (virtualOffset) { 252 assert(nearestVBase && "virtual offset without vbase?"); 253 alignment = CGF.CGM.getVBaseAlignment(addr.getAlignment(), 254 derivedClass, nearestVBase); 255 } else { 256 alignment = addr.getAlignment(); 257 } 258 alignment = alignment.alignmentAtOffset(nonVirtualOffset); 259 260 return Address(ptr, alignment); 261 } 262 263 Address CodeGenFunction::GetAddressOfBaseClass( 264 Address Value, const CXXRecordDecl *Derived, 265 CastExpr::path_const_iterator PathBegin, 266 CastExpr::path_const_iterator PathEnd, bool NullCheckValue, 267 SourceLocation Loc) { 268 assert(PathBegin != PathEnd && "Base path should not be empty!"); 269 270 CastExpr::path_const_iterator Start = PathBegin; 271 const CXXRecordDecl *VBase = nullptr; 272 273 // Sema has done some convenient canonicalization here: if the 274 // access path involved any virtual steps, the conversion path will 275 // *start* with a step down to the correct virtual base subobject, 276 // and hence will not require any further steps. 277 if ((*Start)->isVirtual()) { 278 VBase = 279 cast<CXXRecordDecl>((*Start)->getType()->getAs<RecordType>()->getDecl()); 280 ++Start; 281 } 282 283 // Compute the static offset of the ultimate destination within its 284 // allocating subobject (the virtual base, if there is one, or else 285 // the "complete" object that we see). 286 CharUnits NonVirtualOffset = CGM.computeNonVirtualBaseClassOffset( 287 VBase ? VBase : Derived, Start, PathEnd); 288 289 // If there's a virtual step, we can sometimes "devirtualize" it. 290 // For now, that's limited to when the derived type is final. 291 // TODO: "devirtualize" this for accesses to known-complete objects. 292 if (VBase && Derived->hasAttr<FinalAttr>()) { 293 const ASTRecordLayout &layout = getContext().getASTRecordLayout(Derived); 294 CharUnits vBaseOffset = layout.getVBaseClassOffset(VBase); 295 NonVirtualOffset += vBaseOffset; 296 VBase = nullptr; // we no longer have a virtual step 297 } 298 299 // Get the base pointer type. 300 llvm::Type *BasePtrTy = 301 ConvertType((PathEnd[-1])->getType())->getPointerTo(); 302 303 QualType DerivedTy = getContext().getRecordType(Derived); 304 CharUnits DerivedAlign = CGM.getClassPointerAlignment(Derived); 305 306 // If the static offset is zero and we don't have a virtual step, 307 // just do a bitcast; null checks are unnecessary. 308 if (NonVirtualOffset.isZero() && !VBase) { 309 if (sanitizePerformTypeCheck()) { 310 EmitTypeCheck(TCK_Upcast, Loc, Value.getPointer(), 311 DerivedTy, DerivedAlign, !NullCheckValue); 312 } 313 return Builder.CreateBitCast(Value, BasePtrTy); 314 } 315 316 llvm::BasicBlock *origBB = nullptr; 317 llvm::BasicBlock *endBB = nullptr; 318 319 // Skip over the offset (and the vtable load) if we're supposed to 320 // null-check the pointer. 321 if (NullCheckValue) { 322 origBB = Builder.GetInsertBlock(); 323 llvm::BasicBlock *notNullBB = createBasicBlock("cast.notnull"); 324 endBB = createBasicBlock("cast.end"); 325 326 llvm::Value *isNull = Builder.CreateIsNull(Value.getPointer()); 327 Builder.CreateCondBr(isNull, endBB, notNullBB); 328 EmitBlock(notNullBB); 329 } 330 331 if (sanitizePerformTypeCheck()) { 332 EmitTypeCheck(VBase ? TCK_UpcastToVirtualBase : TCK_Upcast, Loc, 333 Value.getPointer(), DerivedTy, DerivedAlign, true); 334 } 335 336 // Compute the virtual offset. 
337 llvm::Value *VirtualOffset = nullptr; 338 if (VBase) { 339 VirtualOffset = 340 CGM.getCXXABI().GetVirtualBaseClassOffset(*this, Value, Derived, VBase); 341 } 342 343 // Apply both offsets. 344 Value = ApplyNonVirtualAndVirtualOffset(*this, Value, NonVirtualOffset, 345 VirtualOffset, Derived, VBase); 346 347 // Cast to the destination type. 348 Value = Builder.CreateBitCast(Value, BasePtrTy); 349 350 // Build a phi if we needed a null check. 351 if (NullCheckValue) { 352 llvm::BasicBlock *notNullBB = Builder.GetInsertBlock(); 353 Builder.CreateBr(endBB); 354 EmitBlock(endBB); 355 356 llvm::PHINode *PHI = Builder.CreatePHI(BasePtrTy, 2, "cast.result"); 357 PHI->addIncoming(Value.getPointer(), notNullBB); 358 PHI->addIncoming(llvm::Constant::getNullValue(BasePtrTy), origBB); 359 Value = Address(PHI, Value.getAlignment()); 360 } 361 362 return Value; 363 } 364 365 Address 366 CodeGenFunction::GetAddressOfDerivedClass(Address BaseAddr, 367 const CXXRecordDecl *Derived, 368 CastExpr::path_const_iterator PathBegin, 369 CastExpr::path_const_iterator PathEnd, 370 bool NullCheckValue) { 371 assert(PathBegin != PathEnd && "Base path should not be empty!"); 372 373 QualType DerivedTy = 374 getContext().getCanonicalType(getContext().getTagDeclType(Derived)); 375 llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo(); 376 377 llvm::Value *NonVirtualOffset = 378 CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd); 379 380 if (!NonVirtualOffset) { 381 // No offset, we can just cast back. 382 return Builder.CreateBitCast(BaseAddr, DerivedPtrTy); 383 } 384 385 llvm::BasicBlock *CastNull = nullptr; 386 llvm::BasicBlock *CastNotNull = nullptr; 387 llvm::BasicBlock *CastEnd = nullptr; 388 389 if (NullCheckValue) { 390 CastNull = createBasicBlock("cast.null"); 391 CastNotNull = createBasicBlock("cast.notnull"); 392 CastEnd = createBasicBlock("cast.end"); 393 394 llvm::Value *IsNull = Builder.CreateIsNull(BaseAddr.getPointer()); 395 Builder.CreateCondBr(IsNull, CastNull, CastNotNull); 396 EmitBlock(CastNotNull); 397 } 398 399 // Apply the offset. 400 llvm::Value *Value = Builder.CreateBitCast(BaseAddr.getPointer(), Int8PtrTy); 401 Value = Builder.CreateGEP(Value, Builder.CreateNeg(NonVirtualOffset), 402 "sub.ptr"); 403 404 // Just cast. 405 Value = Builder.CreateBitCast(Value, DerivedPtrTy); 406 407 // Produce a PHI if we had a null-check. 408 if (NullCheckValue) { 409 Builder.CreateBr(CastEnd); 410 EmitBlock(CastNull); 411 Builder.CreateBr(CastEnd); 412 EmitBlock(CastEnd); 413 414 llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2); 415 PHI->addIncoming(Value, CastNotNull); 416 PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull); 417 Value = PHI; 418 } 419 420 return Address(Value, CGM.getClassPointerAlignment(Derived)); 421 } 422 423 llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD, 424 bool ForVirtualBase, 425 bool Delegating) { 426 if (!CGM.getCXXABI().NeedsVTTParameter(GD)) { 427 // This constructor/destructor does not need a VTT parameter. 428 return nullptr; 429 } 430 431 const CXXRecordDecl *RD = cast<CXXMethodDecl>(CurCodeDecl)->getParent(); 432 const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent(); 433 434 llvm::Value *VTT; 435 436 uint64_t SubVTTIndex; 437 438 if (Delegating) { 439 // If this is a delegating constructor call, just load the VTT. 
440 return LoadCXXVTT(); 441 } else if (RD == Base) { 442 // If the record matches the base, this is the complete ctor/dtor 443 // variant calling the base variant in a class with virtual bases. 444 assert(!CGM.getCXXABI().NeedsVTTParameter(CurGD) && 445 "doing no-op VTT offset in base dtor/ctor?"); 446 assert(!ForVirtualBase && "Can't have same class as virtual base!"); 447 SubVTTIndex = 0; 448 } else { 449 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 450 CharUnits BaseOffset = ForVirtualBase ? 451 Layout.getVBaseClassOffset(Base) : 452 Layout.getBaseClassOffset(Base); 453 454 SubVTTIndex = 455 CGM.getVTables().getSubVTTIndex(RD, BaseSubobject(Base, BaseOffset)); 456 assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!"); 457 } 458 459 if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) { 460 // A VTT parameter was passed to the constructor, use it. 461 VTT = LoadCXXVTT(); 462 VTT = Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex); 463 } else { 464 // We're the complete constructor, so get the VTT by name. 465 VTT = CGM.getVTables().GetAddrOfVTT(RD); 466 VTT = Builder.CreateConstInBoundsGEP2_64(VTT, 0, SubVTTIndex); 467 } 468 469 return VTT; 470 } 471 472 namespace { 473 /// Call the destructor for a direct base class. 474 struct CallBaseDtor final : EHScopeStack::Cleanup { 475 const CXXRecordDecl *BaseClass; 476 bool BaseIsVirtual; 477 CallBaseDtor(const CXXRecordDecl *Base, bool BaseIsVirtual) 478 : BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {} 479 480 void Emit(CodeGenFunction &CGF, Flags flags) override { 481 const CXXRecordDecl *DerivedClass = 482 cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent(); 483 484 const CXXDestructorDecl *D = BaseClass->getDestructor(); 485 Address Addr = 486 CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThisAddress(), 487 DerivedClass, BaseClass, 488 BaseIsVirtual); 489 CGF.EmitCXXDestructorCall(D, Dtor_Base, BaseIsVirtual, 490 /*Delegating=*/false, Addr); 491 } 492 }; 493 494 /// A visitor which checks whether an initializer uses 'this' in a 495 /// way which requires the vtable to be properly set. 496 struct DynamicThisUseChecker : ConstEvaluatedExprVisitor<DynamicThisUseChecker> { 497 typedef ConstEvaluatedExprVisitor<DynamicThisUseChecker> super; 498 499 bool UsesThis; 500 501 DynamicThisUseChecker(const ASTContext &C) : super(C), UsesThis(false) {} 502 503 // Black-list all explicit and implicit references to 'this'. 504 // 505 // Do we need to worry about external references to 'this' derived 506 // from arbitrary code? If so, then anything which runs arbitrary 507 // external code might potentially access the vtable. 508 void VisitCXXThisExpr(const CXXThisExpr *E) { UsesThis = true; } 509 }; 510 } 511 512 static bool BaseInitializerUsesThis(ASTContext &C, const Expr *Init) { 513 DynamicThisUseChecker Checker(C); 514 Checker.Visit(Init); 515 return Checker.UsesThis; 516 } 517 518 static void EmitBaseInitializer(CodeGenFunction &CGF, 519 const CXXRecordDecl *ClassDecl, 520 CXXCtorInitializer *BaseInit, 521 CXXCtorType CtorType) { 522 assert(BaseInit->isBaseInitializer() && 523 "Must have base initializer!"); 524 525 Address ThisPtr = CGF.LoadCXXThisAddress(); 526 527 const Type *BaseType = BaseInit->getBaseClass(); 528 CXXRecordDecl *BaseClassDecl = 529 cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl()); 530 531 bool isBaseVirtual = BaseInit->isBaseVirtual(); 532 533 // The base constructor doesn't construct virtual bases. 
534 if (CtorType == Ctor_Base && isBaseVirtual) 535 return; 536 537 // If the initializer for the base (other than the constructor 538 // itself) accesses 'this' in any way, we need to initialize the 539 // vtables. 540 if (BaseInitializerUsesThis(CGF.getContext(), BaseInit->getInit())) 541 CGF.InitializeVTablePointers(ClassDecl); 542 543 // We can pretend to be a complete class because it only matters for 544 // virtual bases, and we only do virtual bases for complete ctors. 545 Address V = 546 CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl, 547 BaseClassDecl, 548 isBaseVirtual); 549 AggValueSlot AggSlot = 550 AggValueSlot::forAddr(V, Qualifiers(), 551 AggValueSlot::IsDestructed, 552 AggValueSlot::DoesNotNeedGCBarriers, 553 AggValueSlot::IsNotAliased); 554 555 CGF.EmitAggExpr(BaseInit->getInit(), AggSlot); 556 557 if (CGF.CGM.getLangOpts().Exceptions && 558 !BaseClassDecl->hasTrivialDestructor()) 559 CGF.EHStack.pushCleanup<CallBaseDtor>(EHCleanup, BaseClassDecl, 560 isBaseVirtual); 561 } 562 563 static void EmitAggMemberInitializer(CodeGenFunction &CGF, 564 LValue LHS, 565 Expr *Init, 566 Address ArrayIndexVar, 567 QualType T, 568 ArrayRef<VarDecl *> ArrayIndexes, 569 unsigned Index) { 570 if (Index == ArrayIndexes.size()) { 571 LValue LV = LHS; 572 573 if (ArrayIndexVar.isValid()) { 574 // If we have an array index variable, load it and use it as an offset. 575 // Then, increment the value. 576 llvm::Value *Dest = LHS.getPointer(); 577 llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar); 578 Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress"); 579 llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1); 580 Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc"); 581 CGF.Builder.CreateStore(Next, ArrayIndexVar); 582 583 // Update the LValue. 584 CharUnits EltSize = CGF.getContext().getTypeSizeInChars(T); 585 CharUnits Align = LV.getAlignment().alignmentOfArrayElement(EltSize); 586 LV.setAddress(Address(Dest, Align)); 587 } 588 589 switch (CGF.getEvaluationKind(T)) { 590 case TEK_Scalar: 591 CGF.EmitScalarInit(Init, /*decl*/ nullptr, LV, false); 592 break; 593 case TEK_Complex: 594 CGF.EmitComplexExprIntoLValue(Init, LV, /*isInit*/ true); 595 break; 596 case TEK_Aggregate: { 597 AggValueSlot Slot = 598 AggValueSlot::forLValue(LV, 599 AggValueSlot::IsDestructed, 600 AggValueSlot::DoesNotNeedGCBarriers, 601 AggValueSlot::IsNotAliased); 602 603 CGF.EmitAggExpr(Init, Slot); 604 break; 605 } 606 } 607 608 return; 609 } 610 611 const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(T); 612 assert(Array && "Array initialization without the array type?"); 613 Address IndexVar = CGF.GetAddrOfLocalVar(ArrayIndexes[Index]); 614 615 // Initialize this index variable to zero. 616 llvm::Value* Zero 617 = llvm::Constant::getNullValue(IndexVar.getElementType()); 618 CGF.Builder.CreateStore(Zero, IndexVar); 619 620 // Start the loop with a block that tests the condition. 621 llvm::BasicBlock *CondBlock = CGF.createBasicBlock("for.cond"); 622 llvm::BasicBlock *AfterFor = CGF.createBasicBlock("for.end"); 623 624 CGF.EmitBlock(CondBlock); 625 626 llvm::BasicBlock *ForBody = CGF.createBasicBlock("for.body"); 627 // Generate: if (loop-index < number-of-elements) fall to the loop body, 628 // otherwise, go to the block after the for-loop. 
629 uint64_t NumElements = Array->getSize().getZExtValue(); 630 llvm::Value *Counter = CGF.Builder.CreateLoad(IndexVar); 631 llvm::Value *NumElementsPtr = 632 llvm::ConstantInt::get(Counter->getType(), NumElements); 633 llvm::Value *IsLess = CGF.Builder.CreateICmpULT(Counter, NumElementsPtr, 634 "isless"); 635 636 // If the condition is true, execute the body. 637 CGF.Builder.CreateCondBr(IsLess, ForBody, AfterFor); 638 639 CGF.EmitBlock(ForBody); 640 llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc"); 641 642 // Inside the loop body recurse to emit the inner loop or, eventually, the 643 // constructor call. 644 EmitAggMemberInitializer(CGF, LHS, Init, ArrayIndexVar, 645 Array->getElementType(), ArrayIndexes, Index + 1); 646 647 CGF.EmitBlock(ContinueBlock); 648 649 // Emit the increment of the loop counter. 650 llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1); 651 Counter = CGF.Builder.CreateLoad(IndexVar); 652 NextVal = CGF.Builder.CreateAdd(Counter, NextVal, "inc"); 653 CGF.Builder.CreateStore(NextVal, IndexVar); 654 655 // Finally, branch back up to the condition for the next iteration. 656 CGF.EmitBranch(CondBlock); 657 658 // Emit the fall-through block. 659 CGF.EmitBlock(AfterFor, true); 660 } 661 662 static bool isMemcpyEquivalentSpecialMember(const CXXMethodDecl *D) { 663 auto *CD = dyn_cast<CXXConstructorDecl>(D); 664 if (!(CD && CD->isCopyOrMoveConstructor()) && 665 !D->isCopyAssignmentOperator() && !D->isMoveAssignmentOperator()) 666 return false; 667 668 // We can emit a memcpy for a trivial copy or move constructor/assignment. 669 if (D->isTrivial() && !D->getParent()->mayInsertExtraPadding()) 670 return true; 671 672 // We *must* emit a memcpy for a defaulted union copy or move op. 673 if (D->getParent()->isUnion() && D->isDefaulted()) 674 return true; 675 676 return false; 677 } 678 679 static void EmitLValueForAnyFieldInitialization(CodeGenFunction &CGF, 680 CXXCtorInitializer *MemberInit, 681 LValue &LHS) { 682 FieldDecl *Field = MemberInit->getAnyMember(); 683 if (MemberInit->isIndirectMemberInitializer()) { 684 // If we are initializing an anonymous union field, drill down to the field. 685 IndirectFieldDecl *IndirectField = MemberInit->getIndirectMember(); 686 for (const auto *I : IndirectField->chain()) 687 LHS = CGF.EmitLValueForFieldInitialization(LHS, cast<FieldDecl>(I)); 688 } else { 689 LHS = CGF.EmitLValueForFieldInitialization(LHS, Field); 690 } 691 } 692 693 static void EmitMemberInitializer(CodeGenFunction &CGF, 694 const CXXRecordDecl *ClassDecl, 695 CXXCtorInitializer *MemberInit, 696 const CXXConstructorDecl *Constructor, 697 FunctionArgList &Args) { 698 ApplyDebugLocation Loc(CGF, MemberInit->getSourceLocation()); 699 assert(MemberInit->isAnyMemberInitializer() && 700 "Must have member initializer!"); 701 assert(MemberInit->getInit() && "Must have initializer!"); 702 703 // non-static data member initializers. 704 FieldDecl *Field = MemberInit->getAnyMember(); 705 QualType FieldType = Field->getType(); 706 707 llvm::Value *ThisPtr = CGF.LoadCXXThis(); 708 QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl); 709 LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy); 710 711 EmitLValueForAnyFieldInitialization(CGF, MemberInit, LHS); 712 713 // Special case: if we are in a copy or move constructor, and we are copying 714 // an array of PODs or classes with trivial copy constructors, ignore the 715 // AST and perform the copy we know is equivalent. 716 // FIXME: This is hacky at best... 
if we had a bit more explicit information 717 // in the AST, we could generalize it more easily. 718 const ConstantArrayType *Array 719 = CGF.getContext().getAsConstantArrayType(FieldType); 720 if (Array && Constructor->isDefaulted() && 721 Constructor->isCopyOrMoveConstructor()) { 722 QualType BaseElementTy = CGF.getContext().getBaseElementType(Array); 723 CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit()); 724 if (BaseElementTy.isPODType(CGF.getContext()) || 725 (CE && isMemcpyEquivalentSpecialMember(CE->getConstructor()))) { 726 unsigned SrcArgIndex = 727 CGF.CGM.getCXXABI().getSrcArgforCopyCtor(Constructor, Args); 728 llvm::Value *SrcPtr 729 = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex])); 730 LValue ThisRHSLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy); 731 LValue Src = CGF.EmitLValueForFieldInitialization(ThisRHSLV, Field); 732 733 // Copy the aggregate. 734 CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType, 735 LHS.isVolatileQualified()); 736 // Ensure that we destroy the objects if an exception is thrown later in 737 // the constructor. 738 QualType::DestructionKind dtorKind = FieldType.isDestructedType(); 739 if (CGF.needsEHCleanup(dtorKind)) 740 CGF.pushEHDestroy(dtorKind, LHS.getAddress(), FieldType); 741 return; 742 } 743 } 744 745 ArrayRef<VarDecl *> ArrayIndexes; 746 if (MemberInit->getNumArrayIndices()) 747 ArrayIndexes = MemberInit->getArrayIndexes(); 748 CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit(), ArrayIndexes); 749 } 750 751 void CodeGenFunction::EmitInitializerForField(FieldDecl *Field, LValue LHS, 752 Expr *Init, ArrayRef<VarDecl *> ArrayIndexes) { 753 QualType FieldType = Field->getType(); 754 switch (getEvaluationKind(FieldType)) { 755 case TEK_Scalar: 756 if (LHS.isSimple()) { 757 EmitExprAsInit(Init, Field, LHS, false); 758 } else { 759 RValue RHS = RValue::get(EmitScalarExpr(Init)); 760 EmitStoreThroughLValue(RHS, LHS); 761 } 762 break; 763 case TEK_Complex: 764 EmitComplexExprIntoLValue(Init, LHS, /*isInit*/ true); 765 break; 766 case TEK_Aggregate: { 767 Address ArrayIndexVar = Address::invalid(); 768 if (ArrayIndexes.size()) { 769 // The LHS is a pointer to the first object we'll be constructing, as 770 // a flat array. 771 QualType BaseElementTy = getContext().getBaseElementType(FieldType); 772 llvm::Type *BasePtr = ConvertType(BaseElementTy); 773 BasePtr = llvm::PointerType::getUnqual(BasePtr); 774 Address BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(), BasePtr); 775 LHS = MakeAddrLValue(BaseAddrPtr, BaseElementTy); 776 777 // Create an array index that will be used to walk over all of the 778 // objects we're constructing. 779 ArrayIndexVar = CreateMemTemp(getContext().getSizeType(), "object.index"); 780 llvm::Value *Zero = 781 llvm::Constant::getNullValue(ArrayIndexVar.getElementType()); 782 Builder.CreateStore(Zero, ArrayIndexVar); 783 784 // Emit the block variables for the array indices, if any. 785 for (unsigned I = 0, N = ArrayIndexes.size(); I != N; ++I) 786 EmitAutoVarDecl(*ArrayIndexes[I]); 787 } 788 789 EmitAggMemberInitializer(*this, LHS, Init, ArrayIndexVar, FieldType, 790 ArrayIndexes, 0); 791 } 792 } 793 794 // Ensure that we destroy this object if an exception is thrown 795 // later in the constructor. 
796 QualType::DestructionKind dtorKind = FieldType.isDestructedType(); 797 if (needsEHCleanup(dtorKind)) 798 pushEHDestroy(dtorKind, LHS.getAddress(), FieldType); 799 } 800 801 /// Checks whether the given constructor is a valid subject for the 802 /// complete-to-base constructor delegation optimization, i.e. 803 /// emitting the complete constructor as a simple call to the base 804 /// constructor. 805 static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor) { 806 807 // Currently we disable the optimization for classes with virtual 808 // bases because (1) the addresses of parameter variables need to be 809 // consistent across all initializers but (2) the delegate function 810 // call necessarily creates a second copy of the parameter variable. 811 // 812 // The limiting example (purely theoretical AFAIK): 813 // struct A { A(int &c) { c++; } }; 814 // struct B : virtual A { 815 // B(int count) : A(count) { printf("%d\n", count); } 816 // }; 817 // ...although even this example could in principle be emitted as a 818 // delegation since the address of the parameter doesn't escape. 819 if (Ctor->getParent()->getNumVBases()) { 820 // TODO: white-list trivial vbase initializers. This case wouldn't 821 // be subject to the restrictions below. 822 823 // TODO: white-list cases where: 824 // - there are no non-reference parameters to the constructor 825 // - the initializers don't access any non-reference parameters 826 // - the initializers don't take the address of non-reference 827 // parameters 828 // - etc. 829 // If we ever add any of the above cases, remember that: 830 // - function-try-blocks will always blacklist this optimization 831 // - we need to perform the constructor prologue and cleanup in 832 // EmitConstructorBody. 833 834 return false; 835 } 836 837 // We also disable the optimization for variadic functions because 838 // it's impossible to "re-pass" varargs. 839 if (Ctor->getType()->getAs<FunctionProtoType>()->isVariadic()) 840 return false; 841 842 // FIXME: Decide if we can do a delegation of a delegating constructor. 843 if (Ctor->isDelegatingConstructor()) 844 return false; 845 846 return true; 847 } 848 849 // Emit code in ctor (Prologue==true) or dtor (Prologue==false) 850 // to poison the extra field paddings inserted under 851 // -fsanitize-address-field-padding=1|2. 852 void CodeGenFunction::EmitAsanPrologueOrEpilogue(bool Prologue) { 853 ASTContext &Context = getContext(); 854 const CXXRecordDecl *ClassDecl = 855 Prologue ? cast<CXXConstructorDecl>(CurGD.getDecl())->getParent() 856 : cast<CXXDestructorDecl>(CurGD.getDecl())->getParent(); 857 if (!ClassDecl->mayInsertExtraPadding()) return; 858 859 struct SizeAndOffset { 860 uint64_t Size; 861 uint64_t Offset; 862 }; 863 864 unsigned PtrSize = CGM.getDataLayout().getPointerSizeInBits(); 865 const ASTRecordLayout &Info = Context.getASTRecordLayout(ClassDecl); 866 867 // Populate sizes and offsets of fields. 868 SmallVector<SizeAndOffset, 16> SSV(Info.getFieldCount()); 869 for (unsigned i = 0, e = Info.getFieldCount(); i != e; ++i) 870 SSV[i].Offset = 871 Context.toCharUnitsFromBits(Info.getFieldOffset(i)).getQuantity(); 872 873 size_t NumFields = 0; 874 for (const auto *Field : ClassDecl->fields()) { 875 const FieldDecl *D = Field; 876 std::pair<CharUnits, CharUnits> FieldInfo = 877 Context.getTypeInfoInChars(D->getType()); 878 CharUnits FieldSize = FieldInfo.first; 879 assert(NumFields < SSV.size()); 880 SSV[NumFields].Size = D->isBitField() ? 
0 : FieldSize.getQuantity(); 881 NumFields++; 882 } 883 assert(NumFields == SSV.size()); 884 if (SSV.size() <= 1) return; 885 886 // We will insert calls to __asan_* run-time functions. 887 // LLVM AddressSanitizer pass may decide to inline them later. 888 llvm::Type *Args[2] = {IntPtrTy, IntPtrTy}; 889 llvm::FunctionType *FTy = 890 llvm::FunctionType::get(CGM.VoidTy, Args, false); 891 llvm::Constant *F = CGM.CreateRuntimeFunction( 892 FTy, Prologue ? "__asan_poison_intra_object_redzone" 893 : "__asan_unpoison_intra_object_redzone"); 894 895 llvm::Value *ThisPtr = LoadCXXThis(); 896 ThisPtr = Builder.CreatePtrToInt(ThisPtr, IntPtrTy); 897 uint64_t TypeSize = Info.getNonVirtualSize().getQuantity(); 898 // For each field check if it has sufficient padding, 899 // if so (un)poison it with a call. 900 for (size_t i = 0; i < SSV.size(); i++) { 901 uint64_t AsanAlignment = 8; 902 uint64_t NextField = i == SSV.size() - 1 ? TypeSize : SSV[i + 1].Offset; 903 uint64_t PoisonSize = NextField - SSV[i].Offset - SSV[i].Size; 904 uint64_t EndOffset = SSV[i].Offset + SSV[i].Size; 905 if (PoisonSize < AsanAlignment || !SSV[i].Size || 906 (NextField % AsanAlignment) != 0) 907 continue; 908 Builder.CreateCall( 909 F, {Builder.CreateAdd(ThisPtr, Builder.getIntN(PtrSize, EndOffset)), 910 Builder.getIntN(PtrSize, PoisonSize)}); 911 } 912 } 913 914 /// EmitConstructorBody - Emits the body of the current constructor. 915 void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) { 916 EmitAsanPrologueOrEpilogue(true); 917 const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(CurGD.getDecl()); 918 CXXCtorType CtorType = CurGD.getCtorType(); 919 920 assert((CGM.getTarget().getCXXABI().hasConstructorVariants() || 921 CtorType == Ctor_Complete) && 922 "can only generate complete ctor for this ABI"); 923 924 // Before we go any further, try the complete->base constructor 925 // delegation optimization. 926 if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor) && 927 CGM.getTarget().getCXXABI().hasConstructorVariants()) { 928 EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args, Ctor->getLocEnd()); 929 return; 930 } 931 932 const FunctionDecl *Definition = 0; 933 Stmt *Body = Ctor->getBody(Definition); 934 assert(Definition == Ctor && "emitting wrong constructor body"); 935 936 // Enter the function-try-block before the constructor prologue if 937 // applicable. 938 bool IsTryBody = (Body && isa<CXXTryStmt>(Body)); 939 if (IsTryBody) 940 EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true); 941 942 incrementProfileCounter(Body); 943 944 RunCleanupsScope RunCleanups(*this); 945 946 // TODO: in restricted cases, we can emit the vbase initializers of 947 // a complete ctor and then delegate to the base ctor. 948 949 // Emit the constructor prologue, i.e. the base and member 950 // initializers. 951 EmitCtorPrologue(Ctor, CtorType, Args); 952 953 // Emit the body of the statement. 954 if (IsTryBody) 955 EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock()); 956 else if (Body) 957 EmitStmt(Body); 958 959 // Emit any cleanup blocks associated with the member or base 960 // initializers, which includes (along the exceptional path) the 961 // destructors for those members and bases that were fully 962 // constructed. 963 RunCleanups.ForceCleanup(); 964 965 if (IsTryBody) 966 ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true); 967 } 968 969 namespace { 970 /// RAII object to indicate that codegen is copying the value representation 971 /// instead of the object representation. 
Useful when copying a struct or 972 /// class which has uninitialized members and we're only performing 973 /// lvalue-to-rvalue conversion on the object but not its members. 974 class CopyingValueRepresentation { 975 public: 976 explicit CopyingValueRepresentation(CodeGenFunction &CGF) 977 : CGF(CGF), OldSanOpts(CGF.SanOpts) { 978 CGF.SanOpts.set(SanitizerKind::Bool, false); 979 CGF.SanOpts.set(SanitizerKind::Enum, false); 980 } 981 ~CopyingValueRepresentation() { 982 CGF.SanOpts = OldSanOpts; 983 } 984 private: 985 CodeGenFunction &CGF; 986 SanitizerSet OldSanOpts; 987 }; 988 } 989 990 namespace { 991 class FieldMemcpyizer { 992 public: 993 FieldMemcpyizer(CodeGenFunction &CGF, const CXXRecordDecl *ClassDecl, 994 const VarDecl *SrcRec) 995 : CGF(CGF), ClassDecl(ClassDecl), SrcRec(SrcRec), 996 RecLayout(CGF.getContext().getASTRecordLayout(ClassDecl)), 997 FirstField(nullptr), LastField(nullptr), FirstFieldOffset(0), 998 LastFieldOffset(0), LastAddedFieldIndex(0) {} 999 1000 bool isMemcpyableField(FieldDecl *F) const { 1001 // Never memcpy fields when we are adding poisoned paddings. 1002 if (CGF.getContext().getLangOpts().SanitizeAddressFieldPadding) 1003 return false; 1004 Qualifiers Qual = F->getType().getQualifiers(); 1005 if (Qual.hasVolatile() || Qual.hasObjCLifetime()) 1006 return false; 1007 return true; 1008 } 1009 1010 void addMemcpyableField(FieldDecl *F) { 1011 if (!FirstField) 1012 addInitialField(F); 1013 else 1014 addNextField(F); 1015 } 1016 1017 CharUnits getMemcpySize(uint64_t FirstByteOffset) const { 1018 unsigned LastFieldSize = 1019 LastField->isBitField() ? 1020 LastField->getBitWidthValue(CGF.getContext()) : 1021 CGF.getContext().getTypeSize(LastField->getType()); 1022 uint64_t MemcpySizeBits = 1023 LastFieldOffset + LastFieldSize - FirstByteOffset + 1024 CGF.getContext().getCharWidth() - 1; 1025 CharUnits MemcpySize = 1026 CGF.getContext().toCharUnitsFromBits(MemcpySizeBits); 1027 return MemcpySize; 1028 } 1029 1030 void emitMemcpy() { 1031 // Give the subclass a chance to bail out if it feels the memcpy isn't 1032 // worth it (e.g. Hasn't aggregated enough data). 1033 if (!FirstField) { 1034 return; 1035 } 1036 1037 uint64_t FirstByteOffset; 1038 if (FirstField->isBitField()) { 1039 const CGRecordLayout &RL = 1040 CGF.getTypes().getCGRecordLayout(FirstField->getParent()); 1041 const CGBitFieldInfo &BFInfo = RL.getBitFieldInfo(FirstField); 1042 // FirstFieldOffset is not appropriate for bitfields, 1043 // we need to use the storage offset instead. 1044 FirstByteOffset = CGF.getContext().toBits(BFInfo.StorageOffset); 1045 } else { 1046 FirstByteOffset = FirstFieldOffset; 1047 } 1048 1049 CharUnits MemcpySize = getMemcpySize(FirstByteOffset); 1050 QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl); 1051 Address ThisPtr = CGF.LoadCXXThisAddress(); 1052 LValue DestLV = CGF.MakeAddrLValue(ThisPtr, RecordTy); 1053 LValue Dest = CGF.EmitLValueForFieldInitialization(DestLV, FirstField); 1054 llvm::Value *SrcPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(SrcRec)); 1055 LValue SrcLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy); 1056 LValue Src = CGF.EmitLValueForFieldInitialization(SrcLV, FirstField); 1057 1058 emitMemcpyIR(Dest.isBitField() ? Dest.getBitFieldAddress() : Dest.getAddress(), 1059 Src.isBitField() ? 
Src.getBitFieldAddress() : Src.getAddress(), 1060 MemcpySize); 1061 reset(); 1062 } 1063 1064 void reset() { 1065 FirstField = nullptr; 1066 } 1067 1068 protected: 1069 CodeGenFunction &CGF; 1070 const CXXRecordDecl *ClassDecl; 1071 1072 private: 1073 1074 void emitMemcpyIR(Address DestPtr, Address SrcPtr, CharUnits Size) { 1075 llvm::PointerType *DPT = DestPtr.getType(); 1076 llvm::Type *DBP = 1077 llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), DPT->getAddressSpace()); 1078 DestPtr = CGF.Builder.CreateBitCast(DestPtr, DBP); 1079 1080 llvm::PointerType *SPT = SrcPtr.getType(); 1081 llvm::Type *SBP = 1082 llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), SPT->getAddressSpace()); 1083 SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, SBP); 1084 1085 CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, Size.getQuantity()); 1086 } 1087 1088 void addInitialField(FieldDecl *F) { 1089 FirstField = F; 1090 LastField = F; 1091 FirstFieldOffset = RecLayout.getFieldOffset(F->getFieldIndex()); 1092 LastFieldOffset = FirstFieldOffset; 1093 LastAddedFieldIndex = F->getFieldIndex(); 1094 return; 1095 } 1096 1097 void addNextField(FieldDecl *F) { 1098 // For the most part, the following invariant will hold: 1099 // F->getFieldIndex() == LastAddedFieldIndex + 1 1100 // The one exception is that Sema won't add a copy-initializer for an 1101 // unnamed bitfield, which will show up here as a gap in the sequence. 1102 assert(F->getFieldIndex() >= LastAddedFieldIndex + 1 && 1103 "Cannot aggregate fields out of order."); 1104 LastAddedFieldIndex = F->getFieldIndex(); 1105 1106 // The 'first' and 'last' fields are chosen by offset, rather than field 1107 // index. This allows the code to support bitfields, as well as regular 1108 // fields. 1109 uint64_t FOffset = RecLayout.getFieldOffset(F->getFieldIndex()); 1110 if (FOffset < FirstFieldOffset) { 1111 FirstField = F; 1112 FirstFieldOffset = FOffset; 1113 } else if (FOffset > LastFieldOffset) { 1114 LastField = F; 1115 LastFieldOffset = FOffset; 1116 } 1117 } 1118 1119 const VarDecl *SrcRec; 1120 const ASTRecordLayout &RecLayout; 1121 FieldDecl *FirstField; 1122 FieldDecl *LastField; 1123 uint64_t FirstFieldOffset, LastFieldOffset; 1124 unsigned LastAddedFieldIndex; 1125 }; 1126 1127 class ConstructorMemcpyizer : public FieldMemcpyizer { 1128 private: 1129 1130 /// Get source argument for copy constructor. Returns null if not a copy 1131 /// constructor. 1132 static const VarDecl *getTrivialCopySource(CodeGenFunction &CGF, 1133 const CXXConstructorDecl *CD, 1134 FunctionArgList &Args) { 1135 if (CD->isCopyOrMoveConstructor() && CD->isDefaulted()) 1136 return Args[CGF.CGM.getCXXABI().getSrcArgforCopyCtor(CD, Args)]; 1137 return nullptr; 1138 } 1139 1140 // Returns true if a CXXCtorInitializer represents a member initialization 1141 // that can be rolled into a memcpy. 1142 bool isMemberInitMemcpyable(CXXCtorInitializer *MemberInit) const { 1143 if (!MemcpyableCtor) 1144 return false; 1145 FieldDecl *Field = MemberInit->getMember(); 1146 assert(Field && "No field for member init."); 1147 QualType FieldType = Field->getType(); 1148 CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit()); 1149 1150 // Bail out on non-memcpyable, not-trivially-copyable members. 1151 if (!(CE && isMemcpyEquivalentSpecialMember(CE->getConstructor())) && 1152 !(FieldType.isTriviallyCopyableType(CGF.getContext()) || 1153 FieldType->isReferenceType())) 1154 return false; 1155 1156 // Bail out on volatile fields. 
1157 if (!isMemcpyableField(Field)) 1158 return false; 1159 1160 // Otherwise we're good. 1161 return true; 1162 } 1163 1164 public: 1165 ConstructorMemcpyizer(CodeGenFunction &CGF, const CXXConstructorDecl *CD, 1166 FunctionArgList &Args) 1167 : FieldMemcpyizer(CGF, CD->getParent(), getTrivialCopySource(CGF, CD, Args)), 1168 ConstructorDecl(CD), 1169 MemcpyableCtor(CD->isDefaulted() && 1170 CD->isCopyOrMoveConstructor() && 1171 CGF.getLangOpts().getGC() == LangOptions::NonGC), 1172 Args(Args) { } 1173 1174 void addMemberInitializer(CXXCtorInitializer *MemberInit) { 1175 if (isMemberInitMemcpyable(MemberInit)) { 1176 AggregatedInits.push_back(MemberInit); 1177 addMemcpyableField(MemberInit->getMember()); 1178 } else { 1179 emitAggregatedInits(); 1180 EmitMemberInitializer(CGF, ConstructorDecl->getParent(), MemberInit, 1181 ConstructorDecl, Args); 1182 } 1183 } 1184 1185 void emitAggregatedInits() { 1186 if (AggregatedInits.size() <= 1) { 1187 // This memcpy is too small to be worthwhile. Fall back on default 1188 // codegen. 1189 if (!AggregatedInits.empty()) { 1190 CopyingValueRepresentation CVR(CGF); 1191 EmitMemberInitializer(CGF, ConstructorDecl->getParent(), 1192 AggregatedInits[0], ConstructorDecl, Args); 1193 AggregatedInits.clear(); 1194 } 1195 reset(); 1196 return; 1197 } 1198 1199 pushEHDestructors(); 1200 emitMemcpy(); 1201 AggregatedInits.clear(); 1202 } 1203 1204 void pushEHDestructors() { 1205 Address ThisPtr = CGF.LoadCXXThisAddress(); 1206 QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl); 1207 LValue LHS = CGF.MakeAddrLValue(ThisPtr, RecordTy); 1208 1209 for (unsigned i = 0; i < AggregatedInits.size(); ++i) { 1210 CXXCtorInitializer *MemberInit = AggregatedInits[i]; 1211 QualType FieldType = MemberInit->getAnyMember()->getType(); 1212 QualType::DestructionKind dtorKind = FieldType.isDestructedType(); 1213 if (!CGF.needsEHCleanup(dtorKind)) 1214 continue; 1215 LValue FieldLHS = LHS; 1216 EmitLValueForAnyFieldInitialization(CGF, MemberInit, FieldLHS); 1217 CGF.pushEHDestroy(dtorKind, FieldLHS.getAddress(), FieldType); 1218 } 1219 } 1220 1221 void finish() { 1222 emitAggregatedInits(); 1223 } 1224 1225 private: 1226 const CXXConstructorDecl *ConstructorDecl; 1227 bool MemcpyableCtor; 1228 FunctionArgList &Args; 1229 SmallVector<CXXCtorInitializer*, 16> AggregatedInits; 1230 }; 1231 1232 class AssignmentMemcpyizer : public FieldMemcpyizer { 1233 private: 1234 1235 // Returns the memcpyable field copied by the given statement, if one 1236 // exists. Otherwise returns null. 1237 FieldDecl *getMemcpyableField(Stmt *S) { 1238 if (!AssignmentsMemcpyable) 1239 return nullptr; 1240 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(S)) { 1241 // Recognise trivial assignments. 
1242 if (BO->getOpcode() != BO_Assign) 1243 return nullptr; 1244 MemberExpr *ME = dyn_cast<MemberExpr>(BO->getLHS()); 1245 if (!ME) 1246 return nullptr; 1247 FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl()); 1248 if (!Field || !isMemcpyableField(Field)) 1249 return nullptr; 1250 Stmt *RHS = BO->getRHS(); 1251 if (ImplicitCastExpr *EC = dyn_cast<ImplicitCastExpr>(RHS)) 1252 RHS = EC->getSubExpr(); 1253 if (!RHS) 1254 return nullptr; 1255 MemberExpr *ME2 = dyn_cast<MemberExpr>(RHS); 1256 if (dyn_cast<FieldDecl>(ME2->getMemberDecl()) != Field) 1257 return nullptr; 1258 return Field; 1259 } else if (CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(S)) { 1260 CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MCE->getCalleeDecl()); 1261 if (!(MD && isMemcpyEquivalentSpecialMember(MD))) 1262 return nullptr; 1263 MemberExpr *IOA = dyn_cast<MemberExpr>(MCE->getImplicitObjectArgument()); 1264 if (!IOA) 1265 return nullptr; 1266 FieldDecl *Field = dyn_cast<FieldDecl>(IOA->getMemberDecl()); 1267 if (!Field || !isMemcpyableField(Field)) 1268 return nullptr; 1269 MemberExpr *Arg0 = dyn_cast<MemberExpr>(MCE->getArg(0)); 1270 if (!Arg0 || Field != dyn_cast<FieldDecl>(Arg0->getMemberDecl())) 1271 return nullptr; 1272 return Field; 1273 } else if (CallExpr *CE = dyn_cast<CallExpr>(S)) { 1274 FunctionDecl *FD = dyn_cast<FunctionDecl>(CE->getCalleeDecl()); 1275 if (!FD || FD->getBuiltinID() != Builtin::BI__builtin_memcpy) 1276 return nullptr; 1277 Expr *DstPtr = CE->getArg(0); 1278 if (ImplicitCastExpr *DC = dyn_cast<ImplicitCastExpr>(DstPtr)) 1279 DstPtr = DC->getSubExpr(); 1280 UnaryOperator *DUO = dyn_cast<UnaryOperator>(DstPtr); 1281 if (!DUO || DUO->getOpcode() != UO_AddrOf) 1282 return nullptr; 1283 MemberExpr *ME = dyn_cast<MemberExpr>(DUO->getSubExpr()); 1284 if (!ME) 1285 return nullptr; 1286 FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl()); 1287 if (!Field || !isMemcpyableField(Field)) 1288 return nullptr; 1289 Expr *SrcPtr = CE->getArg(1); 1290 if (ImplicitCastExpr *SC = dyn_cast<ImplicitCastExpr>(SrcPtr)) 1291 SrcPtr = SC->getSubExpr(); 1292 UnaryOperator *SUO = dyn_cast<UnaryOperator>(SrcPtr); 1293 if (!SUO || SUO->getOpcode() != UO_AddrOf) 1294 return nullptr; 1295 MemberExpr *ME2 = dyn_cast<MemberExpr>(SUO->getSubExpr()); 1296 if (!ME2 || Field != dyn_cast<FieldDecl>(ME2->getMemberDecl())) 1297 return nullptr; 1298 return Field; 1299 } 1300 1301 return nullptr; 1302 } 1303 1304 bool AssignmentsMemcpyable; 1305 SmallVector<Stmt*, 16> AggregatedStmts; 1306 1307 public: 1308 1309 AssignmentMemcpyizer(CodeGenFunction &CGF, const CXXMethodDecl *AD, 1310 FunctionArgList &Args) 1311 : FieldMemcpyizer(CGF, AD->getParent(), Args[Args.size() - 1]), 1312 AssignmentsMemcpyable(CGF.getLangOpts().getGC() == LangOptions::NonGC) { 1313 assert(Args.size() == 2); 1314 } 1315 1316 void emitAssignment(Stmt *S) { 1317 FieldDecl *F = getMemcpyableField(S); 1318 if (F) { 1319 addMemcpyableField(F); 1320 AggregatedStmts.push_back(S); 1321 } else { 1322 emitAggregatedStmts(); 1323 CGF.EmitStmt(S); 1324 } 1325 } 1326 1327 void emitAggregatedStmts() { 1328 if (AggregatedStmts.size() <= 1) { 1329 if (!AggregatedStmts.empty()) { 1330 CopyingValueRepresentation CVR(CGF); 1331 CGF.EmitStmt(AggregatedStmts[0]); 1332 } 1333 reset(); 1334 } 1335 1336 emitMemcpy(); 1337 AggregatedStmts.clear(); 1338 } 1339 1340 void finish() { 1341 emitAggregatedStmts(); 1342 } 1343 }; 1344 1345 } 1346 1347 /// EmitCtorPrologue - This routine generates necessary code to initialize 1348 /// base classes and non-static data 
members belonging to this constructor. 1349 void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD, 1350 CXXCtorType CtorType, 1351 FunctionArgList &Args) { 1352 if (CD->isDelegatingConstructor()) 1353 return EmitDelegatingCXXConstructorCall(CD, Args); 1354 1355 const CXXRecordDecl *ClassDecl = CD->getParent(); 1356 1357 CXXConstructorDecl::init_const_iterator B = CD->init_begin(), 1358 E = CD->init_end(); 1359 1360 llvm::BasicBlock *BaseCtorContinueBB = nullptr; 1361 if (ClassDecl->getNumVBases() && 1362 !CGM.getTarget().getCXXABI().hasConstructorVariants()) { 1363 // The ABIs that don't have constructor variants need to put a branch 1364 // before the virtual base initialization code. 1365 BaseCtorContinueBB = 1366 CGM.getCXXABI().EmitCtorCompleteObjectHandler(*this, ClassDecl); 1367 assert(BaseCtorContinueBB); 1368 } 1369 1370 // Virtual base initializers first. 1371 for (; B != E && (*B)->isBaseInitializer() && (*B)->isBaseVirtual(); B++) { 1372 EmitBaseInitializer(*this, ClassDecl, *B, CtorType); 1373 } 1374 1375 if (BaseCtorContinueBB) { 1376 // Complete object handler should continue to the remaining initializers. 1377 Builder.CreateBr(BaseCtorContinueBB); 1378 EmitBlock(BaseCtorContinueBB); 1379 } 1380 1381 // Then, non-virtual base initializers. 1382 for (; B != E && (*B)->isBaseInitializer(); B++) { 1383 assert(!(*B)->isBaseVirtual()); 1384 EmitBaseInitializer(*this, ClassDecl, *B, CtorType); 1385 } 1386 1387 InitializeVTablePointers(ClassDecl); 1388 1389 // And finally, initialize class members. 1390 FieldConstructionScope FCS(*this, LoadCXXThisAddress()); 1391 ConstructorMemcpyizer CM(*this, CD, Args); 1392 for (; B != E; B++) { 1393 CXXCtorInitializer *Member = (*B); 1394 assert(!Member->isBaseInitializer()); 1395 assert(Member->isAnyMemberInitializer() && 1396 "Delegating initializer on non-delegating constructor"); 1397 CM.addMemberInitializer(Member); 1398 } 1399 CM.finish(); 1400 } 1401 1402 static bool 1403 FieldHasTrivialDestructorBody(ASTContext &Context, const FieldDecl *Field); 1404 1405 static bool 1406 HasTrivialDestructorBody(ASTContext &Context, 1407 const CXXRecordDecl *BaseClassDecl, 1408 const CXXRecordDecl *MostDerivedClassDecl) 1409 { 1410 // If the destructor is trivial we don't have to check anything else. 1411 if (BaseClassDecl->hasTrivialDestructor()) 1412 return true; 1413 1414 if (!BaseClassDecl->getDestructor()->hasTrivialBody()) 1415 return false; 1416 1417 // Check fields. 1418 for (const auto *Field : BaseClassDecl->fields()) 1419 if (!FieldHasTrivialDestructorBody(Context, Field)) 1420 return false; 1421 1422 // Check non-virtual bases. 1423 for (const auto &I : BaseClassDecl->bases()) { 1424 if (I.isVirtual()) 1425 continue; 1426 1427 const CXXRecordDecl *NonVirtualBase = 1428 cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl()); 1429 if (!HasTrivialDestructorBody(Context, NonVirtualBase, 1430 MostDerivedClassDecl)) 1431 return false; 1432 } 1433 1434 if (BaseClassDecl == MostDerivedClassDecl) { 1435 // Check virtual bases. 
1436 for (const auto &I : BaseClassDecl->vbases()) { 1437 const CXXRecordDecl *VirtualBase = 1438 cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl()); 1439 if (!HasTrivialDestructorBody(Context, VirtualBase, 1440 MostDerivedClassDecl)) 1441 return false; 1442 } 1443 } 1444 1445 return true; 1446 } 1447 1448 static bool 1449 FieldHasTrivialDestructorBody(ASTContext &Context, 1450 const FieldDecl *Field) 1451 { 1452 QualType FieldBaseElementType = Context.getBaseElementType(Field->getType()); 1453 1454 const RecordType *RT = FieldBaseElementType->getAs<RecordType>(); 1455 if (!RT) 1456 return true; 1457 1458 CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl()); 1459 1460 // The destructor for an implicit anonymous union member is never invoked. 1461 if (FieldClassDecl->isUnion() && FieldClassDecl->isAnonymousStructOrUnion()) 1462 return false; 1463 1464 return HasTrivialDestructorBody(Context, FieldClassDecl, FieldClassDecl); 1465 } 1466 1467 /// CanSkipVTablePointerInitialization - Check whether we need to initialize 1468 /// any vtable pointers before calling this destructor. 1469 static bool CanSkipVTablePointerInitialization(CodeGenFunction &CGF, 1470 const CXXDestructorDecl *Dtor) { 1471 if (!Dtor->hasTrivialBody()) 1472 return false; 1473 1474 // Check the fields. 1475 const CXXRecordDecl *ClassDecl = Dtor->getParent(); 1476 for (const auto *Field : ClassDecl->fields()) 1477 if (!FieldHasTrivialDestructorBody(CGF.getContext(), Field)) 1478 return false; 1479 1480 return true; 1481 } 1482 1483 /// EmitDestructorBody - Emits the body of the current destructor. 1484 void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) { 1485 const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CurGD.getDecl()); 1486 CXXDtorType DtorType = CurGD.getDtorType(); 1487 1488 Stmt *Body = Dtor->getBody(); 1489 if (Body) 1490 incrementProfileCounter(Body); 1491 1492 // The call to operator delete in a deleting destructor happens 1493 // outside of the function-try-block, which means it's always 1494 // possible to delegate the destructor body to the complete 1495 // destructor. Do so. 1496 if (DtorType == Dtor_Deleting) { 1497 EnterDtorCleanups(Dtor, Dtor_Deleting); 1498 EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false, 1499 /*Delegating=*/false, LoadCXXThisAddress()); 1500 PopCleanupBlock(); 1501 return; 1502 } 1503 1504 // If the body is a function-try-block, enter the try before 1505 // anything else. 1506 bool isTryBody = (Body && isa<CXXTryStmt>(Body)); 1507 if (isTryBody) 1508 EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true); 1509 EmitAsanPrologueOrEpilogue(false); 1510 1511 // Enter the epilogue cleanups. 1512 RunCleanupsScope DtorEpilogue(*this); 1513 1514 // If this is the complete variant, just invoke the base variant; 1515 // the epilogue will destruct the virtual bases. But we can't do 1516 // this optimization if the body is a function-try-block, because 1517 // we'd introduce *two* handler blocks. In the Microsoft ABI, we 1518 // always delegate because we might not have a definition in this TU. 1519 switch (DtorType) { 1520 case Dtor_Comdat: 1521 llvm_unreachable("not expecting a COMDAT"); 1522 1523 case Dtor_Deleting: llvm_unreachable("already handled deleting case"); 1524 1525 case Dtor_Complete: 1526 assert((Body || getTarget().getCXXABI().isMicrosoft()) && 1527 "can't emit a dtor without a body for non-Microsoft ABIs"); 1528 1529 // Enter the cleanup scopes for virtual bases. 
1530 EnterDtorCleanups(Dtor, Dtor_Complete); 1531 1532 if (!isTryBody) { 1533 EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false, 1534 /*Delegating=*/false, LoadCXXThisAddress()); 1535 break; 1536 } 1537 // Fallthrough: act like we're in the base variant. 1538 1539 case Dtor_Base: 1540 assert(Body); 1541 1542 // Enter the cleanup scopes for fields and non-virtual bases. 1543 EnterDtorCleanups(Dtor, Dtor_Base); 1544 1545 // Initialize the vtable pointers before entering the body. 1546 if (!CanSkipVTablePointerInitialization(*this, Dtor)) 1547 InitializeVTablePointers(Dtor->getParent()); 1548 1549 if (isTryBody) 1550 EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock()); 1551 else if (Body) 1552 EmitStmt(Body); 1553 else { 1554 assert(Dtor->isImplicit() && "bodyless dtor not implicit"); 1555 // nothing to do besides what's in the epilogue 1556 } 1557 // -fapple-kext must inline any call to this dtor into 1558 // the caller's body. 1559 if (getLangOpts().AppleKext) 1560 CurFn->addFnAttr(llvm::Attribute::AlwaysInline); 1561 1562 break; 1563 } 1564 1565 // Jump out through the epilogue cleanups. 1566 DtorEpilogue.ForceCleanup(); 1567 1568 // Exit the try if applicable. 1569 if (isTryBody) 1570 ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true); 1571 } 1572 1573 void CodeGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &Args) { 1574 const CXXMethodDecl *AssignOp = cast<CXXMethodDecl>(CurGD.getDecl()); 1575 const Stmt *RootS = AssignOp->getBody(); 1576 assert(isa<CompoundStmt>(RootS) && 1577 "Body of an implicit assignment operator should be compound stmt."); 1578 const CompoundStmt *RootCS = cast<CompoundStmt>(RootS); 1579 1580 LexicalScope Scope(*this, RootCS->getSourceRange()); 1581 1582 AssignmentMemcpyizer AM(*this, AssignOp, Args); 1583 for (auto *I : RootCS->body()) 1584 AM.emitAssignment(I); 1585 AM.finish(); 1586 } 1587 1588 namespace { 1589 /// Call the operator delete associated with the current destructor. 
1590 struct CallDtorDelete final : EHScopeStack::Cleanup { 1591 CallDtorDelete() {} 1592 1593 void Emit(CodeGenFunction &CGF, Flags flags) override { 1594 const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl); 1595 const CXXRecordDecl *ClassDecl = Dtor->getParent(); 1596 CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(), 1597 CGF.getContext().getTagDeclType(ClassDecl)); 1598 } 1599 }; 1600 1601 struct CallDtorDeleteConditional final : EHScopeStack::Cleanup { 1602 llvm::Value *ShouldDeleteCondition; 1603 public: 1604 CallDtorDeleteConditional(llvm::Value *ShouldDeleteCondition) 1605 : ShouldDeleteCondition(ShouldDeleteCondition) { 1606 assert(ShouldDeleteCondition != nullptr); 1607 } 1608 1609 void Emit(CodeGenFunction &CGF, Flags flags) override { 1610 llvm::BasicBlock *callDeleteBB = CGF.createBasicBlock("dtor.call_delete"); 1611 llvm::BasicBlock *continueBB = CGF.createBasicBlock("dtor.continue"); 1612 llvm::Value *ShouldCallDelete 1613 = CGF.Builder.CreateIsNull(ShouldDeleteCondition); 1614 CGF.Builder.CreateCondBr(ShouldCallDelete, continueBB, callDeleteBB); 1615 1616 CGF.EmitBlock(callDeleteBB); 1617 const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl); 1618 const CXXRecordDecl *ClassDecl = Dtor->getParent(); 1619 CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(), 1620 CGF.getContext().getTagDeclType(ClassDecl)); 1621 CGF.Builder.CreateBr(continueBB); 1622 1623 CGF.EmitBlock(continueBB); 1624 } 1625 }; 1626 1627 class DestroyField final : public EHScopeStack::Cleanup { 1628 const FieldDecl *field; 1629 CodeGenFunction::Destroyer *destroyer; 1630 bool useEHCleanupForArray; 1631 1632 public: 1633 DestroyField(const FieldDecl *field, CodeGenFunction::Destroyer *destroyer, 1634 bool useEHCleanupForArray) 1635 : field(field), destroyer(destroyer), 1636 useEHCleanupForArray(useEHCleanupForArray) {} 1637 1638 void Emit(CodeGenFunction &CGF, Flags flags) override { 1639 // Find the address of the field. 1640 Address thisValue = CGF.LoadCXXThisAddress(); 1641 QualType RecordTy = CGF.getContext().getTagDeclType(field->getParent()); 1642 LValue ThisLV = CGF.MakeAddrLValue(thisValue, RecordTy); 1643 LValue LV = CGF.EmitLValueForField(ThisLV, field); 1644 assert(LV.isSimple()); 1645 1646 CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer, 1647 flags.isForNormalCleanup() && useEHCleanupForArray); 1648 } 1649 }; 1650 1651 class SanitizeDtor final : public EHScopeStack::Cleanup { 1652 const CXXDestructorDecl *Dtor; 1653 1654 public: 1655 SanitizeDtor(const CXXDestructorDecl *Dtor) : Dtor(Dtor) {} 1656 1657 // Generate function call for handling object poisoning. 1658 // Disables tail call elimination, to prevent the current stack frame 1659 // from disappearing from the stack trace. 1660 void Emit(CodeGenFunction &CGF, Flags flags) override { 1661 const ASTRecordLayout &Layout = 1662 CGF.getContext().getASTRecordLayout(Dtor->getParent()); 1663 1664 // Nothing to poison. 1665 if (Layout.getFieldCount() == 0) 1666 return; 1667 1668 // Prevent the current stack frame from disappearing from the stack trace. 1669 CGF.CurFn->addFnAttr("disable-tail-calls", "true"); 1670 1671 // Construct pointer to region to begin poisoning, and calculate poison 1672 // size, so that only members declared in this class are poisoned. 
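    // Roughly: walk the fields in declaration order and poison each maximal
    // run of consecutive fields whose destructors have trivial bodies.
    // E.g. (informal), for
    //
    //   struct S { int a; int b; NonTrivial c; int d; ~S(); };
    //
    // the ranges [a, b] and [d, end of the non-virtual object size) are
    // poisoned here, while c is skipped (its own destructor is responsible
    // for it).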
1673 ASTContext &Context = CGF.getContext(); 1674 unsigned fieldIndex = 0; 1675 int startIndex = -1; 1676 // RecordDecl::field_iterator Field; 1677 for (const FieldDecl *Field : Dtor->getParent()->fields()) { 1678 // Poison field if it is trivial 1679 if (FieldHasTrivialDestructorBody(Context, Field)) { 1680 // Start sanitizing at this field 1681 if (startIndex < 0) 1682 startIndex = fieldIndex; 1683 1684 // Currently on the last field, and it must be poisoned with the 1685 // current block. 1686 if (fieldIndex == Layout.getFieldCount() - 1) { 1687 PoisonBlock(CGF, startIndex, Layout.getFieldCount()); 1688 } 1689 } else if (startIndex >= 0) { 1690 // No longer within a block of memory to poison, so poison the block 1691 PoisonBlock(CGF, startIndex, fieldIndex); 1692 // Re-set the start index 1693 startIndex = -1; 1694 } 1695 fieldIndex += 1; 1696 } 1697 } 1698 1699 private: 1700 /// \param layoutStartOffset index of the ASTRecordLayout field to 1701 /// start poisoning (inclusive) 1702 /// \param layoutEndOffset index of the ASTRecordLayout field to 1703 /// end poisoning (exclusive) 1704 void PoisonBlock(CodeGenFunction &CGF, unsigned layoutStartOffset, 1705 unsigned layoutEndOffset) { 1706 ASTContext &Context = CGF.getContext(); 1707 const ASTRecordLayout &Layout = 1708 Context.getASTRecordLayout(Dtor->getParent()); 1709 1710 llvm::ConstantInt *OffsetSizePtr = llvm::ConstantInt::get( 1711 CGF.SizeTy, 1712 Context.toCharUnitsFromBits(Layout.getFieldOffset(layoutStartOffset)) 1713 .getQuantity()); 1714 1715 llvm::Value *OffsetPtr = CGF.Builder.CreateGEP( 1716 CGF.Builder.CreateBitCast(CGF.LoadCXXThis(), CGF.Int8PtrTy), 1717 OffsetSizePtr); 1718 1719 CharUnits::QuantityType PoisonSize; 1720 if (layoutEndOffset >= Layout.getFieldCount()) { 1721 PoisonSize = Layout.getNonVirtualSize().getQuantity() - 1722 Context.toCharUnitsFromBits( 1723 Layout.getFieldOffset(layoutStartOffset)) 1724 .getQuantity(); 1725 } else { 1726 PoisonSize = Context.toCharUnitsFromBits( 1727 Layout.getFieldOffset(layoutEndOffset) - 1728 Layout.getFieldOffset(layoutStartOffset)) 1729 .getQuantity(); 1730 } 1731 1732 if (PoisonSize == 0) 1733 return; 1734 1735 // Pass in void pointer and size of region as arguments to runtime 1736 // function 1737 llvm::Value *Args[] = {CGF.Builder.CreateBitCast(OffsetPtr, CGF.VoidPtrTy), 1738 llvm::ConstantInt::get(CGF.SizeTy, PoisonSize)}; 1739 1740 llvm::Type *ArgTypes[] = {CGF.VoidPtrTy, CGF.SizeTy}; 1741 1742 llvm::FunctionType *FnType = 1743 llvm::FunctionType::get(CGF.VoidTy, ArgTypes, false); 1744 llvm::Value *Fn = 1745 CGF.CGM.CreateRuntimeFunction(FnType, "__sanitizer_dtor_callback"); 1746 CGF.EmitNounwindRuntimeCall(Fn, Args); 1747 } 1748 }; 1749 } 1750 1751 /// \brief Emit all code that comes at the end of class's 1752 /// destructor. This is to call destructors on members and base classes 1753 /// in reverse order of their construction. 1754 void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD, 1755 CXXDtorType DtorType) { 1756 assert((!DD->isTrivial() || DD->hasAttr<DLLExportAttr>()) && 1757 "Should not emit dtor epilogue for non-exported trivial dtor!"); 1758 1759 // The deleting-destructor phase just needs to call the appropriate 1760 // operator delete that Sema picked up. 
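  // Informally, the deleting phase amounts to running the complete destructor
  // and then calling 'operator delete(this)', optionally guarded by an
  // implicit boolean parameter when the ABI passes one (see
  // CallDtorDeleteConditional above).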
1761 if (DtorType == Dtor_Deleting) { 1762 assert(DD->getOperatorDelete() && 1763 "operator delete missing - EnterDtorCleanups"); 1764 if (CXXStructorImplicitParamValue) { 1765 // If there is an implicit param to the deleting dtor, it's a boolean 1766 // telling whether we should call delete at the end of the dtor. 1767 EHStack.pushCleanup<CallDtorDeleteConditional>( 1768 NormalAndEHCleanup, CXXStructorImplicitParamValue); 1769 } else { 1770 EHStack.pushCleanup<CallDtorDelete>(NormalAndEHCleanup); 1771 } 1772 return; 1773 } 1774 1775 const CXXRecordDecl *ClassDecl = DD->getParent(); 1776 1777 // Unions have no bases and do not call field destructors. 1778 if (ClassDecl->isUnion()) 1779 return; 1780 1781 // The complete-destructor phase just destructs all the virtual bases. 1782 if (DtorType == Dtor_Complete) { 1783 1784 // We push them in the forward order so that they'll be popped in 1785 // the reverse order. 1786 for (const auto &Base : ClassDecl->vbases()) { 1787 CXXRecordDecl *BaseClassDecl 1788 = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl()); 1789 1790 // Ignore trivial destructors. 1791 if (BaseClassDecl->hasTrivialDestructor()) 1792 continue; 1793 1794 EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup, 1795 BaseClassDecl, 1796 /*BaseIsVirtual*/ true); 1797 } 1798 1799 return; 1800 } 1801 1802 assert(DtorType == Dtor_Base); 1803 1804 // Destroy non-virtual bases. 1805 for (const auto &Base : ClassDecl->bases()) { 1806 // Ignore virtual bases. 1807 if (Base.isVirtual()) 1808 continue; 1809 1810 CXXRecordDecl *BaseClassDecl = Base.getType()->getAsCXXRecordDecl(); 1811 1812 // Ignore trivial destructors. 1813 if (BaseClassDecl->hasTrivialDestructor()) 1814 continue; 1815 1816 EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup, 1817 BaseClassDecl, 1818 /*BaseIsVirtual*/ false); 1819 } 1820 1821 // Poison fields such that access after their destructors are 1822 // invoked, and before the base class destructor runs, is invalid. 1823 if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor && 1824 SanOpts.has(SanitizerKind::Memory)) 1825 EHStack.pushCleanup<SanitizeDtor>(NormalAndEHCleanup, DD); 1826 1827 // Destroy direct fields. 1828 for (const auto *Field : ClassDecl->fields()) { 1829 QualType type = Field->getType(); 1830 QualType::DestructionKind dtorKind = type.isDestructedType(); 1831 if (!dtorKind) continue; 1832 1833 // Anonymous union members do not have their destructors called. 1834 const RecordType *RT = type->getAsUnionType(); 1835 if (RT && RT->getDecl()->isAnonymousStructOrUnion()) continue; 1836 1837 CleanupKind cleanupKind = getCleanupKind(dtorKind); 1838 EHStack.pushCleanup<DestroyField>(cleanupKind, Field, 1839 getDestroyer(dtorKind), 1840 cleanupKind & EHCleanup); 1841 } 1842 } 1843 1844 /// EmitCXXAggrConstructorCall - Emit a loop to call a particular 1845 /// constructor for each of several members of an array. 
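///
/// Informally, for an array 'A arr[n]' this amounts to
///
///   for (A *cur = begin, *end = begin + n; cur != end; ++cur)
///     construct *cur;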
1846 /// 1847 /// \param ctor the constructor to call for each element 1848 /// \param arrayType the type of the array to initialize 1849 /// \param arrayBegin an arrayType* 1850 /// \param zeroInitialize true if each element should be 1851 /// zero-initialized before it is constructed 1852 void CodeGenFunction::EmitCXXAggrConstructorCall( 1853 const CXXConstructorDecl *ctor, const ConstantArrayType *arrayType, 1854 Address arrayBegin, const CXXConstructExpr *E, bool zeroInitialize) { 1855 QualType elementType; 1856 llvm::Value *numElements = 1857 emitArrayLength(arrayType, elementType, arrayBegin); 1858 1859 EmitCXXAggrConstructorCall(ctor, numElements, arrayBegin, E, zeroInitialize); 1860 } 1861 1862 /// EmitCXXAggrConstructorCall - Emit a loop to call a particular 1863 /// constructor for each of several members of an array. 1864 /// 1865 /// \param ctor the constructor to call for each element 1866 /// \param numElements the number of elements in the array; 1867 /// may be zero 1868 /// \param arrayBase a T*, where T is the type constructed by ctor 1869 /// \param zeroInitialize true if each element should be 1870 /// zero-initialized before it is constructed 1871 void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor, 1872 llvm::Value *numElements, 1873 Address arrayBase, 1874 const CXXConstructExpr *E, 1875 bool zeroInitialize) { 1876 1877 // It's legal for numElements to be zero. This can happen both 1878 // dynamically, because x can be zero in 'new A[x]', and statically, 1879 // because of GCC extensions that permit zero-length arrays. There 1880 // are probably legitimate places where we could assume that this 1881 // doesn't happen, but it's not clear that it's worth it. 1882 llvm::BranchInst *zeroCheckBranch = nullptr; 1883 1884 // Optimize for a constant count. 1885 llvm::ConstantInt *constantCount 1886 = dyn_cast<llvm::ConstantInt>(numElements); 1887 if (constantCount) { 1888 // Just skip out if the constant count is zero. 1889 if (constantCount->isZero()) return; 1890 1891 // Otherwise, emit the check. 1892 } else { 1893 llvm::BasicBlock *loopBB = createBasicBlock("new.ctorloop"); 1894 llvm::Value *iszero = Builder.CreateIsNull(numElements, "isempty"); 1895 zeroCheckBranch = Builder.CreateCondBr(iszero, loopBB, loopBB); 1896 EmitBlock(loopBB); 1897 } 1898 1899 // Find the end of the array. 1900 llvm::Value *arrayBegin = arrayBase.getPointer(); 1901 llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(arrayBegin, numElements, 1902 "arrayctor.end"); 1903 1904 // Enter the loop, setting up a phi for the current location to initialize. 1905 llvm::BasicBlock *entryBB = Builder.GetInsertBlock(); 1906 llvm::BasicBlock *loopBB = createBasicBlock("arrayctor.loop"); 1907 EmitBlock(loopBB); 1908 llvm::PHINode *cur = Builder.CreatePHI(arrayBegin->getType(), 2, 1909 "arrayctor.cur"); 1910 cur->addIncoming(arrayBegin, entryBB); 1911 1912 // Inside the loop body, emit the constructor call on the array element. 1913 1914 // The alignment of the base, adjusted by the size of a single element, 1915 // provides a conservative estimate of the alignment of every element. 1916 // (This assumes we never start tracking offsetted alignments.) 1917 // 1918 // Note that these are complete objects and so we don't need to 1919 // use the non-virtual size or alignment. 
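  // E.g. (informal): for a 16-byte-aligned array of 12-byte elements, the
  // element at offset 12 is only guaranteed 4-byte alignment, so 4 bytes is
  // the conservative per-element alignment computed below.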
1920 QualType type = getContext().getTypeDeclType(ctor->getParent()); 1921 CharUnits eltAlignment = 1922 arrayBase.getAlignment() 1923 .alignmentOfArrayElement(getContext().getTypeSizeInChars(type)); 1924 Address curAddr = Address(cur, eltAlignment); 1925 1926 // Zero initialize the storage, if requested. 1927 if (zeroInitialize) 1928 EmitNullInitialization(curAddr, type); 1929 1930 // C++ [class.temporary]p4: 1931 // There are two contexts in which temporaries are destroyed at a different 1932 // point than the end of the full-expression. The first context is when a 1933 // default constructor is called to initialize an element of an array. 1934 // If the constructor has one or more default arguments, the destruction of 1935 // every temporary created in a default argument expression is sequenced 1936 // before the construction of the next array element, if any. 1937 1938 { 1939 RunCleanupsScope Scope(*this); 1940 1941 // Evaluate the constructor and its arguments in a regular 1942 // partial-destroy cleanup. 1943 if (getLangOpts().Exceptions && 1944 !ctor->getParent()->hasTrivialDestructor()) { 1945 Destroyer *destroyer = destroyCXXObject; 1946 pushRegularPartialArrayCleanup(arrayBegin, cur, type, eltAlignment, 1947 *destroyer); 1948 } 1949 1950 EmitCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/false, 1951 /*Delegating=*/false, curAddr, E); 1952 } 1953 1954 // Go to the next element. 1955 llvm::Value *next = 1956 Builder.CreateInBoundsGEP(cur, llvm::ConstantInt::get(SizeTy, 1), 1957 "arrayctor.next"); 1958 cur->addIncoming(next, Builder.GetInsertBlock()); 1959 1960 // Check whether that's the end of the loop. 1961 llvm::Value *done = Builder.CreateICmpEQ(next, arrayEnd, "arrayctor.done"); 1962 llvm::BasicBlock *contBB = createBasicBlock("arrayctor.cont"); 1963 Builder.CreateCondBr(done, contBB, loopBB); 1964 1965 // Patch the earlier check to skip over the loop. 1966 if (zeroCheckBranch) zeroCheckBranch->setSuccessor(0, contBB); 1967 1968 EmitBlock(contBB); 1969 } 1970 1971 void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF, 1972 Address addr, 1973 QualType type) { 1974 const RecordType *rtype = type->castAs<RecordType>(); 1975 const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl()); 1976 const CXXDestructorDecl *dtor = record->getDestructor(); 1977 assert(!dtor->isTrivial()); 1978 CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false, 1979 /*Delegating=*/false, addr); 1980 } 1981 1982 void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D, 1983 CXXCtorType Type, 1984 bool ForVirtualBase, 1985 bool Delegating, Address This, 1986 const CXXConstructExpr *E) { 1987 const CXXRecordDecl *ClassDecl = D->getParent(); 1988 1989 // C++11 [class.mfct.non-static]p2: 1990 // If a non-static member function of a class X is called for an object that 1991 // is not of type X, or of a type derived from X, the behavior is undefined. 1992 // FIXME: Provide a source location here. 1993 EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, SourceLocation(), 1994 This.getPointer(), getContext().getRecordType(ClassDecl)); 1995 1996 if (D->isTrivial() && D->isDefaultConstructor()) { 1997 assert(E->getNumArgs() == 0 && "trivial default ctor with args"); 1998 return; 1999 } 2000 2001 // If this is a trivial constructor, just emit what's needed. If this is a 2002 // union copy constructor, we must emit a memcpy, because the AST does not 2003 // model that copy. 
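  // E.g. (informal): for 'struct P { int x, y; };  P b(a);' the trivial copy
  // constructor is emitted as an aggregate (memcpy-style) copy of 'a' into
  // 'b' rather than as a call.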
2004 if (isMemcpyEquivalentSpecialMember(D)) {
2005 assert(E->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
2006
2007 const Expr *Arg = E->getArg(0);
2008 QualType SrcTy = Arg->getType();
2009 Address Src = EmitLValue(Arg).getAddress();
2010 QualType DestTy = getContext().getTypeDeclType(ClassDecl);
2011 EmitAggregateCopyCtor(This, Src, DestTy, SrcTy);
2012 return;
2013 }
2014
2015 CallArgList Args;
2016
2017 // Push the this ptr.
2018 Args.add(RValue::get(This.getPointer()), D->getThisType(getContext()));
2019
2020 // Add the rest of the user-supplied arguments.
2021 const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();
2022 EmitCallArgs(Args, FPT, E->arguments(), E->getConstructor());
2023
2024 // Insert any ABI-specific implicit constructor arguments.
2025 unsigned ExtraArgs = CGM.getCXXABI().addImplicitConstructorArgs(
2026 *this, D, Type, ForVirtualBase, Delegating, Args);
2027
2028 // Emit the call.
2029 llvm::Value *Callee = CGM.getAddrOfCXXStructor(D, getFromCtorType(Type));
2030 const CGFunctionInfo &Info =
2031 CGM.getTypes().arrangeCXXConstructorCall(Args, D, Type, ExtraArgs);
2032 EmitCall(Info, Callee, ReturnValueSlot(), Args, D);
2033
2034 // Generate vtable assumptions if we're constructing a complete object
2035 // with a vtable. We don't do this for base subobjects for two reasons:
2036 // first, it's incorrect for classes with virtual bases, and second, we're
2037 // about to overwrite the vptrs anyway.
2038 // We also have to make sure that we can refer to the vtable:
2039 // for now we only do so when it is safe to emit the vtable speculatively.
2040 // FIXME: if the vtable is used by the ctor/dtor, or if it is external and
2041 // we are sure that its definition is not hidden,
2042 // then it is always safe to refer to it.
2043 if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
2044 ClassDecl->isDynamicClass() && Type != Ctor_Base &&
2045 CGM.getCXXABI().canSpeculativelyEmitVTable(ClassDecl))
2046 EmitVTableAssumptionLoads(ClassDecl, This);
2047 }
2048
2049 void CodeGenFunction::EmitVTableAssumptionLoad(const VPtr &Vptr, Address This) {
2050 llvm::Value *VTableGlobal =
2051 CGM.getCXXABI().getVTableAddressPoint(Vptr.Base, Vptr.VTableClass);
2052 if (!VTableGlobal)
2053 return;
2054
2055 // We can just use the base offset in the complete class.
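  // Informally, the code below loads the vptr of the (possibly adjusted)
  // 'This' and emits 'llvm.assume(vptr == expected vtable address point)',
  // which the optimizer may later use to devirtualize calls through this
  // object.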
2056 CharUnits NonVirtualOffset = Vptr.Base.getBaseOffset(); 2057 2058 if (!NonVirtualOffset.isZero()) 2059 This = 2060 ApplyNonVirtualAndVirtualOffset(*this, This, NonVirtualOffset, nullptr, 2061 Vptr.VTableClass, Vptr.NearestVBase); 2062 2063 llvm::Value *VPtrValue = GetVTablePtr(This, VTableGlobal->getType()); 2064 llvm::Value *Cmp = 2065 Builder.CreateICmpEQ(VPtrValue, VTableGlobal, "cmp.vtables"); 2066 Builder.CreateAssumption(Cmp); 2067 } 2068 2069 void CodeGenFunction::EmitVTableAssumptionLoads(const CXXRecordDecl *ClassDecl, 2070 Address This) { 2071 if (CGM.getCXXABI().doStructorsInitializeVPtrs(ClassDecl)) 2072 for (const VPtr &Vptr : getVTablePointers(ClassDecl)) 2073 EmitVTableAssumptionLoad(Vptr, This); 2074 } 2075 2076 void 2077 CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D, 2078 Address This, Address Src, 2079 const CXXConstructExpr *E) { 2080 if (isMemcpyEquivalentSpecialMember(D)) { 2081 assert(E->getNumArgs() == 1 && "unexpected argcount for trivial ctor"); 2082 assert(D->isCopyOrMoveConstructor() && 2083 "trivial 1-arg ctor not a copy/move ctor"); 2084 EmitAggregateCopyCtor(This, Src, 2085 getContext().getTypeDeclType(D->getParent()), 2086 (*E->arg_begin())->getType()); 2087 return; 2088 } 2089 llvm::Value *Callee = CGM.getAddrOfCXXStructor(D, StructorType::Complete); 2090 assert(D->isInstance() && 2091 "Trying to emit a member call expr on a static method!"); 2092 2093 const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>(); 2094 2095 CallArgList Args; 2096 2097 // Push the this ptr. 2098 Args.add(RValue::get(This.getPointer()), D->getThisType(getContext())); 2099 2100 // Push the src ptr. 2101 QualType QT = *(FPT->param_type_begin()); 2102 llvm::Type *t = CGM.getTypes().ConvertType(QT); 2103 Src = Builder.CreateBitCast(Src, t); 2104 Args.add(RValue::get(Src.getPointer()), QT); 2105 2106 // Skip over first argument (Src). 2107 EmitCallArgs(Args, FPT, drop_begin(E->arguments(), 1), E->getConstructor(), 2108 /*ParamsToSkip*/ 1); 2109 2110 EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, RequiredArgs::All), 2111 Callee, ReturnValueSlot(), Args, D); 2112 } 2113 2114 void 2115 CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor, 2116 CXXCtorType CtorType, 2117 const FunctionArgList &Args, 2118 SourceLocation Loc) { 2119 CallArgList DelegateArgs; 2120 2121 FunctionArgList::const_iterator I = Args.begin(), E = Args.end(); 2122 assert(I != E && "no parameters to constructor"); 2123 2124 // this 2125 DelegateArgs.add(RValue::get(LoadCXXThis()), (*I)->getType()); 2126 ++I; 2127 2128 // vtt 2129 if (llvm::Value *VTT = GetVTTParameter(GlobalDecl(Ctor, CtorType), 2130 /*ForVirtualBase=*/false, 2131 /*Delegating=*/true)) { 2132 QualType VoidPP = getContext().getPointerType(getContext().VoidPtrTy); 2133 DelegateArgs.add(RValue::get(VTT), VoidPP); 2134 2135 if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) { 2136 assert(I != E && "cannot skip vtt parameter, already done with args"); 2137 assert((*I)->getType() == VoidPP && "skipping parameter not of vtt type"); 2138 ++I; 2139 } 2140 } 2141 2142 // Explicit arguments. 
2143 for (; I != E; ++I) { 2144 const VarDecl *param = *I; 2145 // FIXME: per-argument source location 2146 EmitDelegateCallArg(DelegateArgs, param, Loc); 2147 } 2148 2149 llvm::Value *Callee = 2150 CGM.getAddrOfCXXStructor(Ctor, getFromCtorType(CtorType)); 2151 EmitCall(CGM.getTypes() 2152 .arrangeCXXStructorDeclaration(Ctor, getFromCtorType(CtorType)), 2153 Callee, ReturnValueSlot(), DelegateArgs, Ctor); 2154 } 2155 2156 namespace { 2157 struct CallDelegatingCtorDtor final : EHScopeStack::Cleanup { 2158 const CXXDestructorDecl *Dtor; 2159 Address Addr; 2160 CXXDtorType Type; 2161 2162 CallDelegatingCtorDtor(const CXXDestructorDecl *D, Address Addr, 2163 CXXDtorType Type) 2164 : Dtor(D), Addr(Addr), Type(Type) {} 2165 2166 void Emit(CodeGenFunction &CGF, Flags flags) override { 2167 CGF.EmitCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false, 2168 /*Delegating=*/true, Addr); 2169 } 2170 }; 2171 } 2172 2173 void 2174 CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor, 2175 const FunctionArgList &Args) { 2176 assert(Ctor->isDelegatingConstructor()); 2177 2178 Address ThisPtr = LoadCXXThisAddress(); 2179 2180 AggValueSlot AggSlot = 2181 AggValueSlot::forAddr(ThisPtr, Qualifiers(), 2182 AggValueSlot::IsDestructed, 2183 AggValueSlot::DoesNotNeedGCBarriers, 2184 AggValueSlot::IsNotAliased); 2185 2186 EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot); 2187 2188 const CXXRecordDecl *ClassDecl = Ctor->getParent(); 2189 if (CGM.getLangOpts().Exceptions && !ClassDecl->hasTrivialDestructor()) { 2190 CXXDtorType Type = 2191 CurGD.getCtorType() == Ctor_Complete ? Dtor_Complete : Dtor_Base; 2192 2193 EHStack.pushCleanup<CallDelegatingCtorDtor>(EHCleanup, 2194 ClassDecl->getDestructor(), 2195 ThisPtr, Type); 2196 } 2197 } 2198 2199 void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD, 2200 CXXDtorType Type, 2201 bool ForVirtualBase, 2202 bool Delegating, 2203 Address This) { 2204 CGM.getCXXABI().EmitDestructorCall(*this, DD, Type, ForVirtualBase, 2205 Delegating, This); 2206 } 2207 2208 namespace { 2209 struct CallLocalDtor final : EHScopeStack::Cleanup { 2210 const CXXDestructorDecl *Dtor; 2211 Address Addr; 2212 2213 CallLocalDtor(const CXXDestructorDecl *D, Address Addr) 2214 : Dtor(D), Addr(Addr) {} 2215 2216 void Emit(CodeGenFunction &CGF, Flags flags) override { 2217 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, 2218 /*ForVirtualBase=*/false, 2219 /*Delegating=*/false, Addr); 2220 } 2221 }; 2222 } 2223 2224 void CodeGenFunction::PushDestructorCleanup(const CXXDestructorDecl *D, 2225 Address Addr) { 2226 EHStack.pushCleanup<CallLocalDtor>(NormalAndEHCleanup, D, Addr); 2227 } 2228 2229 void CodeGenFunction::PushDestructorCleanup(QualType T, Address Addr) { 2230 CXXRecordDecl *ClassDecl = T->getAsCXXRecordDecl(); 2231 if (!ClassDecl) return; 2232 if (ClassDecl->hasTrivialDestructor()) return; 2233 2234 const CXXDestructorDecl *D = ClassDecl->getDestructor(); 2235 assert(D && D->isUsed() && "destructor not marked as used!"); 2236 PushDestructorCleanup(D, Addr); 2237 } 2238 2239 void CodeGenFunction::InitializeVTablePointer(const VPtr &Vptr) { 2240 // Compute the address point. 2241 llvm::Value *VTableAddressPoint = 2242 CGM.getCXXABI().getVTableAddressPointInStructor( 2243 *this, Vptr.VTableClass, Vptr.Base, Vptr.NearestVBase); 2244 2245 if (!VTableAddressPoint) 2246 return; 2247 2248 // Compute where to store the address point. 
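  // Informally, the vptr slot being initialized lives at
  //   This + NonVirtualOffset [+ VirtualOffset]
  // where the virtual part has to be looked up dynamically whenever the
  // subobject sits inside a virtual base of the most derived class.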
2249 llvm::Value *VirtualOffset = nullptr; 2250 CharUnits NonVirtualOffset = CharUnits::Zero(); 2251 2252 if (CGM.getCXXABI().isVirtualOffsetNeededForVTableField(*this, Vptr)) { 2253 // We need to use the virtual base offset offset because the virtual base 2254 // might have a different offset in the most derived class. 2255 2256 VirtualOffset = CGM.getCXXABI().GetVirtualBaseClassOffset( 2257 *this, LoadCXXThisAddress(), Vptr.VTableClass, Vptr.NearestVBase); 2258 NonVirtualOffset = Vptr.OffsetFromNearestVBase; 2259 } else { 2260 // We can just use the base offset in the complete class. 2261 NonVirtualOffset = Vptr.Base.getBaseOffset(); 2262 } 2263 2264 // Apply the offsets. 2265 Address VTableField = LoadCXXThisAddress(); 2266 2267 if (!NonVirtualOffset.isZero() || VirtualOffset) 2268 VTableField = ApplyNonVirtualAndVirtualOffset( 2269 *this, VTableField, NonVirtualOffset, VirtualOffset, Vptr.VTableClass, 2270 Vptr.NearestVBase); 2271 2272 // Finally, store the address point. Use the same LLVM types as the field to 2273 // support optimization. 2274 llvm::Type *VTablePtrTy = 2275 llvm::FunctionType::get(CGM.Int32Ty, /*isVarArg=*/true) 2276 ->getPointerTo() 2277 ->getPointerTo(); 2278 VTableField = Builder.CreateBitCast(VTableField, VTablePtrTy->getPointerTo()); 2279 VTableAddressPoint = Builder.CreateBitCast(VTableAddressPoint, VTablePtrTy); 2280 2281 llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField); 2282 CGM.DecorateInstruction(Store, CGM.getTBAAInfoForVTablePtr()); 2283 } 2284 2285 CodeGenFunction::VPtrsVector 2286 CodeGenFunction::getVTablePointers(const CXXRecordDecl *VTableClass) { 2287 CodeGenFunction::VPtrsVector VPtrsResult; 2288 VisitedVirtualBasesSetTy VBases; 2289 getVTablePointers(BaseSubobject(VTableClass, CharUnits::Zero()), 2290 /*NearestVBase=*/nullptr, 2291 /*OffsetFromNearestVBase=*/CharUnits::Zero(), 2292 /*BaseIsNonVirtualPrimaryBase=*/false, VTableClass, VBases, 2293 VPtrsResult); 2294 return VPtrsResult; 2295 } 2296 2297 void CodeGenFunction::getVTablePointers(BaseSubobject Base, 2298 const CXXRecordDecl *NearestVBase, 2299 CharUnits OffsetFromNearestVBase, 2300 bool BaseIsNonVirtualPrimaryBase, 2301 const CXXRecordDecl *VTableClass, 2302 VisitedVirtualBasesSetTy &VBases, 2303 VPtrsVector &Vptrs) { 2304 // If this base is a non-virtual primary base the address point has already 2305 // been set. 2306 if (!BaseIsNonVirtualPrimaryBase) { 2307 // Initialize the vtable pointer for this base. 2308 VPtr Vptr = {Base, NearestVBase, OffsetFromNearestVBase, VTableClass}; 2309 Vptrs.push_back(Vptr); 2310 } 2311 2312 const CXXRecordDecl *RD = Base.getBase(); 2313 2314 // Traverse bases. 2315 for (const auto &I : RD->bases()) { 2316 CXXRecordDecl *BaseDecl 2317 = cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl()); 2318 2319 // Ignore classes without a vtable. 2320 if (!BaseDecl->isDynamicClass()) 2321 continue; 2322 2323 CharUnits BaseOffset; 2324 CharUnits BaseOffsetFromNearestVBase; 2325 bool BaseDeclIsNonVirtualPrimaryBase; 2326 2327 if (I.isVirtual()) { 2328 // Check if we've visited this virtual base before. 
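      // (A virtual base is shared by every subobject that declares it, so it
      //  is visited at most once, and its offset below is taken from the most
      //  derived class's layout rather than from the current base.)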
2329 if (!VBases.insert(BaseDecl).second) 2330 continue; 2331 2332 const ASTRecordLayout &Layout = 2333 getContext().getASTRecordLayout(VTableClass); 2334 2335 BaseOffset = Layout.getVBaseClassOffset(BaseDecl); 2336 BaseOffsetFromNearestVBase = CharUnits::Zero(); 2337 BaseDeclIsNonVirtualPrimaryBase = false; 2338 } else { 2339 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 2340 2341 BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl); 2342 BaseOffsetFromNearestVBase = 2343 OffsetFromNearestVBase + Layout.getBaseClassOffset(BaseDecl); 2344 BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl; 2345 } 2346 2347 getVTablePointers( 2348 BaseSubobject(BaseDecl, BaseOffset), 2349 I.isVirtual() ? BaseDecl : NearestVBase, BaseOffsetFromNearestVBase, 2350 BaseDeclIsNonVirtualPrimaryBase, VTableClass, VBases, Vptrs); 2351 } 2352 } 2353 2354 void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) { 2355 // Ignore classes without a vtable. 2356 if (!RD->isDynamicClass()) 2357 return; 2358 2359 // Initialize the vtable pointers for this class and all of its bases. 2360 if (CGM.getCXXABI().doStructorsInitializeVPtrs(RD)) 2361 for (const VPtr &Vptr : getVTablePointers(RD)) 2362 InitializeVTablePointer(Vptr); 2363 2364 if (RD->getNumVBases()) 2365 CGM.getCXXABI().initializeHiddenVirtualInheritanceMembers(*this, RD); 2366 } 2367 2368 llvm::Value *CodeGenFunction::GetVTablePtr(Address This, 2369 llvm::Type *Ty) { 2370 Address VTablePtrSrc = Builder.CreateElementBitCast(This, Ty); 2371 llvm::Instruction *VTable = Builder.CreateLoad(VTablePtrSrc, "vtable"); 2372 CGM.DecorateInstruction(VTable, CGM.getTBAAInfoForVTablePtr()); 2373 return VTable; 2374 } 2375 2376 // If a class has a single non-virtual base and does not introduce or override 2377 // virtual member functions or fields, it will have the same layout as its base. 2378 // This function returns the least derived such class. 2379 // 2380 // Casting an instance of a base class to such a derived class is technically 2381 // undefined behavior, but it is a relatively common hack for introducing member 2382 // functions on class instances with specific properties (e.g. llvm::Operator) 2383 // that works under most compilers and should not have security implications, so 2384 // we allow it by default. It can be disabled with -fsanitize=cfi-cast-strict. 2385 static const CXXRecordDecl * 2386 LeastDerivedClassWithSameLayout(const CXXRecordDecl *RD) { 2387 if (!RD->field_empty()) 2388 return RD; 2389 2390 if (RD->getNumVBases() != 0) 2391 return RD; 2392 2393 if (RD->getNumBases() != 1) 2394 return RD; 2395 2396 for (const CXXMethodDecl *MD : RD->methods()) { 2397 if (MD->isVirtual()) { 2398 // Virtual member functions are only ok if they are implicit destructors 2399 // because the implicit destructor will have the same semantics as the 2400 // base class's destructor if no fields are added. 
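      // E.g. (informal): for 'struct D : B {};' where B declares a virtual
      // destructor, D's implicitly declared destructor overrides it but
      // behaves identically when no fields are added, so D can still be
      // treated as having B's layout.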
2401 if (isa<CXXDestructorDecl>(MD) && MD->isImplicit()) 2402 continue; 2403 return RD; 2404 } 2405 } 2406 2407 return LeastDerivedClassWithSameLayout( 2408 RD->bases_begin()->getType()->getAsCXXRecordDecl()); 2409 } 2410 2411 void CodeGenFunction::EmitVTablePtrCheckForCall(const CXXMethodDecl *MD, 2412 llvm::Value *VTable, 2413 CFITypeCheckKind TCK, 2414 SourceLocation Loc) { 2415 const CXXRecordDecl *ClassDecl = MD->getParent(); 2416 if (!SanOpts.has(SanitizerKind::CFICastStrict)) 2417 ClassDecl = LeastDerivedClassWithSameLayout(ClassDecl); 2418 2419 EmitVTablePtrCheck(ClassDecl, VTable, TCK, Loc); 2420 } 2421 2422 void CodeGenFunction::EmitVTablePtrCheckForCast(QualType T, 2423 llvm::Value *Derived, 2424 bool MayBeNull, 2425 CFITypeCheckKind TCK, 2426 SourceLocation Loc) { 2427 if (!getLangOpts().CPlusPlus) 2428 return; 2429 2430 auto *ClassTy = T->getAs<RecordType>(); 2431 if (!ClassTy) 2432 return; 2433 2434 const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(ClassTy->getDecl()); 2435 2436 if (!ClassDecl->isCompleteDefinition() || !ClassDecl->isDynamicClass()) 2437 return; 2438 2439 if (!SanOpts.has(SanitizerKind::CFICastStrict)) 2440 ClassDecl = LeastDerivedClassWithSameLayout(ClassDecl); 2441 2442 llvm::BasicBlock *ContBlock = 0; 2443 2444 if (MayBeNull) { 2445 llvm::Value *DerivedNotNull = 2446 Builder.CreateIsNotNull(Derived, "cast.nonnull"); 2447 2448 llvm::BasicBlock *CheckBlock = createBasicBlock("cast.check"); 2449 ContBlock = createBasicBlock("cast.cont"); 2450 2451 Builder.CreateCondBr(DerivedNotNull, CheckBlock, ContBlock); 2452 2453 EmitBlock(CheckBlock); 2454 } 2455 2456 llvm::Value *VTable = 2457 GetVTablePtr(Address(Derived, getPointerAlign()), Int8PtrTy); 2458 EmitVTablePtrCheck(ClassDecl, VTable, TCK, Loc); 2459 2460 if (MayBeNull) { 2461 Builder.CreateBr(ContBlock); 2462 EmitBlock(ContBlock); 2463 } 2464 } 2465 2466 void CodeGenFunction::EmitVTablePtrCheck(const CXXRecordDecl *RD, 2467 llvm::Value *VTable, 2468 CFITypeCheckKind TCK, 2469 SourceLocation Loc) { 2470 if (CGM.IsCFIBlacklistedRecord(RD)) 2471 return; 2472 2473 SanitizerScope SanScope(this); 2474 2475 llvm::Value *BitSetName = llvm::MetadataAsValue::get( 2476 getLLVMContext(), 2477 CGM.CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0))); 2478 2479 llvm::Value *CastedVTable = Builder.CreateBitCast(VTable, Int8PtrTy); 2480 llvm::Value *BitSetTest = 2481 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::bitset_test), 2482 {CastedVTable, BitSetName}); 2483 2484 SanitizerMask M; 2485 switch (TCK) { 2486 case CFITCK_VCall: 2487 M = SanitizerKind::CFIVCall; 2488 break; 2489 case CFITCK_NVCall: 2490 M = SanitizerKind::CFINVCall; 2491 break; 2492 case CFITCK_DerivedCast: 2493 M = SanitizerKind::CFIDerivedCast; 2494 break; 2495 case CFITCK_UnrelatedCast: 2496 M = SanitizerKind::CFIUnrelatedCast; 2497 break; 2498 } 2499 2500 llvm::Constant *StaticData[] = { 2501 EmitCheckSourceLocation(Loc), 2502 EmitCheckTypeDescriptor(QualType(RD->getTypeForDecl(), 0)), 2503 llvm::ConstantInt::get(Int8Ty, TCK), 2504 }; 2505 EmitCheck(std::make_pair(BitSetTest, M), "cfi_bad_type", StaticData, 2506 CastedVTable); 2507 } 2508 2509 // FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do 2510 // quite what we want. 
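// (It strips parentheses, CK_NoOp casts, and the GNU '__extension__'
//  operator, and nothing else.)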
2511 static const Expr *skipNoOpCastsAndParens(const Expr *E) { 2512 while (true) { 2513 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) { 2514 E = PE->getSubExpr(); 2515 continue; 2516 } 2517 2518 if (const CastExpr *CE = dyn_cast<CastExpr>(E)) { 2519 if (CE->getCastKind() == CK_NoOp) { 2520 E = CE->getSubExpr(); 2521 continue; 2522 } 2523 } 2524 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 2525 if (UO->getOpcode() == UO_Extension) { 2526 E = UO->getSubExpr(); 2527 continue; 2528 } 2529 } 2530 return E; 2531 } 2532 } 2533 2534 bool 2535 CodeGenFunction::CanDevirtualizeMemberFunctionCall(const Expr *Base, 2536 const CXXMethodDecl *MD) { 2537 // When building with -fapple-kext, all calls must go through the vtable since 2538 // the kernel linker can do runtime patching of vtables. 2539 if (getLangOpts().AppleKext) 2540 return false; 2541 2542 // If the most derived class is marked final, we know that no subclass can 2543 // override this member function and so we can devirtualize it. For example: 2544 // 2545 // struct A { virtual void f(); } 2546 // struct B final : A { }; 2547 // 2548 // void f(B *b) { 2549 // b->f(); 2550 // } 2551 // 2552 const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType(); 2553 if (MostDerivedClassDecl->hasAttr<FinalAttr>()) 2554 return true; 2555 2556 // If the member function is marked 'final', we know that it can't be 2557 // overridden and can therefore devirtualize it. 2558 if (MD->hasAttr<FinalAttr>()) 2559 return true; 2560 2561 // Similarly, if the class itself is marked 'final' it can't be overridden 2562 // and we can therefore devirtualize the member function call. 2563 if (MD->getParent()->hasAttr<FinalAttr>()) 2564 return true; 2565 2566 Base = skipNoOpCastsAndParens(Base); 2567 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) { 2568 if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) { 2569 // This is a record decl. We know the type and can devirtualize it. 2570 return VD->getType()->isRecordType(); 2571 } 2572 2573 return false; 2574 } 2575 2576 // We can devirtualize calls on an object accessed by a class member access 2577 // expression, since by C++11 [basic.life]p6 we know that it can't refer to 2578 // a derived class object constructed in the same location. 2579 if (const MemberExpr *ME = dyn_cast<MemberExpr>(Base)) 2580 if (const ValueDecl *VD = dyn_cast<ValueDecl>(ME->getMemberDecl())) 2581 return VD->getType()->isRecordType(); 2582 2583 // We can always devirtualize calls on temporary object expressions. 2584 if (isa<CXXConstructExpr>(Base)) 2585 return true; 2586 2587 // And calls on bound temporaries. 2588 if (isa<CXXBindTemporaryExpr>(Base)) 2589 return true; 2590 2591 // Check if this is a call expr that returns a record type. 2592 if (const CallExpr *CE = dyn_cast<CallExpr>(Base)) 2593 return CE->getCallReturnType(getContext())->isRecordType(); 2594 2595 // We can't devirtualize the call. 2596 return false; 2597 } 2598 2599 void CodeGenFunction::EmitForwardingCallToLambda( 2600 const CXXMethodDecl *callOperator, 2601 CallArgList &callArgs) { 2602 // Get the address of the call operator. 2603 const CGFunctionInfo &calleeFnInfo = 2604 CGM.getTypes().arrangeCXXMethodDeclaration(callOperator); 2605 llvm::Value *callee = 2606 CGM.GetAddrOfFunction(GlobalDecl(callOperator), 2607 CGM.getTypes().GetFunctionType(calleeFnInfo)); 2608 2609 // Prepare the return slot. 
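  // If the call operator returns its result indirectly, reuse this thunk's
  // own return slot so the result is produced in place; otherwise the
  // returned RValue is copied out after the call below.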
2610 const FunctionProtoType *FPT = 2611 callOperator->getType()->castAs<FunctionProtoType>(); 2612 QualType resultType = FPT->getReturnType(); 2613 ReturnValueSlot returnSlot; 2614 if (!resultType->isVoidType() && 2615 calleeFnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect && 2616 !hasScalarEvaluationKind(calleeFnInfo.getReturnType())) 2617 returnSlot = ReturnValueSlot(ReturnValue, resultType.isVolatileQualified()); 2618 2619 // We don't need to separately arrange the call arguments because 2620 // the call can't be variadic anyway --- it's impossible to forward 2621 // variadic arguments. 2622 2623 // Now emit our call. 2624 RValue RV = EmitCall(calleeFnInfo, callee, returnSlot, 2625 callArgs, callOperator); 2626 2627 // If necessary, copy the returned value into the slot. 2628 if (!resultType->isVoidType() && returnSlot.isNull()) 2629 EmitReturnOfRValue(RV, resultType); 2630 else 2631 EmitBranchThroughCleanup(ReturnBlock); 2632 } 2633 2634 void CodeGenFunction::EmitLambdaBlockInvokeBody() { 2635 const BlockDecl *BD = BlockInfo->getBlockDecl(); 2636 const VarDecl *variable = BD->capture_begin()->getVariable(); 2637 const CXXRecordDecl *Lambda = variable->getType()->getAsCXXRecordDecl(); 2638 2639 // Start building arguments for forwarding call 2640 CallArgList CallArgs; 2641 2642 QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda)); 2643 Address ThisPtr = GetAddrOfBlockDecl(variable, false); 2644 CallArgs.add(RValue::get(ThisPtr.getPointer()), ThisType); 2645 2646 // Add the rest of the parameters. 2647 for (auto param : BD->params()) 2648 EmitDelegateCallArg(CallArgs, param, param->getLocStart()); 2649 2650 assert(!Lambda->isGenericLambda() && 2651 "generic lambda interconversion to block not implemented"); 2652 EmitForwardingCallToLambda(Lambda->getLambdaCallOperator(), CallArgs); 2653 } 2654 2655 void CodeGenFunction::EmitLambdaToBlockPointerBody(FunctionArgList &Args) { 2656 if (cast<CXXMethodDecl>(CurCodeDecl)->isVariadic()) { 2657 // FIXME: Making this work correctly is nasty because it requires either 2658 // cloning the body of the call operator or making the call operator forward. 2659 CGM.ErrorUnsupported(CurCodeDecl, "lambda conversion to variadic function"); 2660 return; 2661 } 2662 2663 EmitFunctionBody(Args, cast<FunctionDecl>(CurGD.getDecl())->getBody()); 2664 } 2665 2666 void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) { 2667 const CXXRecordDecl *Lambda = MD->getParent(); 2668 2669 // Start building arguments for forwarding call 2670 CallArgList CallArgs; 2671 2672 QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda)); 2673 llvm::Value *ThisPtr = llvm::UndefValue::get(getTypes().ConvertType(ThisType)); 2674 CallArgs.add(RValue::get(ThisPtr), ThisType); 2675 2676 // Add the rest of the parameters. 2677 for (auto Param : MD->params()) 2678 EmitDelegateCallArg(CallArgs, Param, Param->getLocStart()); 2679 2680 const CXXMethodDecl *CallOp = Lambda->getLambdaCallOperator(); 2681 // For a generic lambda, find the corresponding call operator specialization 2682 // to which the call to the static-invoker shall be forwarded. 
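  // E.g. (informal): for 'auto l = [](auto x) { return x; };' converted to
  // 'int (*)(int)', the static invoker for int must forward to the
  // 'operator()<int>' specialization, which is looked up here.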
2683 if (Lambda->isGenericLambda()) { 2684 assert(MD->isFunctionTemplateSpecialization()); 2685 const TemplateArgumentList *TAL = MD->getTemplateSpecializationArgs(); 2686 FunctionTemplateDecl *CallOpTemplate = CallOp->getDescribedFunctionTemplate(); 2687 void *InsertPos = nullptr; 2688 FunctionDecl *CorrespondingCallOpSpecialization = 2689 CallOpTemplate->findSpecialization(TAL->asArray(), InsertPos); 2690 assert(CorrespondingCallOpSpecialization); 2691 CallOp = cast<CXXMethodDecl>(CorrespondingCallOpSpecialization); 2692 } 2693 EmitForwardingCallToLambda(CallOp, CallArgs); 2694 } 2695 2696 void CodeGenFunction::EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD) { 2697 if (MD->isVariadic()) { 2698 // FIXME: Making this work correctly is nasty because it requires either 2699 // cloning the body of the call operator or making the call operator forward. 2700 CGM.ErrorUnsupported(MD, "lambda conversion to variadic function"); 2701 return; 2702 } 2703 2704 EmitLambdaDelegatingInvokeBody(MD); 2705 } 2706