//===--- CGVTables.cpp - Emit LLVM Code for C++ vtables ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with C++ code generation of virtual tables.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Support/Format.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include <algorithm>
#include <cstdio>

using namespace clang;
using namespace CodeGen;

CodeGenVTables::CodeGenVTables(CodeGenModule &CGM)
    : CGM(CGM), VTContext(CGM.getContext().getVTableContext()) {}

llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD,
                                              const ThunkInfo &Thunk) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());

  // Compute the mangled name.
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD))
    getCXXABI().getMangleContext().mangleCXXDtorThunk(DD, GD.getDtorType(),
                                                      Thunk.This, Out);
  else
    getCXXABI().getMangleContext().mangleThunk(MD, Thunk, Out);

  llvm::Type *Ty = getTypes().GetFunctionTypeForVTable(GD);
  return GetOrCreateLLVMFunction(Name, Ty, GD, /*ForVTable=*/true,
                                 /*DontDefer=*/true, /*IsThunk=*/true);
}

static void setThunkVisibility(CodeGenModule &CGM, const CXXMethodDecl *MD,
                               const ThunkInfo &Thunk, llvm::Function *Fn) {
  CGM.setGlobalVisibility(Fn, MD);
}

static void setThunkProperties(CodeGenModule &CGM, const ThunkInfo &Thunk,
                               llvm::Function *ThunkFn, bool ForVTable,
                               GlobalDecl GD) {
  CGM.setFunctionLinkage(GD, ThunkFn);
  CGM.getCXXABI().setThunkLinkage(ThunkFn, ForVTable, GD,
                                  !Thunk.Return.isEmpty());

  // Set the right visibility.
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  setThunkVisibility(CGM, MD, Thunk, ThunkFn);

  if (CGM.supportsCOMDAT() && ThunkFn->isWeakForLinker())
    ThunkFn->setComdat(CGM.getModule().getOrInsertComdat(ThunkFn->getName()));
}

#ifndef NDEBUG
static bool similar(const ABIArgInfo &infoL, CanQualType typeL,
                    const ABIArgInfo &infoR, CanQualType typeR) {
  return (infoL.getKind() == infoR.getKind() &&
          (typeL == typeR ||
           (isa<PointerType>(typeL) && isa<PointerType>(typeR)) ||
           (isa<ReferenceType>(typeL) && isa<ReferenceType>(typeR))));
}
#endif

static RValue PerformReturnAdjustment(CodeGenFunction &CGF,
                                      QualType ResultType, RValue RV,
                                      const ThunkInfo &Thunk) {
  // Emit the return adjustment.
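  // Illustrative background (hypothetical hierarchy, not part of this file's
  // logic): given
  //   struct Base  { virtual Base *clone(); };
  //   struct Derived : Other, Base { Derived *clone() override; };
  // a call through Base* receives a Derived* from the override and must
  // convert it to Base*, which may require adding a nonzero offset when Base
  // is not the primary base. A null result has to be passed through
  // unadjusted, since offsetting null would fabricate a non-null pointer;
  // references can never be null, so the check below is skipped for them.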
  bool NullCheckValue = !ResultType->isReferenceType();

  llvm::BasicBlock *AdjustNull = nullptr;
  llvm::BasicBlock *AdjustNotNull = nullptr;
  llvm::BasicBlock *AdjustEnd = nullptr;

  llvm::Value *ReturnValue = RV.getScalarVal();

  if (NullCheckValue) {
    AdjustNull = CGF.createBasicBlock("adjust.null");
    AdjustNotNull = CGF.createBasicBlock("adjust.notnull");
    AdjustEnd = CGF.createBasicBlock("adjust.end");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(ReturnValue);
    CGF.Builder.CreateCondBr(IsNull, AdjustNull, AdjustNotNull);
    CGF.EmitBlock(AdjustNotNull);
  }

  auto ClassDecl = ResultType->getPointeeType()->getAsCXXRecordDecl();
  auto ClassAlign = CGF.CGM.getClassPointerAlignment(ClassDecl);
  ReturnValue = CGF.CGM.getCXXABI().performReturnAdjustment(
      CGF, Address(ReturnValue, ClassAlign), Thunk.Return);

  if (NullCheckValue) {
    CGF.Builder.CreateBr(AdjustEnd);
    CGF.EmitBlock(AdjustNull);
    CGF.Builder.CreateBr(AdjustEnd);
    CGF.EmitBlock(AdjustEnd);

    llvm::PHINode *PHI = CGF.Builder.CreatePHI(ReturnValue->getType(), 2);
    PHI->addIncoming(ReturnValue, AdjustNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(ReturnValue->getType()),
                     AdjustNull);
    ReturnValue = PHI;
  }

  return RValue::get(ReturnValue);
}

// This function does roughly the same thing as GenerateThunk, but in a
// very different way, so that va_start and va_end work correctly.
// FIXME: This function assumes "this" is the first non-sret LLVM argument of
//        a function, and that there is an alloca built in the entry block
//        for all accesses to "this".
// FIXME: This function assumes there is only one "ret" statement per function.
// FIXME: Cloning isn't correct in the presence of indirect goto!
// FIXME: This implementation of thunks bloats codesize by duplicating the
//        function definition. There are alternatives:
//        1. Add some sort of stub support to LLVM for cases where we can
//           do a this adjustment, then a sibcall.
//        2. We could transform the definition to take a va_list instead of an
//           actual variable argument list, then have the thunks (including a
//           no-op thunk for the regular definition) call va_start/va_end.
//           There's a bit of per-call overhead for this solution, but it's
//           better for codesize if the definition is long.
llvm::Function *
CodeGenFunction::GenerateVarArgsThunk(llvm::Function *Fn,
                                      const CGFunctionInfo &FnInfo,
                                      GlobalDecl GD, const ThunkInfo &Thunk) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  QualType ResultType = FPT->getReturnType();

  // Get the original function.
  assert(FnInfo.isVariadic());
  llvm::Type *Ty = CGM.getTypes().GetFunctionType(FnInfo);
  llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
  llvm::Function *BaseFn = cast<llvm::Function>(Callee);

  // Clone to thunk.
  llvm::ValueToValueMapTy VMap;
  llvm::Function *NewFn = llvm::CloneFunction(BaseFn, VMap);
  Fn->replaceAllUsesWith(NewFn);
  NewFn->takeName(Fn);
  Fn->eraseFromParent();
  Fn = NewFn;

  // "Initialize" CGF (minimally).
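  // (Only CurFn is needed here: the code below never emits a fresh prologue
  // for the clone, it just patches instructions in place via Builder, so the
  // rest of the CodeGenFunction state can stay at its defaults.)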
  CurFn = Fn;

  // Get the "this" value
  llvm::Function::arg_iterator AI = Fn->arg_begin();
  if (CGM.ReturnTypeUsesSRet(FnInfo))
    ++AI;

  // Find the first store of "this", which will be to the alloca associated
  // with "this".
  Address ThisPtr(&*AI, CGM.getClassPointerAlignment(MD->getParent()));
  llvm::BasicBlock *EntryBB = &Fn->front();
  llvm::BasicBlock::iterator ThisStore =
      std::find_if(EntryBB->begin(), EntryBB->end(), [&](llvm::Instruction &I) {
        return isa<llvm::StoreInst>(I) &&
               I.getOperand(0) == ThisPtr.getPointer();
      });
  assert(ThisStore != EntryBB->end() &&
         "Store of this should be in entry block?");
  // Adjust "this", if necessary.
  Builder.SetInsertPoint(&*ThisStore);
  llvm::Value *AdjustedThisPtr =
      CGM.getCXXABI().performThisAdjustment(*this, ThisPtr, Thunk.This);
  ThisStore->setOperand(0, AdjustedThisPtr);

  if (!Thunk.Return.isEmpty()) {
    // Fix up the returned value, if necessary.
    for (llvm::BasicBlock &BB : *Fn) {
      llvm::Instruction *T = BB.getTerminator();
      if (isa<llvm::ReturnInst>(T)) {
        RValue RV = RValue::get(T->getOperand(0));
        T->eraseFromParent();
        Builder.SetInsertPoint(&BB);
        RV = PerformReturnAdjustment(*this, ResultType, RV, Thunk);
        Builder.CreateRet(RV.getScalarVal());
        break;
      }
    }
  }

  return Fn;
}

void CodeGenFunction::StartThunk(llvm::Function *Fn, GlobalDecl GD,
                                 const CGFunctionInfo &FnInfo) {
  assert(!CurGD.getDecl() && "CurGD was already set!");
  CurGD = GD;
  CurFuncIsThunk = true;

  // Build FunctionArgs.
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  QualType ThisType = MD->getThisType(getContext());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  QualType ResultType = CGM.getCXXABI().HasThisReturn(GD)
                            ? ThisType
                            : CGM.getCXXABI().hasMostDerivedReturn(GD)
                                  ? CGM.getContext().VoidPtrTy
                                  : FPT->getReturnType();
  FunctionArgList FunctionArgs;

  // Create the implicit 'this' parameter declaration.
  CGM.getCXXABI().buildThisParam(*this, FunctionArgs);

  // Add the rest of the parameters.
  FunctionArgs.append(MD->param_begin(), MD->param_end());

  if (isa<CXXDestructorDecl>(MD))
    CGM.getCXXABI().addImplicitStructorParams(*this, ResultType, FunctionArgs);

  // Start defining the function.
  auto NL = ApplyDebugLocation::CreateEmpty(*this);
  StartFunction(GlobalDecl(), ResultType, Fn, FnInfo, FunctionArgs,
                MD->getLocation());
  // Create a scope with an artificial location for the body of this function.
  auto AL = ApplyDebugLocation::CreateArtificial(*this);

  // Since we didn't pass a GlobalDecl to StartFunction, do this ourselves.
  CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
  CXXThisValue = CXXABIThisValue;
  CurCodeDecl = MD;
  CurFuncDecl = MD;
}

void CodeGenFunction::FinishThunk() {
  // Clear these to restore the invariants expected by
  // StartFunction/FinishFunction.
  CurCodeDecl = nullptr;
  CurFuncDecl = nullptr;

  FinishFunction();
}

void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Constant *CalleePtr,
                                                const ThunkInfo *Thunk) {
  assert(isa<CXXMethodDecl>(CurGD.getDecl()) &&
         "Please use a new CGF for this thunk");
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(CurGD.getDecl());

  // Adjust the 'this' pointer if necessary.
  llvm::Value *AdjustedThisPtr =
      Thunk ?
          CGM.getCXXABI().performThisAdjustment(*this, LoadCXXThisAddress(),
                                                Thunk->This)
            : LoadCXXThis();

  if (CurFnInfo->usesInAlloca()) {
    // We don't handle return adjusting thunks, because they require us to call
    // the copy constructor.  For now, fall through and pretend the return
    // adjustment was empty so we don't crash.
    if (Thunk && !Thunk->Return.isEmpty()) {
      CGM.ErrorUnsupported(
          MD, "non-trivial argument copy for return-adjusting thunk");
    }
    EmitMustTailThunk(MD, AdjustedThisPtr, CalleePtr);
    return;
  }

  // Start building CallArgs.
  CallArgList CallArgs;
  QualType ThisType = MD->getThisType(getContext());
  CallArgs.add(RValue::get(AdjustedThisPtr), ThisType);

  if (isa<CXXDestructorDecl>(MD))
    CGM.getCXXABI().adjustCallArgsForDestructorThunk(*this, CurGD, CallArgs);

#ifndef NDEBUG
  unsigned PrefixArgs = CallArgs.size() - 1;
#endif
  // Add the rest of the arguments.
  for (const ParmVarDecl *PD : MD->parameters())
    EmitDelegateCallArg(CallArgs, PD, SourceLocation());

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

#ifndef NDEBUG
  const CGFunctionInfo &CallFnInfo = CGM.getTypes().arrangeCXXMethodCall(
      CallArgs, FPT, RequiredArgs::forPrototypePlus(FPT, 1, MD), PrefixArgs);
  assert(CallFnInfo.getRegParm() == CurFnInfo->getRegParm() &&
         CallFnInfo.isNoReturn() == CurFnInfo->isNoReturn() &&
         CallFnInfo.getCallingConvention() ==
             CurFnInfo->getCallingConvention());
  assert(isa<CXXDestructorDecl>(MD) || // ignore dtor return types
         similar(CallFnInfo.getReturnInfo(), CallFnInfo.getReturnType(),
                 CurFnInfo->getReturnInfo(), CurFnInfo->getReturnType()));
  assert(CallFnInfo.arg_size() == CurFnInfo->arg_size());
  for (unsigned i = 0, e = CurFnInfo->arg_size(); i != e; ++i)
    assert(similar(CallFnInfo.arg_begin()[i].info,
                   CallFnInfo.arg_begin()[i].type,
                   CurFnInfo->arg_begin()[i].info,
                   CurFnInfo->arg_begin()[i].type));
#endif

  // Determine whether we have a return value slot to use.
  QualType ResultType = CGM.getCXXABI().HasThisReturn(CurGD)
                            ? ThisType
                            : CGM.getCXXABI().hasMostDerivedReturn(CurGD)
                                  ? CGM.getContext().VoidPtrTy
                                  : FPT->getReturnType();
  ReturnValueSlot Slot;
  if (!ResultType->isVoidType() &&
      CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
      !hasScalarEvaluationKind(CurFnInfo->getReturnType()))
    Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());

  // Now emit our call.
  llvm::Instruction *CallOrInvoke;
  CGCallee Callee = CGCallee::forDirect(CalleePtr, MD);
  RValue RV = EmitCall(*CurFnInfo, Callee, Slot, CallArgs, &CallOrInvoke);

  // Consider return adjustment if we have ThunkInfo.
  if (Thunk && !Thunk->Return.isEmpty())
    RV = PerformReturnAdjustment(*this, ResultType, RV, *Thunk);
  else if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CallOrInvoke))
    Call->setTailCallKind(llvm::CallInst::TCK_Tail);

  // Emit return.
  if (!ResultType->isVoidType() && Slot.isNull())
    CGM.getCXXABI().EmitReturnFromThunk(*this, RV, ResultType);

  // Disable the final ARC autorelease.
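  // (The callee has already satisfied the ARC return convention for the
  // result; the thunk merely forwards it, so no additional autorelease
  // should be emitted in the epilogue.)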
  AutoreleaseResult = false;

  FinishThunk();
}

void CodeGenFunction::EmitMustTailThunk(const CXXMethodDecl *MD,
                                        llvm::Value *AdjustedThisPtr,
                                        llvm::Value *CalleePtr) {
  // Emitting a musttail call thunk doesn't use any of the CGCall.cpp machinery
  // to translate AST arguments into LLVM IR arguments.  For thunks, we know
  // that the caller prototype more or less matches the callee prototype with
  // the exception of 'this'.
  SmallVector<llvm::Value *, 8> Args;
  for (llvm::Argument &A : CurFn->args())
    Args.push_back(&A);

  // Set the adjusted 'this' pointer.
  const ABIArgInfo &ThisAI = CurFnInfo->arg_begin()->info;
  if (ThisAI.isDirect()) {
    const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
    int ThisArgNo = RetAI.isIndirect() && !RetAI.isSRetAfterThis() ? 1 : 0;
    llvm::Type *ThisType = Args[ThisArgNo]->getType();
    if (ThisType != AdjustedThisPtr->getType())
      AdjustedThisPtr = Builder.CreateBitCast(AdjustedThisPtr, ThisType);
    Args[ThisArgNo] = AdjustedThisPtr;
  } else {
    assert(ThisAI.isInAlloca() && "this is passed directly or inalloca");
    Address ThisAddr = GetAddrOfLocalVar(CXXABIThisDecl);
    llvm::Type *ThisType = ThisAddr.getElementType();
    if (ThisType != AdjustedThisPtr->getType())
      AdjustedThisPtr = Builder.CreateBitCast(AdjustedThisPtr, ThisType);
    Builder.CreateStore(AdjustedThisPtr, ThisAddr);
  }

  // Emit the musttail call manually.  Even if the prologue pushed cleanups, we
  // don't actually want to run them.
  llvm::CallInst *Call = Builder.CreateCall(CalleePtr, Args);
  Call->setTailCallKind(llvm::CallInst::TCK_MustTail);

  // Apply the standard set of call attributes.
  unsigned CallingConv;
  llvm::AttributeList Attrs;
  CGM.ConstructAttributeList(CalleePtr->getName(), *CurFnInfo, MD, Attrs,
                             CallingConv, /*AttrOnCallSite=*/true);
  Call->setAttributes(Attrs);
  Call->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  if (Call->getType()->isVoidTy())
    Builder.CreateRetVoid();
  else
    Builder.CreateRet(Call);

  // Finish the function to maintain CodeGenFunction invariants.
  // FIXME: Don't emit unreachable code.
  EmitBlock(createBasicBlock());
  FinishFunction();
}

void CodeGenFunction::generateThunk(llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    GlobalDecl GD, const ThunkInfo &Thunk) {
  StartThunk(Fn, GD, FnInfo);
  // Create a scope with an artificial location for the body of this function.
  auto AL = ApplyDebugLocation::CreateArtificial(*this);

  // Get our callee.
  llvm::Type *Ty = CGM.getTypes().GetFunctionType(
      CGM.getTypes().arrangeGlobalDeclaration(GD));
  llvm::Constant *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);

  // Make the call and return the result.
  EmitCallAndReturnForThunk(Callee, &Thunk);
}

void CodeGenVTables::emitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
                               bool ForVTable) {
  const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeGlobalDeclaration(GD);

  // FIXME: re-use FnInfo in this computation.
  llvm::Constant *C = CGM.GetAddrOfThunk(GD, Thunk);
  llvm::GlobalValue *Entry;

  // Strip off a bitcast if we got one back.
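  // (GetAddrOfThunk can return the function wrapped in a constant bitcast
  // when a declaration with the same name but a different type already
  // existed; the mismatched-type case is handled just below.)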
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(C)) {
    assert(CE->getOpcode() == llvm::Instruction::BitCast);
    Entry = cast<llvm::GlobalValue>(CE->getOperand(0));
  } else {
    Entry = cast<llvm::GlobalValue>(C);
  }

  // There's already a declaration with the same name, check if it has the same
  // type or if we need to replace it.
  if (Entry->getType()->getElementType() !=
      CGM.getTypes().GetFunctionTypeForVTable(GD)) {
    llvm::GlobalValue *OldThunkFn = Entry;

    // If the types mismatch then we have to rewrite the definition.
    assert(OldThunkFn->isDeclaration() &&
           "Shouldn't replace non-declaration");

    // Remove the name from the old thunk function and get a new thunk.
    OldThunkFn->setName(StringRef());
    Entry = cast<llvm::GlobalValue>(CGM.GetAddrOfThunk(GD, Thunk));

    // If needed, replace the old thunk with a bitcast.
    if (!OldThunkFn->use_empty()) {
      llvm::Constant *NewPtrForOldDecl =
          llvm::ConstantExpr::getBitCast(Entry, OldThunkFn->getType());
      OldThunkFn->replaceAllUsesWith(NewPtrForOldDecl);
    }

    // Remove the old thunk.
    OldThunkFn->eraseFromParent();
  }

  llvm::Function *ThunkFn = cast<llvm::Function>(Entry);
  bool ABIHasKeyFunctions = CGM.getTarget().getCXXABI().hasKeyFunctions();
  bool UseAvailableExternallyLinkage = ForVTable && ABIHasKeyFunctions;

  if (!ThunkFn->isDeclaration()) {
    if (!ABIHasKeyFunctions || UseAvailableExternallyLinkage) {
      // There is already a thunk emitted for this function, do nothing.
      return;
    }

    setThunkProperties(CGM, Thunk, ThunkFn, ForVTable, GD);
    return;
  }

  CGM.SetLLVMFunctionAttributesForDefinition(GD.getDecl(), ThunkFn);

  if (ThunkFn->isVarArg()) {
    // Varargs thunks are special; we can't just generate a call because
    // we can't copy the varargs.  Our implementation is rather
    // expensive/sucky at the moment, so don't generate the thunk unless
    // we have to.
    // FIXME: Do something better here; GenerateVarArgsThunk is extremely ugly.
    if (UseAvailableExternallyLinkage)
      return;
    ThunkFn =
        CodeGenFunction(CGM).GenerateVarArgsThunk(ThunkFn, FnInfo, GD, Thunk);
  } else {
    // Normal thunk body generation.
    CodeGenFunction(CGM).generateThunk(ThunkFn, FnInfo, GD, Thunk);
  }

  setThunkProperties(CGM, Thunk, ThunkFn, ForVTable, GD);
}

void CodeGenVTables::maybeEmitThunkForVTable(GlobalDecl GD,
                                             const ThunkInfo &Thunk) {
  // If the ABI has key functions, only the TU with the key function should emit
  // the thunk. However, we can allow inlining of thunks if we emit them with
  // available_externally linkage together with vtables when optimizations are
  // enabled.
  if (CGM.getTarget().getCXXABI().hasKeyFunctions() &&
      !CGM.getCodeGenOpts().OptimizationLevel)
    return;

  // We can't emit thunks for member functions with incomplete types.
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  if (!CGM.getTypes().isFuncTypeConvertible(
          MD->getType()->castAs<FunctionType>()))
    return;

  emitThunk(GD, Thunk, /*ForVTable=*/true);
}

void CodeGenVTables::EmitThunks(GlobalDecl GD) {
  const CXXMethodDecl *MD =
      cast<CXXMethodDecl>(GD.getDecl())->getCanonicalDecl();

  // We don't need to generate thunks for the base destructor.
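  // (In the Itanium ABI the vtable contains only the complete-object and
  // deleting destructors; the base-object destructor is always called
  // non-virtually, so it never needs a 'this'-adjusting entry.)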
  if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
    return;

  const VTableContextBase::ThunkInfoVectorTy *ThunkInfoVector =
      VTContext->getThunkInfo(GD);

  if (!ThunkInfoVector)
    return;

  for (const ThunkInfo &Thunk : *ThunkInfoVector)
    emitThunk(GD, Thunk, /*ForVTable=*/false);
}

void CodeGenVTables::addVTableComponent(
    ConstantArrayBuilder &builder, const VTableLayout &layout, unsigned idx,
    llvm::Constant *rtti, unsigned &nextVTableThunkIndex) {
  auto &component = layout.vtable_components()[idx];

  auto addOffsetConstant = [&](CharUnits offset) {
    builder.add(llvm::ConstantExpr::getIntToPtr(
        llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity()),
        CGM.Int8PtrTy));
  };

  switch (component.getKind()) {
  case VTableComponent::CK_VCallOffset:
    return addOffsetConstant(component.getVCallOffset());

  case VTableComponent::CK_VBaseOffset:
    return addOffsetConstant(component.getVBaseOffset());

  case VTableComponent::CK_OffsetToTop:
    return addOffsetConstant(component.getOffsetToTop());

  case VTableComponent::CK_RTTI:
    return builder.add(llvm::ConstantExpr::getBitCast(rtti, CGM.Int8PtrTy));

  case VTableComponent::CK_FunctionPointer:
  case VTableComponent::CK_CompleteDtorPointer:
  case VTableComponent::CK_DeletingDtorPointer: {
    GlobalDecl GD;

    // Get the right global decl.
    switch (component.getKind()) {
    default:
      llvm_unreachable("Unexpected vtable component kind");
    case VTableComponent::CK_FunctionPointer:
      GD = component.getFunctionDecl();
      break;
    case VTableComponent::CK_CompleteDtorPointer:
      GD = GlobalDecl(component.getDestructorDecl(), Dtor_Complete);
      break;
    case VTableComponent::CK_DeletingDtorPointer:
      GD = GlobalDecl(component.getDestructorDecl(), Dtor_Deleting);
      break;
    }

    if (CGM.getLangOpts().CUDA) {
      // Emit NULL for methods we can't codegen on this side. Otherwise we'd
      // end up with a vtable containing unresolved references.
      const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
      // OK on device side: functions w/ __device__ attribute
      // OK on host side: anything except __device__-only functions.
      bool CanEmitMethod =
          CGM.getLangOpts().CUDAIsDevice
              ? MD->hasAttr<CUDADeviceAttr>()
              : (MD->hasAttr<CUDAHostAttr>() || !MD->hasAttr<CUDADeviceAttr>());
      if (!CanEmitMethod)
        return builder.addNullPointer(CGM.Int8PtrTy);
      // Method is acceptable, continue processing as usual.
    }

    auto getSpecialVirtualFn = [&](StringRef name) {
      llvm::FunctionType *fnTy =
          llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
      llvm::Constant *fn = CGM.CreateRuntimeFunction(fnTy, name);
      if (auto f = dyn_cast<llvm::Function>(fn))
        f->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
      return llvm::ConstantExpr::getBitCast(fn, CGM.Int8PtrTy);
    };

    llvm::Constant *fnPtr;

    // Pure virtual member functions.
    if (cast<CXXMethodDecl>(GD.getDecl())->isPure()) {
      if (!PureVirtualFn)
        PureVirtualFn =
            getSpecialVirtualFn(CGM.getCXXABI().GetPureVirtualCallName());
      fnPtr = PureVirtualFn;

    // Deleted virtual member functions.
    } else if (cast<CXXMethodDecl>(GD.getDecl())->isDeleted()) {
      if (!DeletedVirtualFn)
        DeletedVirtualFn =
            getSpecialVirtualFn(CGM.getCXXABI().GetDeletedVirtualCallName());
      fnPtr = DeletedVirtualFn;

    // Thunks.
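    // (layout.vtable_thunks() pairs component indices with ThunkInfo in
    // index order, so walking the components in order lets the single
    // cursor nextVTableThunkIndex match each thunk to its slot.)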
    } else if (nextVTableThunkIndex < layout.vtable_thunks().size() &&
               layout.vtable_thunks()[nextVTableThunkIndex].first == idx) {
      auto &thunkInfo = layout.vtable_thunks()[nextVTableThunkIndex].second;

      maybeEmitThunkForVTable(GD, thunkInfo);
      nextVTableThunkIndex++;
      fnPtr = CGM.GetAddrOfThunk(GD, thunkInfo);

    // Otherwise we can use the method definition directly.
    } else {
      llvm::Type *fnTy = CGM.getTypes().GetFunctionTypeForVTable(GD);
      fnPtr = CGM.GetAddrOfFunction(GD, fnTy, /*ForVTable=*/true);
    }

    fnPtr = llvm::ConstantExpr::getBitCast(fnPtr, CGM.Int8PtrTy);
    builder.add(fnPtr);
    return;
  }

  case VTableComponent::CK_UnusedFunctionPointer:
    return builder.addNullPointer(CGM.Int8PtrTy);
  }

  llvm_unreachable("Unexpected vtable component kind");
}

llvm::Type *CodeGenVTables::getVTableType(const VTableLayout &layout) {
  SmallVector<llvm::Type *, 4> tys;
  for (unsigned i = 0, e = layout.getNumVTables(); i != e; ++i) {
    tys.push_back(llvm::ArrayType::get(CGM.Int8PtrTy, layout.getVTableSize(i)));
  }

  return llvm::StructType::get(CGM.getLLVMContext(), tys);
}

void CodeGenVTables::createVTableInitializer(ConstantStructBuilder &builder,
                                             const VTableLayout &layout,
                                             llvm::Constant *rtti) {
  unsigned nextVTableThunkIndex = 0;
  for (unsigned i = 0, e = layout.getNumVTables(); i != e; ++i) {
    auto vtableElem = builder.beginArray(CGM.Int8PtrTy);
    size_t thisIndex = layout.getVTableOffset(i);
    size_t nextIndex = thisIndex + layout.getVTableSize(i);
    for (unsigned idx = thisIndex; idx != nextIndex; ++idx) {
      addVTableComponent(vtableElem, layout, idx, rtti, nextVTableThunkIndex);
    }
    vtableElem.finishAndAddTo(builder);
  }
}

llvm::GlobalVariable *CodeGenVTables::GenerateConstructionVTable(
    const CXXRecordDecl *RD, const BaseSubobject &Base, bool BaseIsVirtual,
    llvm::GlobalVariable::LinkageTypes Linkage,
    VTableAddressPointsMapTy &AddressPoints) {
  if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
    DI->completeClassData(Base.getBase());

  std::unique_ptr<VTableLayout> VTLayout(
      getItaniumVTableContext().createConstructionVTableLayout(
          Base.getBase(), Base.getBaseOffset(), BaseIsVirtual, RD));

  // Add the address points.
  AddressPoints = VTLayout->getAddressPoints();

  // Get the mangled construction vtable name.
  SmallString<256> OutName;
  llvm::raw_svector_ostream Out(OutName);
  cast<ItaniumMangleContext>(CGM.getCXXABI().getMangleContext())
      .mangleCXXCtorVTable(RD, Base.getBaseOffset().getQuantity(),
                           Base.getBase(), Out);
  StringRef Name = OutName.str();

  llvm::Type *VTType = getVTableType(*VTLayout);

  // Construction vtable symbols are not part of the Itanium ABI, so we cannot
  // guarantee that they actually will be available externally. Instead, when
  // emitting an available_externally VTT, we provide references to an internal
  // linkage construction vtable. The ABI only requires complete-object vtables
  // to be the same for all instances of a type, not construction vtables.
  if (Linkage == llvm::GlobalVariable::AvailableExternallyLinkage)
    Linkage = llvm::GlobalVariable::InternalLinkage;

  // Create the variable that will hold the construction vtable.
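  // (Construction vtables are the tables the VTT points at; they are used
  // while base-class subobjects of RD are being constructed or destroyed,
  // when the dynamic type is temporarily the base rather than RD.)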
  llvm::GlobalVariable *VTable =
      CGM.CreateOrReplaceCXXRuntimeVariable(Name, VTType, Linkage);
  CGM.setGlobalVisibility(VTable, RD);

  // V-tables are always unnamed_addr.
  VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  llvm::Constant *RTTI = CGM.GetAddrOfRTTIDescriptor(
      CGM.getContext().getTagDeclType(Base.getBase()));

  // Create and set the initializer.
  ConstantInitBuilder builder(CGM);
  auto components = builder.beginStruct();
  createVTableInitializer(components, *VTLayout, RTTI);
  components.finishAndSetAsInitializer(VTable);

  CGM.EmitVTableTypeMetadata(VTable, *VTLayout.get());

  return VTable;
}

static bool shouldEmitAvailableExternallyVTable(const CodeGenModule &CGM,
                                                const CXXRecordDecl *RD) {
  return CGM.getCodeGenOpts().OptimizationLevel > 0 &&
         CGM.getCXXABI().canSpeculativelyEmitVTable(RD);
}

/// Compute the required linkage of the vtable for the given class.
///
/// Note that we only call this at the end of the translation unit.
llvm::GlobalVariable::LinkageTypes
CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
  if (!RD->isExternallyVisible())
    return llvm::GlobalVariable::InternalLinkage;

  // We're at the end of the translation unit, so the current key
  // function is fully correct.
  const CXXMethodDecl *keyFunction = Context.getCurrentKeyFunction(RD);
  if (keyFunction && !RD->hasAttr<DLLImportAttr>()) {
    // If this class has a key function, use that to determine the
    // linkage of the vtable.
    const FunctionDecl *def = nullptr;
    if (keyFunction->hasBody(def))
      keyFunction = cast<CXXMethodDecl>(def);

    switch (keyFunction->getTemplateSpecializationKind()) {
    case TSK_Undeclared:
    case TSK_ExplicitSpecialization:
      assert((def || CodeGenOpts.OptimizationLevel > 0 ||
              CodeGenOpts.getDebugInfo() != codegenoptions::NoDebugInfo) &&
             "Shouldn't query vtable linkage without key function, "
             "optimizations, or debug info");
      if (!def && CodeGenOpts.OptimizationLevel > 0)
        return llvm::GlobalVariable::AvailableExternallyLinkage;

      if (keyFunction->isInlined())
        return !Context.getLangOpts().AppleKext
                   ? llvm::GlobalVariable::LinkOnceODRLinkage
                   : llvm::Function::InternalLinkage;

      return llvm::GlobalVariable::ExternalLinkage;

    case TSK_ImplicitInstantiation:
      return !Context.getLangOpts().AppleKext
                 ? llvm::GlobalVariable::LinkOnceODRLinkage
                 : llvm::Function::InternalLinkage;

    case TSK_ExplicitInstantiationDefinition:
      return !Context.getLangOpts().AppleKext
                 ? llvm::GlobalVariable::WeakODRLinkage
                 : llvm::Function::InternalLinkage;

    case TSK_ExplicitInstantiationDeclaration:
      llvm_unreachable("Should not have been asked to emit this");
    }
  }

  // -fapple-kext mode does not support weak linkage, so we must use
  // internal linkage.
  if (Context.getLangOpts().AppleKext)
    return llvm::Function::InternalLinkage;

  llvm::GlobalVariable::LinkageTypes DiscardableODRLinkage =
      llvm::GlobalValue::LinkOnceODRLinkage;
  llvm::GlobalVariable::LinkageTypes NonDiscardableODRLinkage =
      llvm::GlobalValue::WeakODRLinkage;
  if (RD->hasAttr<DLLExportAttr>()) {
    // Cannot discard exported vtables.
    DiscardableODRLinkage = NonDiscardableODRLinkage;
  } else if (RD->hasAttr<DLLImportAttr>()) {
    // Imported vtables are available externally.
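    // (The real definition lives in the DLL that exports the class; this TU
    // may emit at most an available_externally copy for the optimizer's
    // benefit.)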
    DiscardableODRLinkage = llvm::GlobalVariable::AvailableExternallyLinkage;
    NonDiscardableODRLinkage = llvm::GlobalVariable::AvailableExternallyLinkage;
  }

  switch (RD->getTemplateSpecializationKind()) {
  case TSK_Undeclared:
  case TSK_ExplicitSpecialization:
  case TSK_ImplicitInstantiation:
    return DiscardableODRLinkage;

  case TSK_ExplicitInstantiationDeclaration:
    // Explicit instantiations in MSVC do not provide vtables, so we must emit
    // our own.
    if (getTarget().getCXXABI().isMicrosoft())
      return DiscardableODRLinkage;
    return shouldEmitAvailableExternallyVTable(*this, RD)
               ? llvm::GlobalVariable::AvailableExternallyLinkage
               : llvm::GlobalVariable::ExternalLinkage;

  case TSK_ExplicitInstantiationDefinition:
    return NonDiscardableODRLinkage;
  }

  llvm_unreachable("Invalid TemplateSpecializationKind!");
}

/// This is a callback from Sema to tell us that a particular vtable is
/// required to be emitted in this translation unit.
///
/// This is only called for vtables that _must_ be emitted (mainly due to key
/// functions).  For weak vtables, CodeGen tracks when they are needed and
/// emits them as-needed.
void CodeGenModule::EmitVTable(CXXRecordDecl *theClass) {
  VTables.GenerateClassData(theClass);
}

void CodeGenVTables::GenerateClassData(const CXXRecordDecl *RD) {
  if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
    DI->completeClassData(RD);

  if (RD->getNumVBases())
    CGM.getCXXABI().emitVirtualInheritanceTables(RD);

  CGM.getCXXABI().emitVTableDefinitions(*this, RD);
}

/// At this point in the translation unit, does it appear that we can
/// rely on the vtable being defined elsewhere in the program?
///
/// The response is really only definitive when called at the end of
/// the translation unit.
///
/// The only semantic restriction here is that the object file should
/// not contain a vtable definition when that vtable is defined
/// strongly elsewhere.  Otherwise, we'd just like to avoid emitting
/// vtables when unnecessary.
bool CodeGenVTables::isVTableExternal(const CXXRecordDecl *RD) {
  assert(RD->isDynamicClass() && "Non-dynamic classes have no VTable.");

  // We always synthesize vtables if they are needed in the MS ABI. MSVC doesn't
  // emit them even if there is an explicit template instantiation.
  if (CGM.getTarget().getCXXABI().isMicrosoft())
    return false;

  // If we have an explicit instantiation declaration (and not a
  // definition), the vtable is defined elsewhere.
  TemplateSpecializationKind TSK = RD->getTemplateSpecializationKind();
  if (TSK == TSK_ExplicitInstantiationDeclaration)
    return true;

  // Otherwise, if the class is an instantiated template, the
  // vtable must be defined here.
  if (TSK == TSK_ImplicitInstantiation ||
      TSK == TSK_ExplicitInstantiationDefinition)
    return false;

  // Otherwise, if the class doesn't have a key function (possibly
  // anymore), the vtable must be defined here.
  const CXXMethodDecl *keyFunction = CGM.getContext().getCurrentKeyFunction(RD);
  if (!keyFunction)
    return false;

  // Otherwise, if we don't have a definition of the key function, the
  // vtable must be defined somewhere else.
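  // Illustrative example of the key-function rule (hypothetical class): for
  //   struct S { virtual void f(); };  // S::f defined in exactly one TU
  // the TU containing the definition of S::f is the one that emits S's
  // vtable; every other TU, including this one when the body is absent,
  // treats the vtable as external.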
  return !keyFunction->hasBody();
}

/// Given that we're currently at the end of the translation unit, and
/// we've emitted a reference to the vtable for this class, should
/// we define that vtable?
static bool shouldEmitVTableAtEndOfTranslationUnit(CodeGenModule &CGM,
                                                   const CXXRecordDecl *RD) {
  // If the vtable is internal then it has to be emitted in this TU.
  if (!CGM.getVTables().isVTableExternal(RD))
    return true;

  // If it's external then maybe we will need it as available_externally.
  return shouldEmitAvailableExternallyVTable(CGM, RD);
}

/// Given that at some point we emitted a reference to one or more
/// vtables, and that we are now at the end of the translation unit,
/// decide whether we should emit them.
void CodeGenModule::EmitDeferredVTables() {
#ifndef NDEBUG
  // Remember the size of DeferredVTables, because we're going to assume
  // that this entire operation doesn't modify it.
  size_t savedSize = DeferredVTables.size();
#endif

  for (const CXXRecordDecl *RD : DeferredVTables)
    if (shouldEmitVTableAtEndOfTranslationUnit(*this, RD))
      VTables.GenerateClassData(RD);
    else if (shouldOpportunisticallyEmitVTables())
      OpportunisticVTables.push_back(RD);

  assert(savedSize == DeferredVTables.size() &&
         "deferred extra vtables during vtable emission?");
  DeferredVTables.clear();
}

bool CodeGenModule::HasHiddenLTOVisibility(const CXXRecordDecl *RD) {
  LinkageInfo LV = RD->getLinkageAndVisibility();
  if (!isExternallyVisible(LV.getLinkage()))
    return true;

  if (RD->hasAttr<LTOVisibilityPublicAttr>() || RD->hasAttr<UuidAttr>())
    return false;

  if (getTriple().isOSBinFormatCOFF()) {
    if (RD->hasAttr<DLLExportAttr>() || RD->hasAttr<DLLImportAttr>())
      return false;
  } else {
    if (LV.getVisibility() != HiddenVisibility)
      return false;
  }

  if (getCodeGenOpts().LTOVisibilityPublicStd) {
    const DeclContext *DC = RD;
    while (1) {
      auto *D = cast<Decl>(DC);
      DC = DC->getParent();
      if (isa<TranslationUnitDecl>(DC->getRedeclContext())) {
        if (auto *ND = dyn_cast<NamespaceDecl>(D))
          if (const IdentifierInfo *II = ND->getIdentifier())
            if (II->isStr("std") || II->isStr("stdext"))
              return false;
        break;
      }
    }
  }

  return true;
}

void CodeGenModule::EmitVTableTypeMetadata(llvm::GlobalVariable *VTable,
                                           const VTableLayout &VTLayout) {
  if (!getCodeGenOpts().LTOUnit)
    return;

  CharUnits PointerWidth =
      Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));

  typedef std::pair<const CXXRecordDecl *, unsigned> BSEntry;
  std::vector<BSEntry> BitsetEntries;
  // Create a bit set entry for each address point.
  for (auto &&AP : VTLayout.getAddressPoints())
    BitsetEntries.push_back(
        std::make_pair(AP.first.getBase(),
                       VTLayout.getVTableOffset(AP.second.VTableIndex) +
                           AP.second.AddressPointIndex));

  // Sort the bit set entries for determinism.
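  // (The comparator below orders entries by mangled type name and then by
  // address-point offset, so the emitted type metadata does not depend on
  // the iteration order of the address-point map.)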
  std::sort(BitsetEntries.begin(), BitsetEntries.end(),
            [this](const BSEntry &E1, const BSEntry &E2) {
              if (&E1 == &E2)
                return false;

              std::string S1;
              llvm::raw_string_ostream O1(S1);
              getCXXABI().getMangleContext().mangleTypeName(
                  QualType(E1.first->getTypeForDecl(), 0), O1);
              O1.flush();

              std::string S2;
              llvm::raw_string_ostream O2(S2);
              getCXXABI().getMangleContext().mangleTypeName(
                  QualType(E2.first->getTypeForDecl(), 0), O2);
              O2.flush();

              if (S1 < S2)
                return true;
              if (S1 != S2)
                return false;

              return E1.second < E2.second;
            });

  for (auto BitsetEntry : BitsetEntries)
    AddVTableTypeMetadata(VTable, PointerWidth * BitsetEntry.second,
                          BitsetEntry.first);
}