1 //===--- CGVTables.cpp - Emit LLVM Code for C++ vtables -------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This contains code dealing with C++ code generation of virtual tables. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "CGCXXABI.h" 15 #include "CodeGenFunction.h" 16 #include "CodeGenModule.h" 17 #include "clang/AST/CXXInheritance.h" 18 #include "clang/AST/RecordLayout.h" 19 #include "clang/CodeGen/CGFunctionInfo.h" 20 #include "clang/Frontend/CodeGenOptions.h" 21 #include "llvm/Support/Format.h" 22 #include "llvm/Transforms/Utils/Cloning.h" 23 #include <algorithm> 24 #include <cstdio> 25 26 using namespace clang; 27 using namespace CodeGen; 28 29 CodeGenVTables::CodeGenVTables(CodeGenModule &CGM) 30 : CGM(CGM), VTContext(CGM.getContext().getVTableContext()) {} 31 32 llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD, 33 const ThunkInfo &Thunk) { 34 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); 35 36 // Compute the mangled name. 
37 SmallString<256> Name; 38 llvm::raw_svector_ostream Out(Name); 39 if (const CXXDestructorDecl* DD = dyn_cast<CXXDestructorDecl>(MD)) 40 getCXXABI().getMangleContext().mangleCXXDtorThunk(DD, GD.getDtorType(), 41 Thunk.This, Out); 42 else 43 getCXXABI().getMangleContext().mangleThunk(MD, Thunk, Out); 44 45 llvm::Type *Ty = getTypes().GetFunctionTypeForVTable(GD); 46 return GetOrCreateLLVMFunction(Name, Ty, GD, /*ForVTable=*/true, 47 /*DontDefer=*/true, /*IsThunk=*/true); 48 } 49 50 static void setThunkVisibility(CodeGenModule &CGM, const CXXMethodDecl *MD, 51 const ThunkInfo &Thunk, llvm::Function *Fn) { 52 CGM.setGlobalVisibility(Fn, MD); 53 } 54 55 static void setThunkProperties(CodeGenModule &CGM, const ThunkInfo &Thunk, 56 llvm::Function *ThunkFn, bool ForVTable, 57 GlobalDecl GD) { 58 CGM.setFunctionLinkage(GD, ThunkFn); 59 CGM.getCXXABI().setThunkLinkage(ThunkFn, ForVTable, GD, 60 !Thunk.Return.isEmpty()); 61 62 // Set the right visibility. 63 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); 64 setThunkVisibility(CGM, MD, Thunk, ThunkFn); 65 66 if (CGM.supportsCOMDAT() && ThunkFn->isWeakForLinker()) 67 ThunkFn->setComdat(CGM.getModule().getOrInsertComdat(ThunkFn->getName())); 68 } 69 70 #ifndef NDEBUG 71 static bool similar(const ABIArgInfo &infoL, CanQualType typeL, 72 const ABIArgInfo &infoR, CanQualType typeR) { 73 return (infoL.getKind() == infoR.getKind() && 74 (typeL == typeR || 75 (isa<PointerType>(typeL) && isa<PointerType>(typeR)) || 76 (isa<ReferenceType>(typeL) && isa<ReferenceType>(typeR)))); 77 } 78 #endif 79 80 static RValue PerformReturnAdjustment(CodeGenFunction &CGF, 81 QualType ResultType, RValue RV, 82 const ThunkInfo &Thunk) { 83 // Emit the return adjustment. 
84 bool NullCheckValue = !ResultType->isReferenceType(); 85 86 llvm::BasicBlock *AdjustNull = nullptr; 87 llvm::BasicBlock *AdjustNotNull = nullptr; 88 llvm::BasicBlock *AdjustEnd = nullptr; 89 90 llvm::Value *ReturnValue = RV.getScalarVal(); 91 92 if (NullCheckValue) { 93 AdjustNull = CGF.createBasicBlock("adjust.null"); 94 AdjustNotNull = CGF.createBasicBlock("adjust.notnull"); 95 AdjustEnd = CGF.createBasicBlock("adjust.end"); 96 97 llvm::Value *IsNull = CGF.Builder.CreateIsNull(ReturnValue); 98 CGF.Builder.CreateCondBr(IsNull, AdjustNull, AdjustNotNull); 99 CGF.EmitBlock(AdjustNotNull); 100 } 101 102 auto ClassDecl = ResultType->getPointeeType()->getAsCXXRecordDecl(); 103 auto ClassAlign = CGF.CGM.getClassPointerAlignment(ClassDecl); 104 ReturnValue = CGF.CGM.getCXXABI().performReturnAdjustment(CGF, 105 Address(ReturnValue, ClassAlign), 106 Thunk.Return); 107 108 if (NullCheckValue) { 109 CGF.Builder.CreateBr(AdjustEnd); 110 CGF.EmitBlock(AdjustNull); 111 CGF.Builder.CreateBr(AdjustEnd); 112 CGF.EmitBlock(AdjustEnd); 113 114 llvm::PHINode *PHI = CGF.Builder.CreatePHI(ReturnValue->getType(), 2); 115 PHI->addIncoming(ReturnValue, AdjustNotNull); 116 PHI->addIncoming(llvm::Constant::getNullValue(ReturnValue->getType()), 117 AdjustNull); 118 ReturnValue = PHI; 119 } 120 121 return RValue::get(ReturnValue); 122 } 123 124 // This function does roughly the same thing as GenerateThunk, but in a 125 // very different way, so that va_start and va_end work correctly. 126 // FIXME: This function assumes "this" is the first non-sret LLVM argument of 127 // a function, and that there is an alloca built in the entry block 128 // for all accesses to "this". 129 // FIXME: This function assumes there is only one "ret" statement per function. 130 // FIXME: Cloning isn't correct in the presence of indirect goto! 131 // FIXME: This implementation of thunks bloats codesize by duplicating the 132 // function definition. There are alternatives: 133 // 1. 
Add some sort of stub support to LLVM for cases where we can 134 // do a this adjustment, then a sibcall. 135 // 2. We could transform the definition to take a va_list instead of an 136 // actual variable argument list, then have the thunks (including a 137 // no-op thunk for the regular definition) call va_start/va_end. 138 // There's a bit of per-call overhead for this solution, but it's 139 // better for codesize if the definition is long. 140 llvm::Function * 141 CodeGenFunction::GenerateVarArgsThunk(llvm::Function *Fn, 142 const CGFunctionInfo &FnInfo, 143 GlobalDecl GD, const ThunkInfo &Thunk) { 144 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); 145 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); 146 QualType ResultType = FPT->getReturnType(); 147 148 // Get the original function 149 assert(FnInfo.isVariadic()); 150 llvm::Type *Ty = CGM.getTypes().GetFunctionType(FnInfo); 151 llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true); 152 llvm::Function *BaseFn = cast<llvm::Function>(Callee); 153 154 // Clone to thunk. 155 llvm::ValueToValueMapTy VMap; 156 llvm::Function *NewFn = llvm::CloneFunction(BaseFn, VMap); 157 Fn->replaceAllUsesWith(NewFn); 158 NewFn->takeName(Fn); 159 Fn->eraseFromParent(); 160 Fn = NewFn; 161 162 // "Initialize" CGF (minimally). 163 CurFn = Fn; 164 165 // Get the "this" value 166 llvm::Function::arg_iterator AI = Fn->arg_begin(); 167 if (CGM.ReturnTypeUsesSRet(FnInfo)) 168 ++AI; 169 170 // Find the first store of "this", which will be to the alloca associated 171 // with "this". 
172 Address ThisPtr(&*AI, CGM.getClassPointerAlignment(MD->getParent())); 173 llvm::BasicBlock *EntryBB = &Fn->front(); 174 llvm::BasicBlock::iterator ThisStore = 175 std::find_if(EntryBB->begin(), EntryBB->end(), [&](llvm::Instruction &I) { 176 return isa<llvm::StoreInst>(I) && 177 I.getOperand(0) == ThisPtr.getPointer(); 178 }); 179 assert(ThisStore != EntryBB->end() && 180 "Store of this should be in entry block?"); 181 // Adjust "this", if necessary. 182 Builder.SetInsertPoint(&*ThisStore); 183 llvm::Value *AdjustedThisPtr = 184 CGM.getCXXABI().performThisAdjustment(*this, ThisPtr, Thunk.This); 185 ThisStore->setOperand(0, AdjustedThisPtr); 186 187 if (!Thunk.Return.isEmpty()) { 188 // Fix up the returned value, if necessary. 189 for (llvm::BasicBlock &BB : *Fn) { 190 llvm::Instruction *T = BB.getTerminator(); 191 if (isa<llvm::ReturnInst>(T)) { 192 RValue RV = RValue::get(T->getOperand(0)); 193 T->eraseFromParent(); 194 Builder.SetInsertPoint(&BB); 195 RV = PerformReturnAdjustment(*this, ResultType, RV, Thunk); 196 Builder.CreateRet(RV.getScalarVal()); 197 break; 198 } 199 } 200 } 201 202 return Fn; 203 } 204 205 void CodeGenFunction::StartThunk(llvm::Function *Fn, GlobalDecl GD, 206 const CGFunctionInfo &FnInfo) { 207 assert(!CurGD.getDecl() && "CurGD was already set!"); 208 CurGD = GD; 209 CurFuncIsThunk = true; 210 211 // Build FunctionArgs. 212 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); 213 QualType ThisType = MD->getThisType(getContext()); 214 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); 215 QualType ResultType = CGM.getCXXABI().HasThisReturn(GD) 216 ? ThisType 217 : CGM.getCXXABI().hasMostDerivedReturn(GD) 218 ? CGM.getContext().VoidPtrTy 219 : FPT->getReturnType(); 220 FunctionArgList FunctionArgs; 221 222 // Create the implicit 'this' parameter declaration. 223 CGM.getCXXABI().buildThisParam(*this, FunctionArgs); 224 225 // Add the rest of the parameters. 
226 FunctionArgs.append(MD->param_begin(), MD->param_end()); 227 228 if (isa<CXXDestructorDecl>(MD)) 229 CGM.getCXXABI().addImplicitStructorParams(*this, ResultType, FunctionArgs); 230 231 // Start defining the function. 232 StartFunction(GlobalDecl(), ResultType, Fn, FnInfo, FunctionArgs, 233 MD->getLocation(), MD->getLocation()); 234 235 // Since we didn't pass a GlobalDecl to StartFunction, do this ourselves. 236 CGM.getCXXABI().EmitInstanceFunctionProlog(*this); 237 CXXThisValue = CXXABIThisValue; 238 CurCodeDecl = MD; 239 CurFuncDecl = MD; 240 } 241 242 void CodeGenFunction::FinishThunk() { 243 // Clear these to restore the invariants expected by 244 // StartFunction/FinishFunction. 245 CurCodeDecl = nullptr; 246 CurFuncDecl = nullptr; 247 248 FinishFunction(); 249 } 250 251 void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Constant *CalleePtr, 252 const ThunkInfo *Thunk) { 253 assert(isa<CXXMethodDecl>(CurGD.getDecl()) && 254 "Please use a new CGF for this thunk"); 255 const CXXMethodDecl *MD = cast<CXXMethodDecl>(CurGD.getDecl()); 256 257 // Adjust the 'this' pointer if necessary 258 llvm::Value *AdjustedThisPtr = 259 Thunk ? CGM.getCXXABI().performThisAdjustment( 260 *this, LoadCXXThisAddress(), Thunk->This) 261 : LoadCXXThis(); 262 263 if (CurFnInfo->usesInAlloca()) { 264 // We don't handle return adjusting thunks, because they require us to call 265 // the copy constructor. For now, fall through and pretend the return 266 // adjustment was empty so we don't crash. 267 if (Thunk && !Thunk->Return.isEmpty()) { 268 CGM.ErrorUnsupported( 269 MD, "non-trivial argument copy for return-adjusting thunk"); 270 } 271 EmitMustTailThunk(MD, AdjustedThisPtr, CalleePtr); 272 return; 273 } 274 275 // Start building CallArgs. 
276 CallArgList CallArgs; 277 QualType ThisType = MD->getThisType(getContext()); 278 CallArgs.add(RValue::get(AdjustedThisPtr), ThisType); 279 280 if (isa<CXXDestructorDecl>(MD)) 281 CGM.getCXXABI().adjustCallArgsForDestructorThunk(*this, CurGD, CallArgs); 282 283 // Add the rest of the arguments. 284 for (const ParmVarDecl *PD : MD->parameters()) 285 EmitDelegateCallArg(CallArgs, PD, PD->getLocStart()); 286 287 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); 288 289 #ifndef NDEBUG 290 const CGFunctionInfo &CallFnInfo = CGM.getTypes().arrangeCXXMethodCall( 291 CallArgs, FPT, RequiredArgs::forPrototypePlus(FPT, 1, MD)); 292 assert(CallFnInfo.getRegParm() == CurFnInfo->getRegParm() && 293 CallFnInfo.isNoReturn() == CurFnInfo->isNoReturn() && 294 CallFnInfo.getCallingConvention() == CurFnInfo->getCallingConvention()); 295 assert(isa<CXXDestructorDecl>(MD) || // ignore dtor return types 296 similar(CallFnInfo.getReturnInfo(), CallFnInfo.getReturnType(), 297 CurFnInfo->getReturnInfo(), CurFnInfo->getReturnType())); 298 assert(CallFnInfo.arg_size() == CurFnInfo->arg_size()); 299 for (unsigned i = 0, e = CurFnInfo->arg_size(); i != e; ++i) 300 assert(similar(CallFnInfo.arg_begin()[i].info, 301 CallFnInfo.arg_begin()[i].type, 302 CurFnInfo->arg_begin()[i].info, 303 CurFnInfo->arg_begin()[i].type)); 304 #endif 305 306 // Determine whether we have a return value slot to use. 307 QualType ResultType = CGM.getCXXABI().HasThisReturn(CurGD) 308 ? ThisType 309 : CGM.getCXXABI().hasMostDerivedReturn(CurGD) 310 ? CGM.getContext().VoidPtrTy 311 : FPT->getReturnType(); 312 ReturnValueSlot Slot; 313 if (!ResultType->isVoidType() && 314 CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect && 315 !hasScalarEvaluationKind(CurFnInfo->getReturnType())) 316 Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified()); 317 318 // Now emit our call. 
319 llvm::Instruction *CallOrInvoke; 320 CGCallee Callee = CGCallee::forDirect(CalleePtr, MD); 321 RValue RV = EmitCall(*CurFnInfo, Callee, Slot, CallArgs, &CallOrInvoke); 322 323 // Consider return adjustment if we have ThunkInfo. 324 if (Thunk && !Thunk->Return.isEmpty()) 325 RV = PerformReturnAdjustment(*this, ResultType, RV, *Thunk); 326 else if (llvm::CallInst* Call = dyn_cast<llvm::CallInst>(CallOrInvoke)) 327 Call->setTailCallKind(llvm::CallInst::TCK_Tail); 328 329 // Emit return. 330 if (!ResultType->isVoidType() && Slot.isNull()) 331 CGM.getCXXABI().EmitReturnFromThunk(*this, RV, ResultType); 332 333 // Disable the final ARC autorelease. 334 AutoreleaseResult = false; 335 336 FinishThunk(); 337 } 338 339 void CodeGenFunction::EmitMustTailThunk(const CXXMethodDecl *MD, 340 llvm::Value *AdjustedThisPtr, 341 llvm::Value *CalleePtr) { 342 // Emitting a musttail call thunk doesn't use any of the CGCall.cpp machinery 343 // to translate AST arguments into LLVM IR arguments. For thunks, we know 344 // that the caller prototype more or less matches the callee prototype with 345 // the exception of 'this'. 346 SmallVector<llvm::Value *, 8> Args; 347 for (llvm::Argument &A : CurFn->args()) 348 Args.push_back(&A); 349 350 // Set the adjusted 'this' pointer. 351 const ABIArgInfo &ThisAI = CurFnInfo->arg_begin()->info; 352 if (ThisAI.isDirect()) { 353 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo(); 354 int ThisArgNo = RetAI.isIndirect() && !RetAI.isSRetAfterThis() ? 
1 : 0; 355 llvm::Type *ThisType = Args[ThisArgNo]->getType(); 356 if (ThisType != AdjustedThisPtr->getType()) 357 AdjustedThisPtr = Builder.CreateBitCast(AdjustedThisPtr, ThisType); 358 Args[ThisArgNo] = AdjustedThisPtr; 359 } else { 360 assert(ThisAI.isInAlloca() && "this is passed directly or inalloca"); 361 Address ThisAddr = GetAddrOfLocalVar(CXXABIThisDecl); 362 llvm::Type *ThisType = ThisAddr.getElementType(); 363 if (ThisType != AdjustedThisPtr->getType()) 364 AdjustedThisPtr = Builder.CreateBitCast(AdjustedThisPtr, ThisType); 365 Builder.CreateStore(AdjustedThisPtr, ThisAddr); 366 } 367 368 // Emit the musttail call manually. Even if the prologue pushed cleanups, we 369 // don't actually want to run them. 370 llvm::CallInst *Call = Builder.CreateCall(CalleePtr, Args); 371 Call->setTailCallKind(llvm::CallInst::TCK_MustTail); 372 373 // Apply the standard set of call attributes. 374 unsigned CallingConv; 375 CodeGen::AttributeListType AttributeList; 376 CGM.ConstructAttributeList(CalleePtr->getName(), 377 *CurFnInfo, MD, AttributeList, 378 CallingConv, /*AttrOnCallSite=*/true); 379 llvm::AttributeSet Attrs = 380 llvm::AttributeSet::get(getLLVMContext(), AttributeList); 381 Call->setAttributes(Attrs); 382 Call->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv)); 383 384 if (Call->getType()->isVoidTy()) 385 Builder.CreateRetVoid(); 386 else 387 Builder.CreateRet(Call); 388 389 // Finish the function to maintain CodeGenFunction invariants. 390 // FIXME: Don't emit unreachable code. 391 EmitBlock(createBasicBlock()); 392 FinishFunction(); 393 } 394 395 void CodeGenFunction::generateThunk(llvm::Function *Fn, 396 const CGFunctionInfo &FnInfo, 397 GlobalDecl GD, const ThunkInfo &Thunk) { 398 StartThunk(Fn, GD, FnInfo); 399 400 // Get our callee. 
401 llvm::Type *Ty = 402 CGM.getTypes().GetFunctionType(CGM.getTypes().arrangeGlobalDeclaration(GD)); 403 llvm::Constant *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true); 404 405 // Make the call and return the result. 406 EmitCallAndReturnForThunk(Callee, &Thunk); 407 } 408 409 void CodeGenVTables::emitThunk(GlobalDecl GD, const ThunkInfo &Thunk, 410 bool ForVTable) { 411 const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeGlobalDeclaration(GD); 412 413 // FIXME: re-use FnInfo in this computation. 414 llvm::Constant *C = CGM.GetAddrOfThunk(GD, Thunk); 415 llvm::GlobalValue *Entry; 416 417 // Strip off a bitcast if we got one back. 418 if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(C)) { 419 assert(CE->getOpcode() == llvm::Instruction::BitCast); 420 Entry = cast<llvm::GlobalValue>(CE->getOperand(0)); 421 } else { 422 Entry = cast<llvm::GlobalValue>(C); 423 } 424 425 // There's already a declaration with the same name, check if it has the same 426 // type or if we need to replace it. 427 if (Entry->getType()->getElementType() != 428 CGM.getTypes().GetFunctionTypeForVTable(GD)) { 429 llvm::GlobalValue *OldThunkFn = Entry; 430 431 // If the types mismatch then we have to rewrite the definition. 432 assert(OldThunkFn->isDeclaration() && 433 "Shouldn't replace non-declaration"); 434 435 // Remove the name from the old thunk function and get a new thunk. 436 OldThunkFn->setName(StringRef()); 437 Entry = cast<llvm::GlobalValue>(CGM.GetAddrOfThunk(GD, Thunk)); 438 439 // If needed, replace the old thunk with a bitcast. 440 if (!OldThunkFn->use_empty()) { 441 llvm::Constant *NewPtrForOldDecl = 442 llvm::ConstantExpr::getBitCast(Entry, OldThunkFn->getType()); 443 OldThunkFn->replaceAllUsesWith(NewPtrForOldDecl); 444 } 445 446 // Remove the old thunk. 
447 OldThunkFn->eraseFromParent(); 448 } 449 450 llvm::Function *ThunkFn = cast<llvm::Function>(Entry); 451 bool ABIHasKeyFunctions = CGM.getTarget().getCXXABI().hasKeyFunctions(); 452 bool UseAvailableExternallyLinkage = ForVTable && ABIHasKeyFunctions; 453 454 if (!ThunkFn->isDeclaration()) { 455 if (!ABIHasKeyFunctions || UseAvailableExternallyLinkage) { 456 // There is already a thunk emitted for this function, do nothing. 457 return; 458 } 459 460 setThunkProperties(CGM, Thunk, ThunkFn, ForVTable, GD); 461 return; 462 } 463 464 CGM.SetLLVMFunctionAttributesForDefinition(GD.getDecl(), ThunkFn); 465 466 if (ThunkFn->isVarArg()) { 467 // Varargs thunks are special; we can't just generate a call because 468 // we can't copy the varargs. Our implementation is rather 469 // expensive/sucky at the moment, so don't generate the thunk unless 470 // we have to. 471 // FIXME: Do something better here; GenerateVarArgsThunk is extremely ugly. 472 if (UseAvailableExternallyLinkage) 473 return; 474 ThunkFn = 475 CodeGenFunction(CGM).GenerateVarArgsThunk(ThunkFn, FnInfo, GD, Thunk); 476 } else { 477 // Normal thunk body generation. 478 CodeGenFunction(CGM).generateThunk(ThunkFn, FnInfo, GD, Thunk); 479 } 480 481 setThunkProperties(CGM, Thunk, ThunkFn, ForVTable, GD); 482 } 483 484 void CodeGenVTables::maybeEmitThunkForVTable(GlobalDecl GD, 485 const ThunkInfo &Thunk) { 486 // If the ABI has key functions, only the TU with the key function should emit 487 // the thunk. However, we can allow inlining of thunks if we emit them with 488 // available_externally linkage together with vtables when optimizations are 489 // enabled. 490 if (CGM.getTarget().getCXXABI().hasKeyFunctions() && 491 !CGM.getCodeGenOpts().OptimizationLevel) 492 return; 493 494 // We can't emit thunks for member functions with incomplete types. 
495 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); 496 if (!CGM.getTypes().isFuncTypeConvertible( 497 MD->getType()->castAs<FunctionType>())) 498 return; 499 500 emitThunk(GD, Thunk, /*ForVTable=*/true); 501 } 502 503 void CodeGenVTables::EmitThunks(GlobalDecl GD) 504 { 505 const CXXMethodDecl *MD = 506 cast<CXXMethodDecl>(GD.getDecl())->getCanonicalDecl(); 507 508 // We don't need to generate thunks for the base destructor. 509 if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base) 510 return; 511 512 const VTableContextBase::ThunkInfoVectorTy *ThunkInfoVector = 513 VTContext->getThunkInfo(GD); 514 515 if (!ThunkInfoVector) 516 return; 517 518 for (const ThunkInfo& Thunk : *ThunkInfoVector) 519 emitThunk(GD, Thunk, /*ForVTable=*/false); 520 } 521 522 llvm::Constant *CodeGenVTables::CreateVTableComponent( 523 unsigned Idx, const VTableLayout &VTLayout, llvm::Constant *RTTI, 524 unsigned &NextVTableThunkIndex) { 525 VTableComponent Component = VTLayout.vtable_components()[Idx]; 526 527 auto OffsetConstant = [&](CharUnits Offset) { 528 return llvm::ConstantExpr::getIntToPtr( 529 llvm::ConstantInt::get(CGM.PtrDiffTy, Offset.getQuantity()), 530 CGM.Int8PtrTy); 531 }; 532 533 switch (Component.getKind()) { 534 case VTableComponent::CK_VCallOffset: 535 return OffsetConstant(Component.getVCallOffset()); 536 537 case VTableComponent::CK_VBaseOffset: 538 return OffsetConstant(Component.getVBaseOffset()); 539 540 case VTableComponent::CK_OffsetToTop: 541 return OffsetConstant(Component.getOffsetToTop()); 542 543 case VTableComponent::CK_RTTI: 544 return RTTI; 545 546 case VTableComponent::CK_FunctionPointer: 547 case VTableComponent::CK_CompleteDtorPointer: 548 case VTableComponent::CK_DeletingDtorPointer: { 549 GlobalDecl GD; 550 551 // Get the right global decl. 
552 switch (Component.getKind()) { 553 default: 554 llvm_unreachable("Unexpected vtable component kind"); 555 case VTableComponent::CK_FunctionPointer: 556 GD = Component.getFunctionDecl(); 557 break; 558 case VTableComponent::CK_CompleteDtorPointer: 559 GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Complete); 560 break; 561 case VTableComponent::CK_DeletingDtorPointer: 562 GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Deleting); 563 break; 564 } 565 566 if (CGM.getLangOpts().CUDA) { 567 // Emit NULL for methods we can't codegen on this 568 // side. Otherwise we'd end up with vtable with unresolved 569 // references. 570 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); 571 // OK on device side: functions w/ __device__ attribute 572 // OK on host side: anything except __device__-only functions. 573 bool CanEmitMethod = 574 CGM.getLangOpts().CUDAIsDevice 575 ? MD->hasAttr<CUDADeviceAttr>() 576 : (MD->hasAttr<CUDAHostAttr>() || !MD->hasAttr<CUDADeviceAttr>()); 577 if (!CanEmitMethod) 578 return llvm::ConstantExpr::getNullValue(CGM.Int8PtrTy); 579 // Method is acceptable, continue processing as usual. 580 } 581 582 auto SpecialVirtualFn = [&](llvm::Constant *&Cache, StringRef Name) { 583 if (!Cache) { 584 llvm::FunctionType *Ty = 585 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false); 586 Cache = CGM.CreateRuntimeFunction(Ty, Name); 587 if (auto *F = dyn_cast<llvm::Function>(Cache)) 588 F->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); 589 Cache = llvm::ConstantExpr::getBitCast(Cache, CGM.Int8PtrTy); 590 } 591 return Cache; 592 }; 593 594 if (cast<CXXMethodDecl>(GD.getDecl())->isPure()) 595 // We have a pure virtual member function. 596 return SpecialVirtualFn(PureVirtualFn, 597 CGM.getCXXABI().GetPureVirtualCallName()); 598 599 if (cast<CXXMethodDecl>(GD.getDecl())->isDeleted()) 600 return SpecialVirtualFn(DeletedVirtualFn, 601 CGM.getCXXABI().GetDeletedVirtualCallName()); 602 603 // Check if we should use a thunk. 
604 if (NextVTableThunkIndex < VTLayout.vtable_thunks().size() && 605 VTLayout.vtable_thunks()[NextVTableThunkIndex].first == Idx) { 606 const ThunkInfo &Thunk = 607 VTLayout.vtable_thunks()[NextVTableThunkIndex].second; 608 609 maybeEmitThunkForVTable(GD, Thunk); 610 NextVTableThunkIndex++; 611 return CGM.GetAddrOfThunk(GD, Thunk); 612 } 613 614 llvm::Type *Ty = CGM.getTypes().GetFunctionTypeForVTable(GD); 615 return CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true); 616 } 617 618 case VTableComponent::CK_UnusedFunctionPointer: 619 return llvm::ConstantExpr::getNullValue(CGM.Int8PtrTy); 620 } 621 622 llvm_unreachable("Unexpected vtable component kind"); 623 } 624 625 llvm::Constant * 626 CodeGenVTables::CreateVTableInitializer(const VTableLayout &VTLayout, 627 llvm::Constant *RTTI) { 628 SmallVector<llvm::Constant *, 64> Inits; 629 unsigned NextVTableThunkIndex = 0; 630 631 for (unsigned I = 0, E = VTLayout.vtable_components().size(); I != E; ++I) { 632 llvm::Constant *Init = 633 CreateVTableComponent(I, VTLayout, RTTI, NextVTableThunkIndex); 634 Inits.push_back(llvm::ConstantExpr::getBitCast(Init, CGM.Int8PtrTy)); 635 } 636 637 llvm::ArrayType *ArrayType = 638 llvm::ArrayType::get(CGM.Int8PtrTy, VTLayout.vtable_components().size()); 639 return llvm::ConstantArray::get(ArrayType, Inits); 640 } 641 642 llvm::GlobalVariable * 643 CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD, 644 const BaseSubobject &Base, 645 bool BaseIsVirtual, 646 llvm::GlobalVariable::LinkageTypes Linkage, 647 VTableAddressPointsMapTy& AddressPoints) { 648 if (CGDebugInfo *DI = CGM.getModuleDebugInfo()) 649 DI->completeClassData(Base.getBase()); 650 651 std::unique_ptr<VTableLayout> VTLayout( 652 getItaniumVTableContext().createConstructionVTableLayout( 653 Base.getBase(), Base.getBaseOffset(), BaseIsVirtual, RD)); 654 655 // Add the address points. 656 AddressPoints = VTLayout->getAddressPoints(); 657 658 // Get the mangled construction vtable name. 
659 SmallString<256> OutName; 660 llvm::raw_svector_ostream Out(OutName); 661 cast<ItaniumMangleContext>(CGM.getCXXABI().getMangleContext()) 662 .mangleCXXCtorVTable(RD, Base.getBaseOffset().getQuantity(), 663 Base.getBase(), Out); 664 StringRef Name = OutName.str(); 665 666 llvm::ArrayType *ArrayType = 667 llvm::ArrayType::get(CGM.Int8PtrTy, VTLayout->vtable_components().size()); 668 669 // Construction vtable symbols are not part of the Itanium ABI, so we cannot 670 // guarantee that they actually will be available externally. Instead, when 671 // emitting an available_externally VTT, we provide references to an internal 672 // linkage construction vtable. The ABI only requires complete-object vtables 673 // to be the same for all instances of a type, not construction vtables. 674 if (Linkage == llvm::GlobalVariable::AvailableExternallyLinkage) 675 Linkage = llvm::GlobalVariable::InternalLinkage; 676 677 // Create the variable that will hold the construction vtable. 678 llvm::GlobalVariable *VTable = 679 CGM.CreateOrReplaceCXXRuntimeVariable(Name, ArrayType, Linkage); 680 CGM.setGlobalVisibility(VTable, RD); 681 682 // V-tables are always unnamed_addr. 683 VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); 684 685 llvm::Constant *RTTI = CGM.GetAddrOfRTTIDescriptor( 686 CGM.getContext().getTagDeclType(Base.getBase())); 687 688 // Create and set the initializer. 689 llvm::Constant *Init = CreateVTableInitializer(*VTLayout, RTTI); 690 VTable->setInitializer(Init); 691 692 CGM.EmitVTableTypeMetadata(VTable, *VTLayout.get()); 693 694 return VTable; 695 } 696 697 static bool shouldEmitAvailableExternallyVTable(const CodeGenModule &CGM, 698 const CXXRecordDecl *RD) { 699 return CGM.getCodeGenOpts().OptimizationLevel > 0 && 700 CGM.getCXXABI().canSpeculativelyEmitVTable(RD); 701 } 702 703 /// Compute the required linkage of the vtable for the given class. 704 /// 705 /// Note that we only call this at the end of the translation unit. 
706 llvm::GlobalVariable::LinkageTypes 707 CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) { 708 if (!RD->isExternallyVisible()) 709 return llvm::GlobalVariable::InternalLinkage; 710 711 // We're at the end of the translation unit, so the current key 712 // function is fully correct. 713 const CXXMethodDecl *keyFunction = Context.getCurrentKeyFunction(RD); 714 if (keyFunction && !RD->hasAttr<DLLImportAttr>()) { 715 // If this class has a key function, use that to determine the 716 // linkage of the vtable. 717 const FunctionDecl *def = nullptr; 718 if (keyFunction->hasBody(def)) 719 keyFunction = cast<CXXMethodDecl>(def); 720 721 switch (keyFunction->getTemplateSpecializationKind()) { 722 case TSK_Undeclared: 723 case TSK_ExplicitSpecialization: 724 assert((def || CodeGenOpts.OptimizationLevel > 0) && 725 "Shouldn't query vtable linkage without key function or " 726 "optimizations"); 727 if (!def && CodeGenOpts.OptimizationLevel > 0) 728 return llvm::GlobalVariable::AvailableExternallyLinkage; 729 730 if (keyFunction->isInlined()) 731 return !Context.getLangOpts().AppleKext ? 732 llvm::GlobalVariable::LinkOnceODRLinkage : 733 llvm::Function::InternalLinkage; 734 735 return llvm::GlobalVariable::ExternalLinkage; 736 737 case TSK_ImplicitInstantiation: 738 return !Context.getLangOpts().AppleKext ? 739 llvm::GlobalVariable::LinkOnceODRLinkage : 740 llvm::Function::InternalLinkage; 741 742 case TSK_ExplicitInstantiationDefinition: 743 return !Context.getLangOpts().AppleKext ? 744 llvm::GlobalVariable::WeakODRLinkage : 745 llvm::Function::InternalLinkage; 746 747 case TSK_ExplicitInstantiationDeclaration: 748 llvm_unreachable("Should not have been asked to emit this"); 749 } 750 } 751 752 // -fapple-kext mode does not support weak linkage, so we must use 753 // internal linkage. 
754 if (Context.getLangOpts().AppleKext) 755 return llvm::Function::InternalLinkage; 756 757 llvm::GlobalVariable::LinkageTypes DiscardableODRLinkage = 758 llvm::GlobalValue::LinkOnceODRLinkage; 759 llvm::GlobalVariable::LinkageTypes NonDiscardableODRLinkage = 760 llvm::GlobalValue::WeakODRLinkage; 761 if (RD->hasAttr<DLLExportAttr>()) { 762 // Cannot discard exported vtables. 763 DiscardableODRLinkage = NonDiscardableODRLinkage; 764 } else if (RD->hasAttr<DLLImportAttr>()) { 765 // Imported vtables are available externally. 766 DiscardableODRLinkage = llvm::GlobalVariable::AvailableExternallyLinkage; 767 NonDiscardableODRLinkage = llvm::GlobalVariable::AvailableExternallyLinkage; 768 } 769 770 switch (RD->getTemplateSpecializationKind()) { 771 case TSK_Undeclared: 772 case TSK_ExplicitSpecialization: 773 case TSK_ImplicitInstantiation: 774 return DiscardableODRLinkage; 775 776 case TSK_ExplicitInstantiationDeclaration: 777 // Explicit instantiations in MSVC do not provide vtables, so we must emit 778 // our own. 779 if (getTarget().getCXXABI().isMicrosoft()) 780 return DiscardableODRLinkage; 781 return shouldEmitAvailableExternallyVTable(*this, RD) 782 ? llvm::GlobalVariable::AvailableExternallyLinkage 783 : llvm::GlobalVariable::ExternalLinkage; 784 785 case TSK_ExplicitInstantiationDefinition: 786 return NonDiscardableODRLinkage; 787 } 788 789 llvm_unreachable("Invalid TemplateSpecializationKind!"); 790 } 791 792 /// This is a callback from Sema to tell us that that a particular vtable is 793 /// required to be emitted in this translation unit. 794 /// 795 /// This is only called for vtables that _must_ be emitted (mainly due to key 796 /// functions). For weak vtables, CodeGen tracks when they are needed and 797 /// emits them as-needed. 
/// Emit the vtable (and associated class data) for \p theClass.
/// Thin forwarder so Sema-driven emission and deferred emission share one
/// code path (GenerateClassData).
void CodeGenModule::EmitVTable(CXXRecordDecl *theClass) {
  VTables.GenerateClassData(theClass);
}

/// Emit all class data required for \p RD's vtable: debug info, the
/// virtual-inheritance tables (VTT / vbtables) when the class has virtual
/// bases, and finally the vtable definitions themselves via the C++ ABI.
void
CodeGenVTables::GenerateClassData(const CXXRecordDecl *RD) {
  // Let debug info complete the class description before emission.
  if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
    DI->completeClassData(RD);

  // Classes with virtual bases need their virtual-inheritance support
  // tables emitted first.
  if (RD->getNumVBases())
    CGM.getCXXABI().emitVirtualInheritanceTables(RD);

  CGM.getCXXABI().emitVTableDefinitions(*this, RD);
}

/// At this point in the translation unit, does it appear that can we
/// rely on the vtable being defined elsewhere in the program?
///
/// The response is really only definitive when called at the end of
/// the translation unit.
///
/// The only semantic restriction here is that the object file should
/// not contain a vtable definition when that vtable is defined
/// strongly elsewhere.  Otherwise, we'd just like to avoid emitting
/// vtables when unnecessary.
bool CodeGenVTables::isVTableExternal(const CXXRecordDecl *RD) {
  assert(RD->isDynamicClass() && "Non-dynamic classes have no VTable.");

  // We always synthesize vtables if they are needed in the MS ABI. MSVC doesn't
  // emit them even if there is an explicit template instantiation.
  if (CGM.getTarget().getCXXABI().isMicrosoft())
    return false;

  // If we have an explicit instantiation declaration (and not a
  // definition), the vtable is defined elsewhere.
  TemplateSpecializationKind TSK = RD->getTemplateSpecializationKind();
  if (TSK == TSK_ExplicitInstantiationDeclaration)
    return true;

  // Otherwise, if the class is an instantiated template, the
  // vtable must be defined here.
  if (TSK == TSK_ImplicitInstantiation ||
      TSK == TSK_ExplicitInstantiationDefinition)
    return false;

  // Otherwise, if the class doesn't have a key function (possibly
  // anymore), the vtable must be defined here.
  const CXXMethodDecl *keyFunction = CGM.getContext().getCurrentKeyFunction(RD);
  if (!keyFunction)
    return false;

  // Otherwise, if we don't have a definition of the key function, the
  // vtable must be defined somewhere else.  (Itanium ABI: the TU that
  // defines the key function emits the vtable.)
  return !keyFunction->hasBody();
}

/// Given that we're currently at the end of the translation unit, and
/// we've emitted a reference to the vtable for this class, should
/// we define that vtable?
static bool shouldEmitVTableAtEndOfTranslationUnit(CodeGenModule &CGM,
                                                   const CXXRecordDecl *RD) {
  // If vtable is internal then it has to be done.
  if (!CGM.getVTables().isVTableExternal(RD))
    return true;

  // If it's external then maybe we will need it as available_externally.
  return shouldEmitAvailableExternallyVTable(CGM, RD);
}

/// Given that at some point we emitted a reference to one or more
/// vtables, and that we are now at the end of the translation unit,
/// decide whether we should emit them.
void CodeGenModule::EmitDeferredVTables() {
#ifndef NDEBUG
  // Remember the size of DeferredVTables, because we're going to assume
  // that this entire operation doesn't modify it.
  size_t savedSize = DeferredVTables.size();
#endif

  for (const CXXRecordDecl *RD : DeferredVTables)
    if (shouldEmitVTableAtEndOfTranslationUnit(*this, RD))
      VTables.GenerateClassData(RD);

  // Emission above must not have queued any additional vtables; iterating
  // DeferredVTables while appending to it would be unsafe.
  assert(savedSize == DeferredVTables.size() &&
         "deferred extra vtables during vtable emission?");
  DeferredVTables.clear();
}

/// Determine whether \p RD's vtable symbols can be given hidden LTO
/// visibility, i.e. whether all virtual calls through this class are
/// known to resolve within the LTO unit (used by CFI / whole-program
/// devirtualization).
bool CodeGenModule::HasHiddenLTOVisibility(const CXXRecordDecl *RD) {
  // Internal-linkage classes are never visible outside this TU.
  LinkageInfo LV = RD->getLinkageAndVisibility();
  if (!isExternallyVisible(LV.getLinkage()))
    return true;

  // Explicit opt-outs: the lto_visibility_public attribute, or a __uuidof
  // class (its uuid may be referenced from outside the LTO unit).
  if (RD->hasAttr<LTOVisibilityPublicAttr>() || RD->hasAttr<UuidAttr>())
    return false;

  if (getTriple().isOSBinFormatCOFF()) {
    // On COFF, dllexport/dllimport imply cross-DSO use of the class.
    if (RD->hasAttr<DLLExportAttr>() || RD->hasAttr<DLLImportAttr>())
      return false;
  } else {
    // On ELF/Mach-O, anything not marked hidden may escape the LTO unit.
    if (LV.getVisibility() != HiddenVisibility)
      return false;
  }

  if (getCodeGenOpts().LTOVisibilityPublicStd) {
    // Walk up to the top-level enclosing declaration; if it is the
    // namespace std or stdext, treat the class as publicly visible
    // (the standard library may be defined outside the LTO unit).
    const DeclContext *DC = RD;
    while (1) {
      auto *D = cast<Decl>(DC);
      DC = DC->getParent();
      if (isa<TranslationUnitDecl>(DC->getRedeclContext())) {
        if (auto *ND = dyn_cast<NamespaceDecl>(D))
          if (const IdentifierInfo *II = ND->getIdentifier())
            if (II->isStr("std") || II->isStr("stdext"))
              return false;
        break;
      }
    }
  }

  return true;
}

/// Attach type metadata (for CFI / whole-program devirtualization) to
/// \p VTable, one entry per address point in \p VTLayout.  Only done when
/// preparing for LTO, since that is when the metadata is consumed.
void CodeGenModule::EmitVTableTypeMetadata(llvm::GlobalVariable *VTable,
                                           const VTableLayout &VTLayout) {
  if (!getCodeGenOpts().PrepareForLTO)
    return;

  // Address-point offsets below are in vtable slots; scale by pointer width
  // to get byte offsets.
  CharUnits PointerWidth =
      Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));

  typedef std::pair<const CXXRecordDecl *, unsigned> BSEntry;
  std::vector<BSEntry> BitsetEntries;
  // Create a bit set entry for each address point.
  for (auto &&AP : VTLayout.getAddressPoints())
    BitsetEntries.push_back(std::make_pair(AP.first.getBase(), AP.second));

  // Sort the bit set entries for determinism: address-point maps have no
  // stable iteration order, so order by mangled type name, then by offset.
  std::sort(BitsetEntries.begin(), BitsetEntries.end(),
            [this](const BSEntry &E1, const BSEntry &E2) {
              if (&E1 == &E2)
                return false;

              std::string S1;
              llvm::raw_string_ostream O1(S1);
              getCXXABI().getMangleContext().mangleTypeName(
                  QualType(E1.first->getTypeForDecl(), 0), O1);
              O1.flush();

              std::string S2;
              llvm::raw_string_ostream O2(S2);
              getCXXABI().getMangleContext().mangleTypeName(
                  QualType(E2.first->getTypeForDecl(), 0), O2);
              O2.flush();

              if (S1 < S2)
                return true;
              if (S1 != S2)
                return false;

              // Same class (e.g. multiple address points): order by offset.
              return E1.second < E2.second;
            });

  for (auto BitsetEntry : BitsetEntries)
    AddVTableTypeMetadata(VTable, PointerWidth * BitsetEntry.second,
                          BitsetEntry.first);
}