//===--- CGVTables.cpp - Emit LLVM Code for C++ vtables -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with C++ code generation of virtual tables.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CodeGenModule.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Format.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include <algorithm>
#include <cstdio>

using namespace clang;
using namespace CodeGen;

CodeGenVTables::CodeGenVTables(CodeGenModule &CGM)
    : CGM(CGM), VTContext(CGM.getContext().getVTableContext()) {}

llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD,
                                              const ThunkInfo &Thunk) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());

  // Compute the mangled name.
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD))
    getCXXABI().getMangleContext().mangleCXXDtorThunk(DD, GD.getDtorType(),
                                                      Thunk.This, Out);
  else
    getCXXABI().getMangleContext().mangleThunk(MD, Thunk, Out);
  Out.flush();

  llvm::Type *Ty = getTypes().GetFunctionTypeForVTable(GD);
  return GetOrCreateLLVMFunction(Name, Ty, GD, /*ForVTable=*/true,
                                 /*DontDefer=*/true);
}

static void setThunkVisibility(CodeGenModule &CGM, const CXXMethodDecl *MD,
                               const ThunkInfo &Thunk, llvm::Function *Fn) {
  CGM.setGlobalVisibility(Fn, MD);
}

#ifndef NDEBUG
static bool similar(const ABIArgInfo &infoL, CanQualType typeL,
                    const ABIArgInfo &infoR, CanQualType typeR) {
  return (infoL.getKind() == infoR.getKind() &&
          (typeL == typeR ||
           (isa<PointerType>(typeL) && isa<PointerType>(typeR)) ||
           (isa<ReferenceType>(typeL) && isa<ReferenceType>(typeR))));
}
#endif

static RValue PerformReturnAdjustment(CodeGenFunction &CGF,
                                      QualType ResultType, RValue RV,
                                      const ThunkInfo &Thunk) {
  // Emit the return adjustment.
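  // Illustrative example (not from this file): a return adjustment is needed
  // for a covariant override whose return type embeds the overridden return
  // type at a non-zero offset, e.g.
  //
  //   struct A { virtual void f(); };
  //   struct B { virtual B *get(); };
  //   struct C : A, B { C *get() override; };
  //
  // The thunk for C::get in the B-in-C vtable must convert the returned C*
  // to a B* by adding the offset of B within C. For pointer results a null
  // check is required so that a null C* still maps to a null B*; references
  // can never be null, hence the test below.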
  bool NullCheckValue = !ResultType->isReferenceType();

  llvm::BasicBlock *AdjustNull = nullptr;
  llvm::BasicBlock *AdjustNotNull = nullptr;
  llvm::BasicBlock *AdjustEnd = nullptr;

  llvm::Value *ReturnValue = RV.getScalarVal();

  if (NullCheckValue) {
    AdjustNull = CGF.createBasicBlock("adjust.null");
    AdjustNotNull = CGF.createBasicBlock("adjust.notnull");
    AdjustEnd = CGF.createBasicBlock("adjust.end");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(ReturnValue);
    CGF.Builder.CreateCondBr(IsNull, AdjustNull, AdjustNotNull);
    CGF.EmitBlock(AdjustNotNull);
  }

  ReturnValue = CGF.CGM.getCXXABI().performReturnAdjustment(CGF, ReturnValue,
                                                            Thunk.Return);

  if (NullCheckValue) {
    CGF.Builder.CreateBr(AdjustEnd);
    CGF.EmitBlock(AdjustNull);
    CGF.Builder.CreateBr(AdjustEnd);
    CGF.EmitBlock(AdjustEnd);

    llvm::PHINode *PHI = CGF.Builder.CreatePHI(ReturnValue->getType(), 2);
    PHI->addIncoming(ReturnValue, AdjustNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(ReturnValue->getType()),
                     AdjustNull);
    ReturnValue = PHI;
  }

  return RValue::get(ReturnValue);
}

// This function does roughly the same thing as GenerateThunk, but in a
// very different way, so that va_start and va_end work correctly.
// FIXME: This function assumes "this" is the first non-sret LLVM argument of
//        a function, and that there is an alloca built in the entry block
//        for all accesses to "this".
// FIXME: This function assumes there is only one "ret" statement per function.
// FIXME: Cloning isn't correct in the presence of indirect goto!
// FIXME: This implementation of thunks bloats codesize by duplicating the
//        function definition. There are alternatives:
//        1. Add some sort of stub support to LLVM for cases where we can
//           do a this adjustment, then a sibcall.
//        2. We could transform the definition to take a va_list instead of an
//           actual variable argument list, then have the thunks (including a
//           no-op thunk for the regular definition) call va_start/va_end.
//           There's a bit of per-call overhead for this solution, but it's
//           better for codesize if the definition is long.
void CodeGenFunction::GenerateVarArgsThunk(llvm::Function *Fn,
                                           const CGFunctionInfo &FnInfo,
                                           GlobalDecl GD,
                                           const ThunkInfo &Thunk) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  QualType ResultType = FPT->getReturnType();

  // Get the original function
  assert(FnInfo.isVariadic());
  llvm::Type *Ty = CGM.getTypes().GetFunctionType(FnInfo);
  llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
  llvm::Function *BaseFn = cast<llvm::Function>(Callee);

  // Clone to thunk.
  llvm::ValueToValueMapTy VMap;
  llvm::Function *NewFn = llvm::CloneFunction(BaseFn, VMap,
                                              /*ModuleLevelChanges=*/false);
  CGM.getModule().getFunctionList().push_back(NewFn);
  Fn->replaceAllUsesWith(NewFn);
  NewFn->takeName(Fn);
  Fn->eraseFromParent();
  Fn = NewFn;

  // "Initialize" CGF (minimally).
  CurFn = Fn;

  // Get the "this" value
  llvm::Function::arg_iterator AI = Fn->arg_begin();
  if (CGM.ReturnTypeUsesSRet(FnInfo))
    ++AI;

  // Find the first store of "this", which will be to the alloca associated
  // with "this".
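  // In a typical prologue the entry block contains IR along the lines of
  // (illustrative only; names vary):
  //
  //   %this.addr = alloca %class.C*
  //   store %class.C* %this, %class.C** %this.addr
  //
  // Patching the stored operand, rather than the argument itself, means every
  // later load from the "this" alloca observes the adjusted pointer.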
  llvm::Value *ThisPtr = &*AI;
  llvm::BasicBlock *EntryBB = Fn->begin();
  llvm::Instruction *ThisStore = nullptr;
  for (llvm::BasicBlock::iterator I = EntryBB->begin(), E = EntryBB->end();
       I != E; I++) {
    if (isa<llvm::StoreInst>(I) && I->getOperand(0) == ThisPtr) {
      ThisStore = cast<llvm::StoreInst>(I);
      break;
    }
  }
  assert(ThisStore && "Store of this should be in entry block?");
  // Adjust "this", if necessary.
  Builder.SetInsertPoint(ThisStore);
  llvm::Value *AdjustedThisPtr =
      CGM.getCXXABI().performThisAdjustment(*this, ThisPtr, Thunk.This);
  ThisStore->setOperand(0, AdjustedThisPtr);

  if (!Thunk.Return.isEmpty()) {
    // Fix up the returned value, if necessary.
    for (llvm::Function::iterator I = Fn->begin(), E = Fn->end(); I != E; I++) {
      llvm::Instruction *T = I->getTerminator();
      if (isa<llvm::ReturnInst>(T)) {
        RValue RV = RValue::get(T->getOperand(0));
        T->eraseFromParent();
        Builder.SetInsertPoint(&*I);
        RV = PerformReturnAdjustment(*this, ResultType, RV, Thunk);
        Builder.CreateRet(RV.getScalarVal());
        break;
      }
    }
  }
}

void CodeGenFunction::StartThunk(llvm::Function *Fn, GlobalDecl GD,
                                 const CGFunctionInfo &FnInfo) {
  assert(!CurGD.getDecl() && "CurGD was already set!");
  CurGD = GD;
  CurFuncIsThunk = true;

  // Build FunctionArgs.
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  QualType ThisType = MD->getThisType(getContext());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  QualType ResultType =
      CGM.getCXXABI().HasThisReturn(GD) ? ThisType : FPT->getReturnType();
  FunctionArgList FunctionArgs;

  // Create the implicit 'this' parameter declaration.
  CGM.getCXXABI().buildThisParam(*this, FunctionArgs);

  // Add the rest of the parameters.
  FunctionArgs.append(MD->param_begin(), MD->param_end());

  if (isa<CXXDestructorDecl>(MD))
    CGM.getCXXABI().addImplicitStructorParams(*this, ResultType, FunctionArgs);

  // Start defining the function.
  StartFunction(GlobalDecl(), ResultType, Fn, FnInfo, FunctionArgs,
                MD->getLocation(), SourceLocation());

  // Since we didn't pass a GlobalDecl to StartFunction, do this ourselves.
  CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
  CXXThisValue = CXXABIThisValue;
}

void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Value *Callee,
                                                const ThunkInfo *Thunk) {
  assert(isa<CXXMethodDecl>(CurGD.getDecl()) &&
         "Please use a new CGF for this thunk");
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(CurGD.getDecl());

  // Adjust the 'this' pointer if necessary.
  llvm::Value *AdjustedThisPtr = Thunk ? CGM.getCXXABI().performThisAdjustment(
                                             *this, LoadCXXThis(), Thunk->This)
                                       : LoadCXXThis();

  if (CurFnInfo->usesInAlloca()) {
    // We don't handle return adjusting thunks, because they require us to call
    // the copy constructor. For now, fall through and pretend the return
    // adjustment was empty so we don't crash.
    if (Thunk && !Thunk->Return.isEmpty()) {
      CGM.ErrorUnsupported(
          MD, "non-trivial argument copy for return-adjusting thunk");
    }
    EmitMustTailThunk(MD, AdjustedThisPtr, Callee);
    return;
  }

  // Start building CallArgs.
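  // Conceptually, the thunk body emitted below is just (illustrative sketch):
  //
  //   adjusted_this = this +/- fixed offsets from ThunkInfo
  //   result = target(adjusted_this, forwarded args...)
  //   return adjust_return(result)   // only for return-adjusting thunks
  //
  // with the arguments forwarded unchanged from the thunk's own parameters.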
  CallArgList CallArgs;
  QualType ThisType = MD->getThisType(getContext());
  CallArgs.add(RValue::get(AdjustedThisPtr), ThisType);

  if (isa<CXXDestructorDecl>(MD))
    CGM.getCXXABI().adjustCallArgsForDestructorThunk(*this, CurGD, CallArgs);

  // Add the rest of the arguments.
  for (const ParmVarDecl *PD : MD->params())
    EmitDelegateCallArg(CallArgs, PD, PD->getLocStart());

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

#ifndef NDEBUG
  const CGFunctionInfo &CallFnInfo =
      CGM.getTypes().arrangeCXXMethodCall(CallArgs, FPT,
                                          RequiredArgs::forPrototypePlus(FPT, 1));
  assert(CallFnInfo.getRegParm() == CurFnInfo->getRegParm() &&
         CallFnInfo.isNoReturn() == CurFnInfo->isNoReturn() &&
         CallFnInfo.getCallingConvention() == CurFnInfo->getCallingConvention());
  assert(isa<CXXDestructorDecl>(MD) || // ignore dtor return types
         similar(CallFnInfo.getReturnInfo(), CallFnInfo.getReturnType(),
                 CurFnInfo->getReturnInfo(), CurFnInfo->getReturnType()));
  assert(CallFnInfo.arg_size() == CurFnInfo->arg_size());
  for (unsigned i = 0, e = CurFnInfo->arg_size(); i != e; ++i)
    assert(similar(CallFnInfo.arg_begin()[i].info,
                   CallFnInfo.arg_begin()[i].type,
                   CurFnInfo->arg_begin()[i].info,
                   CurFnInfo->arg_begin()[i].type));
#endif

  // Determine whether we have a return value slot to use.
  QualType ResultType =
      CGM.getCXXABI().HasThisReturn(CurGD) ? ThisType : FPT->getReturnType();
  ReturnValueSlot Slot;
  if (!ResultType->isVoidType() &&
      CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
      !hasScalarEvaluationKind(CurFnInfo->getReturnType()))
    Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());

  // Now emit our call.
  llvm::Instruction *CallOrInvoke;
  RValue RV = EmitCall(*CurFnInfo, Callee, Slot, CallArgs, MD, &CallOrInvoke);

  // Consider return adjustment if we have ThunkInfo.
  if (Thunk && !Thunk->Return.isEmpty())
    RV = PerformReturnAdjustment(*this, ResultType, RV, *Thunk);

  // Emit return.
  if (!ResultType->isVoidType() && Slot.isNull())
    CGM.getCXXABI().EmitReturnFromThunk(*this, RV, ResultType);

  // Disable the final ARC autorelease.
  AutoreleaseResult = false;

  FinishFunction();
}

void CodeGenFunction::EmitMustTailThunk(const CXXMethodDecl *MD,
                                        llvm::Value *AdjustedThisPtr,
                                        llvm::Value *Callee) {
  // Emitting a musttail call thunk doesn't use any of the CGCall.cpp machinery
  // to translate AST arguments into LLVM IR arguments. For thunks, we know
  // that the caller prototype more or less matches the callee prototype with
  // the exception of 'this'.
  SmallVector<llvm::Value *, 8> Args;
  for (llvm::Argument &A : CurFn->args())
    Args.push_back(&A);

  // Set the adjusted 'this' pointer.
  const ABIArgInfo &ThisAI = CurFnInfo->arg_begin()->info;
  if (ThisAI.isDirect()) {
    const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
    int ThisArgNo = RetAI.isIndirect() && !RetAI.isSRetAfterThis() ? 1 : 0;
    llvm::Type *ThisType = Args[ThisArgNo]->getType();
    if (ThisType != AdjustedThisPtr->getType())
      AdjustedThisPtr = Builder.CreateBitCast(AdjustedThisPtr, ThisType);
    Args[ThisArgNo] = AdjustedThisPtr;
  } else {
    assert(ThisAI.isInAlloca() && "this is passed directly or inalloca");
    llvm::Value *ThisAddr = GetAddrOfLocalVar(CXXABIThisDecl);
    llvm::Type *ThisType =
        cast<llvm::PointerType>(ThisAddr->getType())->getElementType();
    if (ThisType != AdjustedThisPtr->getType())
      AdjustedThisPtr = Builder.CreateBitCast(AdjustedThisPtr, ThisType);
    Builder.CreateStore(AdjustedThisPtr, ThisAddr);
  }

  // Emit the musttail call manually. Even if the prologue pushed cleanups, we
  // don't actually want to run them.
  llvm::CallInst *Call = Builder.CreateCall(Callee, Args);
  Call->setTailCallKind(llvm::CallInst::TCK_MustTail);

  // Apply the standard set of call attributes.
  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(*CurFnInfo, MD, AttributeList, CallingConv,
                             /*AttrOnCallSite=*/true);
  llvm::AttributeSet Attrs =
      llvm::AttributeSet::get(getLLVMContext(), AttributeList);
  Call->setAttributes(Attrs);
  Call->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  if (Call->getType()->isVoidTy())
    Builder.CreateRetVoid();
  else
    Builder.CreateRet(Call);

  // Finish the function to maintain CodeGenFunction invariants.
  // FIXME: Don't emit unreachable code.
  EmitBlock(createBasicBlock());
  FinishFunction();
}

void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    GlobalDecl GD, const ThunkInfo &Thunk) {
  StartThunk(Fn, GD, FnInfo);

  // Get our callee.
  llvm::Type *Ty =
      CGM.getTypes().GetFunctionType(CGM.getTypes().arrangeGlobalDeclaration(GD));
  llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);

  // Make the call and return the result.
  EmitCallAndReturnForThunk(Callee, &Thunk);

  // Set the right linkage.
  CGM.setFunctionLinkage(GD, Fn);

  // Set the right visibility.
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  setThunkVisibility(CGM, MD, Thunk, Fn);
}

void CodeGenVTables::emitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
                               bool ForVTable) {
  const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeGlobalDeclaration(GD);

  // FIXME: re-use FnInfo in this computation.
  llvm::Constant *C = CGM.GetAddrOfThunk(GD, Thunk);
  llvm::GlobalValue *Entry;

  // Strip off a bitcast if we got one back.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(C)) {
    assert(CE->getOpcode() == llvm::Instruction::BitCast);
    Entry = cast<llvm::GlobalValue>(CE->getOperand(0));
  } else {
    Entry = cast<llvm::GlobalValue>(C);
  }

  // There's already a declaration with the same name; check if it has the
  // same type or if we need to replace it.
  if (Entry->getType()->getElementType() !=
      CGM.getTypes().GetFunctionTypeForVTable(GD)) {
    llvm::GlobalValue *OldThunkFn = Entry;

    // If the types mismatch, then we have to rewrite the definition.
    assert(OldThunkFn->isDeclaration() &&
           "Shouldn't replace non-declaration");

    // Remove the name from the old thunk function and get a new thunk.
    OldThunkFn->setName(StringRef());
    Entry = cast<llvm::GlobalValue>(CGM.GetAddrOfThunk(GD, Thunk));

    // If needed, replace the old thunk with a bitcast.
    if (!OldThunkFn->use_empty()) {
      llvm::Constant *NewPtrForOldDecl =
          llvm::ConstantExpr::getBitCast(Entry, OldThunkFn->getType());
      OldThunkFn->replaceAllUsesWith(NewPtrForOldDecl);
    }

    // Remove the old thunk.
    OldThunkFn->eraseFromParent();
  }

  llvm::Function *ThunkFn = cast<llvm::Function>(Entry);
  bool ABIHasKeyFunctions = CGM.getTarget().getCXXABI().hasKeyFunctions();
  bool UseAvailableExternallyLinkage = ForVTable && ABIHasKeyFunctions;

  if (!ThunkFn->isDeclaration()) {
    if (!ABIHasKeyFunctions || UseAvailableExternallyLinkage) {
      // There is already a thunk emitted for this function; do nothing.
      return;
    }

    // Change the linkage.
    CGM.setFunctionLinkage(GD, ThunkFn);
    return;
  }

  CGM.SetLLVMFunctionAttributesForDefinition(GD.getDecl(), ThunkFn);

  if (ThunkFn->isVarArg()) {
    // Varargs thunks are special; we can't just generate a call because
    // we can't copy the varargs. Our implementation is rather
    // expensive/sucky at the moment, so don't generate the thunk unless
    // we have to.
    // FIXME: Do something better here; GenerateVarArgsThunk is extremely ugly.
    if (!UseAvailableExternallyLinkage) {
      CodeGenFunction(CGM).GenerateVarArgsThunk(ThunkFn, FnInfo, GD, Thunk);
      CGM.getCXXABI().setThunkLinkage(ThunkFn, ForVTable, GD,
                                      !Thunk.Return.isEmpty());
    }
  } else {
    // Normal thunk body generation.
    CodeGenFunction(CGM).GenerateThunk(ThunkFn, FnInfo, GD, Thunk);
    CGM.getCXXABI().setThunkLinkage(ThunkFn, ForVTable, GD,
                                    !Thunk.Return.isEmpty());
  }
}

void CodeGenVTables::maybeEmitThunkForVTable(GlobalDecl GD,
                                             const ThunkInfo &Thunk) {
  // If the ABI has key functions, only the TU with the key function should emit
  // the thunk. However, we can allow inlining of thunks if we emit them with
  // available_externally linkage together with vtables when optimizations are
  // enabled.
  if (CGM.getTarget().getCXXABI().hasKeyFunctions() &&
      !CGM.getCodeGenOpts().OptimizationLevel)
    return;

  // We can't emit thunks for member functions with incomplete types.
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  if (!CGM.getTypes().isFuncTypeConvertible(
          MD->getType()->castAs<FunctionType>()))
    return;

  emitThunk(GD, Thunk, /*ForVTable=*/true);
}

void CodeGenVTables::EmitThunks(GlobalDecl GD)
{
  const CXXMethodDecl *MD =
      cast<CXXMethodDecl>(GD.getDecl())->getCanonicalDecl();

  // We don't need to generate thunks for the base destructor.
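  // (The base-object destructor is only ever invoked non-virtually, on a
  // subobject whose type is known exactly, so it never occupies a vtable slot
  // that would require a this- or return-adjusting thunk.)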
  if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
    return;

  const VTableContextBase::ThunkInfoVectorTy *ThunkInfoVector =
      VTContext->getThunkInfo(GD);

  if (!ThunkInfoVector)
    return;

  for (unsigned I = 0, E = ThunkInfoVector->size(); I != E; ++I)
    emitThunk(GD, (*ThunkInfoVector)[I], /*ForVTable=*/false);
}

llvm::Constant *CodeGenVTables::CreateVTableInitializer(
    const CXXRecordDecl *RD, const VTableComponent *Components,
    unsigned NumComponents, const VTableLayout::VTableThunkTy *VTableThunks,
    unsigned NumVTableThunks, llvm::Constant *RTTI) {
  SmallVector<llvm::Constant *, 64> Inits;

  llvm::Type *Int8PtrTy = CGM.Int8PtrTy;

  llvm::Type *PtrDiffTy =
      CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());

  unsigned NextVTableThunkIndex = 0;

  llvm::Constant *PureVirtualFn = nullptr, *DeletedVirtualFn = nullptr;

  for (unsigned I = 0; I != NumComponents; ++I) {
    VTableComponent Component = Components[I];

    llvm::Constant *Init = nullptr;

    switch (Component.getKind()) {
    case VTableComponent::CK_VCallOffset:
      Init = llvm::ConstantInt::get(PtrDiffTy,
                                    Component.getVCallOffset().getQuantity());
      Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
      break;
    case VTableComponent::CK_VBaseOffset:
      Init = llvm::ConstantInt::get(PtrDiffTy,
                                    Component.getVBaseOffset().getQuantity());
      Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
      break;
    case VTableComponent::CK_OffsetToTop:
      Init = llvm::ConstantInt::get(PtrDiffTy,
                                    Component.getOffsetToTop().getQuantity());
      Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
      break;
    case VTableComponent::CK_RTTI:
      Init = llvm::ConstantExpr::getBitCast(RTTI, Int8PtrTy);
      break;
    case VTableComponent::CK_FunctionPointer:
    case VTableComponent::CK_CompleteDtorPointer:
    case VTableComponent::CK_DeletingDtorPointer: {
      GlobalDecl GD;

      // Get the right global decl.
      switch (Component.getKind()) {
      default:
        llvm_unreachable("Unexpected vtable component kind");
      case VTableComponent::CK_FunctionPointer:
        GD = Component.getFunctionDecl();
        break;
      case VTableComponent::CK_CompleteDtorPointer:
        GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Complete);
        break;
      case VTableComponent::CK_DeletingDtorPointer:
        GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Deleting);
        break;
      }

      if (cast<CXXMethodDecl>(GD.getDecl())->isPure()) {
        // We have a pure virtual member function.
        if (!PureVirtualFn) {
          llvm::FunctionType *Ty =
              llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
          StringRef PureCallName = CGM.getCXXABI().GetPureVirtualCallName();
          PureVirtualFn = CGM.CreateRuntimeFunction(Ty, PureCallName);
          PureVirtualFn = llvm::ConstantExpr::getBitCast(PureVirtualFn,
                                                         CGM.Int8PtrTy);
        }
        Init = PureVirtualFn;
      } else if (cast<CXXMethodDecl>(GD.getDecl())->isDeleted()) {
        if (!DeletedVirtualFn) {
          llvm::FunctionType *Ty =
              llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
          StringRef DeletedCallName =
              CGM.getCXXABI().GetDeletedVirtualCallName();
          DeletedVirtualFn = CGM.CreateRuntimeFunction(Ty, DeletedCallName);
          DeletedVirtualFn = llvm::ConstantExpr::getBitCast(DeletedVirtualFn,
                                                            CGM.Int8PtrTy);
        }
        Init = DeletedVirtualFn;
      } else {
        // Check if we should use a thunk.
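        // VTableThunks pairs each component index with the ThunkInfo to use at
        // that slot; the list is walked in step with the components, so the
        // single forward-moving cursor NextVTableThunkIndex relies on it being
        // sorted by component index.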
        if (NextVTableThunkIndex < NumVTableThunks &&
            VTableThunks[NextVTableThunkIndex].first == I) {
          const ThunkInfo &Thunk = VTableThunks[NextVTableThunkIndex].second;

          maybeEmitThunkForVTable(GD, Thunk);
          Init = CGM.GetAddrOfThunk(GD, Thunk);

          NextVTableThunkIndex++;
        } else {
          llvm::Type *Ty = CGM.getTypes().GetFunctionTypeForVTable(GD);

          Init = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
        }

        Init = llvm::ConstantExpr::getBitCast(Init, Int8PtrTy);
      }
      break;
    }

    case VTableComponent::CK_UnusedFunctionPointer:
      Init = llvm::ConstantExpr::getNullValue(Int8PtrTy);
      break;
    }

    Inits.push_back(Init);
  }

  llvm::ArrayType *ArrayType = llvm::ArrayType::get(Int8PtrTy, NumComponents);
  return llvm::ConstantArray::get(ArrayType, Inits);
}

llvm::GlobalVariable *
CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
                                           const BaseSubobject &Base,
                                           bool BaseIsVirtual,
                                           llvm::GlobalVariable::LinkageTypes Linkage,
                                           VTableAddressPointsMapTy &AddressPoints) {
  if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
    DI->completeClassData(Base.getBase());

  std::unique_ptr<VTableLayout> VTLayout(
      getItaniumVTableContext().createConstructionVTableLayout(
          Base.getBase(), Base.getBaseOffset(), BaseIsVirtual, RD));

  // Add the address points.
  AddressPoints = VTLayout->getAddressPoints();

  // Get the mangled construction vtable name.
  SmallString<256> OutName;
  llvm::raw_svector_ostream Out(OutName);
  cast<ItaniumMangleContext>(CGM.getCXXABI().getMangleContext())
      .mangleCXXCtorVTable(RD, Base.getBaseOffset().getQuantity(),
                           Base.getBase(), Out);
  Out.flush();
  StringRef Name = OutName.str();

  llvm::ArrayType *ArrayType =
      llvm::ArrayType::get(CGM.Int8PtrTy, VTLayout->getNumVTableComponents());

  // Construction vtable symbols are not part of the Itanium ABI, so we cannot
  // guarantee that they actually will be available externally. Instead, when
  // emitting an available_externally VTT, we provide references to an internal
  // linkage construction vtable. The ABI only requires complete-object vtables
  // to be the same for all instances of a type, not construction vtables.
  if (Linkage == llvm::GlobalVariable::AvailableExternallyLinkage)
    Linkage = llvm::GlobalVariable::InternalLinkage;

  // Create the variable that will hold the construction vtable.
  llvm::GlobalVariable *VTable =
      CGM.CreateOrReplaceCXXRuntimeVariable(Name, ArrayType, Linkage);
  CGM.setGlobalVisibility(VTable, RD);

  // V-tables are always unnamed_addr.
  VTable->setUnnamedAddr(true);

  llvm::Constant *RTTI = CGM.GetAddrOfRTTIDescriptor(
      CGM.getContext().getTagDeclType(Base.getBase()));

  // Create and set the initializer.
  llvm::Constant *Init = CreateVTableInitializer(
      Base.getBase(), VTLayout->vtable_component_begin(),
      VTLayout->getNumVTableComponents(), VTLayout->vtable_thunk_begin(),
      VTLayout->getNumVTableThunks(), RTTI);
  VTable->setInitializer(Init);

  return VTable;
}

/// Compute the required linkage of the v-table for the given class.
///
/// Note that we only call this at the end of the translation unit.
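///
/// For example (illustrative): given
///
///   struct S { virtual void f(); };
///   inline void S::f() {}
///
/// S::f is the key function, but its definition is inline, so each translation
/// unit that needs S's vtable emits it with linkonce_odr linkage (internal
/// under -fapple-kext, which has no weak linkage).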
llvm::GlobalVariable::LinkageTypes
CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
  if (!RD->isExternallyVisible())
    return llvm::GlobalVariable::InternalLinkage;

  // We're at the end of the translation unit, so the current key
  // function is fully correct.
  if (const CXXMethodDecl *keyFunction = Context.getCurrentKeyFunction(RD)) {
    // If this class has a key function, use that to determine the
    // linkage of the vtable.
    const FunctionDecl *def = nullptr;
    if (keyFunction->hasBody(def))
      keyFunction = cast<CXXMethodDecl>(def);

    switch (keyFunction->getTemplateSpecializationKind()) {
    case TSK_Undeclared:
    case TSK_ExplicitSpecialization:
      assert(def && "Should not have been asked to emit this");
      if (keyFunction->isInlined())
        return !Context.getLangOpts().AppleKext ?
                 llvm::GlobalVariable::LinkOnceODRLinkage :
                 llvm::Function::InternalLinkage;

      return llvm::GlobalVariable::ExternalLinkage;

    case TSK_ImplicitInstantiation:
      return !Context.getLangOpts().AppleKext ?
               llvm::GlobalVariable::LinkOnceODRLinkage :
               llvm::Function::InternalLinkage;

    case TSK_ExplicitInstantiationDefinition:
      return !Context.getLangOpts().AppleKext ?
               llvm::GlobalVariable::WeakODRLinkage :
               llvm::Function::InternalLinkage;

    case TSK_ExplicitInstantiationDeclaration:
      llvm_unreachable("Should not have been asked to emit this");
    }
  }

  // -fapple-kext mode does not support weak linkage, so we must use
  // internal linkage.
  if (Context.getLangOpts().AppleKext)
    return llvm::Function::InternalLinkage;

  llvm::GlobalVariable::LinkageTypes DiscardableODRLinkage =
      llvm::GlobalValue::LinkOnceODRLinkage;
  llvm::GlobalVariable::LinkageTypes NonDiscardableODRLinkage =
      llvm::GlobalValue::WeakODRLinkage;
  if (RD->hasAttr<DLLExportAttr>()) {
    // Cannot discard exported vtables.
    DiscardableODRLinkage = NonDiscardableODRLinkage;
  } else if (RD->hasAttr<DLLImportAttr>()) {
    // Imported vtables are available externally.
    DiscardableODRLinkage = llvm::GlobalVariable::AvailableExternallyLinkage;
    NonDiscardableODRLinkage = llvm::GlobalVariable::AvailableExternallyLinkage;
  }

  switch (RD->getTemplateSpecializationKind()) {
  case TSK_Undeclared:
  case TSK_ExplicitSpecialization:
  case TSK_ImplicitInstantiation:
    return DiscardableODRLinkage;

  case TSK_ExplicitInstantiationDeclaration:
    llvm_unreachable("Should not have been asked to emit this");

  case TSK_ExplicitInstantiationDefinition:
    return NonDiscardableODRLinkage;
  }

  llvm_unreachable("Invalid TemplateSpecializationKind!");
}

/// This is a callback from Sema to tell us that it believes that a
/// particular v-table is required to be emitted in this translation
/// unit.
///
/// The reason we don't simply trust this callback is because Sema
/// will happily report that something is used even when it's used
/// only in code that we don't actually have to emit.
///
/// \param isRequired - if true, the v-table is mandatory, e.g.
/// because the translation unit defines the key function.
void CodeGenModule::EmitVTable(CXXRecordDecl *theClass, bool isRequired) {
  if (!isRequired) return;

  VTables.GenerateClassData(theClass);
}

void
CodeGenVTables::GenerateClassData(const CXXRecordDecl *RD) {
  if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
    DI->completeClassData(RD);

  if (RD->getNumVBases())
    CGM.getCXXABI().emitVirtualInheritanceTables(RD);

  CGM.getCXXABI().emitVTableDefinitions(*this, RD);
}

/// At this point in the translation unit, does it appear that we can
/// rely on the vtable being defined elsewhere in the program?
///
/// The response is really only definitive when called at the end of
/// the translation unit.
///
/// The only semantic restriction here is that the object file should
/// not contain a v-table definition when that v-table is defined
/// strongly elsewhere. Otherwise, we'd just like to avoid emitting
/// v-tables when unnecessary.
bool CodeGenVTables::isVTableExternal(const CXXRecordDecl *RD) {
  assert(RD->isDynamicClass() && "Non-dynamic classes have no VTable.");

  // If we have an explicit instantiation declaration (and not a
  // definition), the v-table is defined elsewhere.
  TemplateSpecializationKind TSK = RD->getTemplateSpecializationKind();
  if (TSK == TSK_ExplicitInstantiationDeclaration)
    return true;

  // Otherwise, if the class is an instantiated template, the
  // v-table must be defined here.
  if (TSK == TSK_ImplicitInstantiation ||
      TSK == TSK_ExplicitInstantiationDefinition)
    return false;

  // Otherwise, if the class doesn't have a key function (possibly
  // anymore), the v-table must be defined here.
  const CXXMethodDecl *keyFunction = CGM.getContext().getCurrentKeyFunction(RD);
  if (!keyFunction)
    return false;

  // Otherwise, if we don't have a definition of the key function, the
  // v-table must be defined somewhere else.
  return !keyFunction->hasBody();
}

/// Given that we're currently at the end of the translation unit, and
/// we've emitted a reference to the v-table for this class, should
/// we define that v-table?
static bool shouldEmitVTableAtEndOfTranslationUnit(CodeGenModule &CGM,
                                                   const CXXRecordDecl *RD) {
  return !CGM.getVTables().isVTableExternal(RD);
}

/// Given that at some point we emitted a reference to one or more
/// v-tables, and that we are now at the end of the translation unit,
/// decide whether we should emit them.
void CodeGenModule::EmitDeferredVTables() {
#ifndef NDEBUG
  // Remember the size of DeferredVTables, because we're going to assume
  // that this entire operation doesn't modify it.
  size_t savedSize = DeferredVTables.size();
#endif

  typedef std::vector<const CXXRecordDecl *>::const_iterator const_iterator;
  for (const_iterator i = DeferredVTables.begin(),
                      e = DeferredVTables.end(); i != e; ++i) {
    const CXXRecordDecl *RD = *i;
    if (shouldEmitVTableAtEndOfTranslationUnit(*this, RD))
      VTables.GenerateClassData(RD);
  }

  assert(savedSize == DeferredVTables.size() &&
         "deferred extra v-tables during v-table emission?");
  DeferredVTables.clear();
}