1 //===--- CGVTables.cpp - Emit LLVM Code for C++ vtables -------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code dealing with C++ code generation of virtual tables.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CodeGenFunction.h"
15 #include "CGCXXABI.h"
16 #include "CodeGenModule.h"
17 #include "clang/AST/CXXInheritance.h"
18 #include "clang/AST/RecordLayout.h"
19 #include "clang/CodeGen/CGFunctionInfo.h"
20 #include "clang/Frontend/CodeGenOptions.h"
21 #include "llvm/ADT/DenseSet.h"
22 #include "llvm/ADT/SetVector.h"
23 #include "llvm/Support/Compiler.h"
24 #include "llvm/Support/Format.h"
25 #include "llvm/Transforms/Utils/Cloning.h"
26 #include <algorithm>
27 #include <cstdio>
28 
29 using namespace clang;
30 using namespace CodeGen;
31 
// Cache the CodeGenModule and the AST's vtable context; the context owns
// all vtable layout and thunk information this class consumes.
CodeGenVTables::CodeGenVTables(CodeGenModule &CGM)
    : CGM(CGM), VTContext(CGM.getContext().getVTableContext()) {}
34 
/// Get or create a declaration for the thunk described by (GD, Thunk).
/// The returned function is only a declaration here; emitThunk is
/// responsible for providing the body.
llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD,
                                              const ThunkInfo &Thunk) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());

  // Compute the mangled name.
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  // Destructor thunks are mangled from the destructor kind and the 'this'
  // adjustment only; all other thunks use the full ThunkInfo mangling.
  if (const CXXDestructorDecl* DD = dyn_cast<CXXDestructorDecl>(MD))
    getCXXABI().getMangleContext().mangleCXXDtorThunk(DD, GD.getDtorType(),
                                                      Thunk.This, Out);
  else
    getCXXABI().getMangleContext().mangleThunk(MD, Thunk, Out);
  Out.flush();

  // The thunk shares the LLVM function type used for the method's vtable
  // slot.
  llvm::Type *Ty = getTypes().GetFunctionTypeForVTable(GD);
  return GetOrCreateLLVMFunction(Name, Ty, GD, /*ForVTable=*/true,
                                 /*DontDefer=*/true, /*IsThunk=*/true);
}
53 
/// Give a thunk the same visibility as the method it forwards to.
/// NOTE: the Thunk parameter is currently unused; visibility is derived
/// solely from the method declaration.
static void setThunkVisibility(CodeGenModule &CGM, const CXXMethodDecl *MD,
                               const ThunkInfo &Thunk, llvm::Function *Fn) {
  CGM.setGlobalVisibility(Fn, MD);
}
58 
59 #ifndef NDEBUG
60 static bool similar(const ABIArgInfo &infoL, CanQualType typeL,
61                     const ABIArgInfo &infoR, CanQualType typeR) {
62   return (infoL.getKind() == infoR.getKind() &&
63           (typeL == typeR ||
64            (isa<PointerType>(typeL) && isa<PointerType>(typeR)) ||
65            (isa<ReferenceType>(typeL) && isa<ReferenceType>(typeR))));
66 }
67 #endif
68 
/// Apply a thunk's return adjustment (Thunk.Return) to the value that was
/// just returned by the callee.
///
/// \param ResultType the AST return type; used only to decide whether a
///        null check is required (references can never be null, pointers
///        can).
/// \param RV the scalar returned value.
/// \returns the adjusted value; when a null check was emitted, a null
///          input maps to a null output.
static RValue PerformReturnAdjustment(CodeGenFunction &CGF,
                                      QualType ResultType, RValue RV,
                                      const ThunkInfo &Thunk) {
  // Emit the return adjustment.
  bool NullCheckValue = !ResultType->isReferenceType();

  llvm::BasicBlock *AdjustNull = nullptr;
  llvm::BasicBlock *AdjustNotNull = nullptr;
  llvm::BasicBlock *AdjustEnd = nullptr;

  llvm::Value *ReturnValue = RV.getScalarVal();

  if (NullCheckValue) {
    // Branch around the adjustment when the returned pointer is null, so
    // we never add a nonzero offset to a null pointer.
    AdjustNull = CGF.createBasicBlock("adjust.null");
    AdjustNotNull = CGF.createBasicBlock("adjust.notnull");
    AdjustEnd = CGF.createBasicBlock("adjust.end");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(ReturnValue);
    CGF.Builder.CreateCondBr(IsNull, AdjustNull, AdjustNotNull);
    CGF.EmitBlock(AdjustNotNull);
  }

  // Let the C++ ABI emit the actual pointer adjustment.
  ReturnValue = CGF.CGM.getCXXABI().performReturnAdjustment(CGF, ReturnValue,
                                                            Thunk.Return);

  if (NullCheckValue) {
    // Merge the adjusted and null paths with a phi.
    CGF.Builder.CreateBr(AdjustEnd);
    CGF.EmitBlock(AdjustNull);
    CGF.Builder.CreateBr(AdjustEnd);
    CGF.EmitBlock(AdjustEnd);

    llvm::PHINode *PHI = CGF.Builder.CreatePHI(ReturnValue->getType(), 2);
    PHI->addIncoming(ReturnValue, AdjustNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(ReturnValue->getType()),
                     AdjustNull);
    ReturnValue = PHI;
  }

  return RValue::get(ReturnValue);
}
109 
110 // This function does roughly the same thing as GenerateThunk, but in a
111 // very different way, so that va_start and va_end work correctly.
112 // FIXME: This function assumes "this" is the first non-sret LLVM argument of
113 //        a function, and that there is an alloca built in the entry block
114 //        for all accesses to "this".
115 // FIXME: This function assumes there is only one "ret" statement per function.
116 // FIXME: Cloning isn't correct in the presence of indirect goto!
117 // FIXME: This implementation of thunks bloats codesize by duplicating the
118 //        function definition.  There are alternatives:
119 //        1. Add some sort of stub support to LLVM for cases where we can
120 //           do a this adjustment, then a sibcall.
121 //        2. We could transform the definition to take a va_list instead of an
122 //           actual variable argument list, then have the thunks (including a
123 //           no-op thunk for the regular definition) call va_start/va_end.
124 //           There's a bit of per-call overhead for this solution, but it's
125 //           better for codesize if the definition is long.
126 llvm::Function *
127 CodeGenFunction::GenerateVarArgsThunk(llvm::Function *Fn,
128                                       const CGFunctionInfo &FnInfo,
129                                       GlobalDecl GD, const ThunkInfo &Thunk) {
130   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
131   const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
132   QualType ResultType = FPT->getReturnType();
133 
134   // Get the original function
135   assert(FnInfo.isVariadic());
136   llvm::Type *Ty = CGM.getTypes().GetFunctionType(FnInfo);
137   llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
138   llvm::Function *BaseFn = cast<llvm::Function>(Callee);
139 
140   // Clone to thunk.
141   llvm::ValueToValueMapTy VMap;
142   llvm::Function *NewFn = llvm::CloneFunction(BaseFn, VMap,
143                                               /*ModuleLevelChanges=*/false);
144   CGM.getModule().getFunctionList().push_back(NewFn);
145   Fn->replaceAllUsesWith(NewFn);
146   NewFn->takeName(Fn);
147   Fn->eraseFromParent();
148   Fn = NewFn;
149 
150   // "Initialize" CGF (minimally).
151   CurFn = Fn;
152 
153   // Get the "this" value
154   llvm::Function::arg_iterator AI = Fn->arg_begin();
155   if (CGM.ReturnTypeUsesSRet(FnInfo))
156     ++AI;
157 
158   // Find the first store of "this", which will be to the alloca associated
159   // with "this".
160   llvm::Value *ThisPtr = &*AI;
161   llvm::BasicBlock *EntryBB = Fn->begin();
162   llvm::Instruction *ThisStore =
163       std::find_if(EntryBB->begin(), EntryBB->end(), [&](llvm::Instruction &I) {
164     return isa<llvm::StoreInst>(I) && I.getOperand(0) == ThisPtr;
165   });
166   assert(ThisStore && "Store of this should be in entry block?");
167   // Adjust "this", if necessary.
168   Builder.SetInsertPoint(ThisStore);
169   llvm::Value *AdjustedThisPtr =
170       CGM.getCXXABI().performThisAdjustment(*this, ThisPtr, Thunk.This);
171   ThisStore->setOperand(0, AdjustedThisPtr);
172 
173   if (!Thunk.Return.isEmpty()) {
174     // Fix up the returned value, if necessary.
175     for (llvm::Function::iterator I = Fn->begin(), E = Fn->end(); I != E; I++) {
176       llvm::Instruction *T = I->getTerminator();
177       if (isa<llvm::ReturnInst>(T)) {
178         RValue RV = RValue::get(T->getOperand(0));
179         T->eraseFromParent();
180         Builder.SetInsertPoint(&*I);
181         RV = PerformReturnAdjustment(*this, ResultType, RV, Thunk);
182         Builder.CreateRet(RV.getScalarVal());
183         break;
184       }
185     }
186   }
187 
188   return Fn;
189 }
190 
/// Emit the prologue of a thunk: record CurGD/CurFuncIsThunk, build the
/// formal argument list ('this', the method's parameters, and any implicit
/// structor parameters), and start the function definition.
void CodeGenFunction::StartThunk(llvm::Function *Fn, GlobalDecl GD,
                                 const CGFunctionInfo &FnInfo) {
  assert(!CurGD.getDecl() && "CurGD was already set!");
  CurGD = GD;
  CurFuncIsThunk = true;

  // Build FunctionArgs.
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  QualType ThisType = MD->getThisType(getContext());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  // Some ABIs return 'this' (or a most-derived pointer) from certain
  // methods; the thunk's effective result type must follow that convention.
  QualType ResultType = CGM.getCXXABI().HasThisReturn(GD)
                            ? ThisType
                            : CGM.getCXXABI().hasMostDerivedReturn(GD)
                                  ? CGM.getContext().VoidPtrTy
                                  : FPT->getReturnType();
  FunctionArgList FunctionArgs;

  // Create the implicit 'this' parameter declaration.
  CGM.getCXXABI().buildThisParam(*this, FunctionArgs);

  // Add the rest of the parameters.
  FunctionArgs.append(MD->param_begin(), MD->param_end());

  // Destructors may carry extra implicit parameters, depending on the ABI.
  if (isa<CXXDestructorDecl>(MD))
    CGM.getCXXABI().addImplicitStructorParams(*this, ResultType, FunctionArgs);

  // Start defining the function.
  StartFunction(GlobalDecl(), ResultType, Fn, FnInfo, FunctionArgs,
                MD->getLocation(), MD->getLocation());

  // Since we didn't pass a GlobalDecl to StartFunction, do this ourselves.
  CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
  CXXThisValue = CXXABIThisValue;
}
225 
/// Emit the body of a thunk as a call to \p Callee followed by a return:
/// adjust 'this' (when \p Thunk is non-null), forward all formal
/// parameters, apply any return adjustment, and emit the return.
/// When \p Thunk is null, no this/return adjustments are applied.
void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Value *Callee,
                                                const ThunkInfo *Thunk) {
  assert(isa<CXXMethodDecl>(CurGD.getDecl()) &&
         "Please use a new CGF for this thunk");
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(CurGD.getDecl());

  // Adjust the 'this' pointer if necessary
  llvm::Value *AdjustedThisPtr = Thunk ? CGM.getCXXABI().performThisAdjustment(
                                             *this, LoadCXXThis(), Thunk->This)
                                       : LoadCXXThis();

  // inalloca argument packs cannot be re-emitted; forward them verbatim
  // with a musttail call instead.
  if (CurFnInfo->usesInAlloca()) {
    // We don't handle return adjusting thunks, because they require us to call
    // the copy constructor.  For now, fall through and pretend the return
    // adjustment was empty so we don't crash.
    if (Thunk && !Thunk->Return.isEmpty()) {
      CGM.ErrorUnsupported(
          MD, "non-trivial argument copy for return-adjusting thunk");
    }
    EmitMustTailThunk(MD, AdjustedThisPtr, Callee);
    return;
  }

  // Start building CallArgs.
  CallArgList CallArgs;
  QualType ThisType = MD->getThisType(getContext());
  CallArgs.add(RValue::get(AdjustedThisPtr), ThisType);

  if (isa<CXXDestructorDecl>(MD))
    CGM.getCXXABI().adjustCallArgsForDestructorThunk(*this, CurGD, CallArgs);

  // Add the rest of the arguments, delegating each of the thunk's formal
  // parameters straight through to the callee.
  for (const ParmVarDecl *PD : MD->params())
    EmitDelegateCallArg(CallArgs, PD, PD->getLocStart());

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

#ifndef NDEBUG
  // Sanity check: the call we are about to make must be ABI-compatible with
  // the thunk's own signature, or forwarding arguments untouched is wrong.
  const CGFunctionInfo &CallFnInfo =
    CGM.getTypes().arrangeCXXMethodCall(CallArgs, FPT,
                                       RequiredArgs::forPrototypePlus(FPT, 1));
  assert(CallFnInfo.getRegParm() == CurFnInfo->getRegParm() &&
         CallFnInfo.isNoReturn() == CurFnInfo->isNoReturn() &&
         CallFnInfo.getCallingConvention() == CurFnInfo->getCallingConvention());
  assert(isa<CXXDestructorDecl>(MD) || // ignore dtor return types
         similar(CallFnInfo.getReturnInfo(), CallFnInfo.getReturnType(),
                 CurFnInfo->getReturnInfo(), CurFnInfo->getReturnType()));
  assert(CallFnInfo.arg_size() == CurFnInfo->arg_size());
  for (unsigned i = 0, e = CurFnInfo->arg_size(); i != e; ++i)
    assert(similar(CallFnInfo.arg_begin()[i].info,
                   CallFnInfo.arg_begin()[i].type,
                   CurFnInfo->arg_begin()[i].info,
                   CurFnInfo->arg_begin()[i].type));
#endif

  // Determine whether we have a return value slot to use.
  QualType ResultType = CGM.getCXXABI().HasThisReturn(CurGD)
                            ? ThisType
                            : CGM.getCXXABI().hasMostDerivedReturn(CurGD)
                                  ? CGM.getContext().VoidPtrTy
                                  : FPT->getReturnType();
  ReturnValueSlot Slot;
  if (!ResultType->isVoidType() &&
      CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
      !hasScalarEvaluationKind(CurFnInfo->getReturnType()))
    Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());

  // Now emit our call.
  llvm::Instruction *CallOrInvoke;
  RValue RV = EmitCall(*CurFnInfo, Callee, Slot, CallArgs, MD, &CallOrInvoke);

  // Consider return adjustment if we have ThunkInfo.
  if (Thunk && !Thunk->Return.isEmpty())
    RV = PerformReturnAdjustment(*this, ResultType, RV, *Thunk);

  // Emit return.
  if (!ResultType->isVoidType() && Slot.isNull())
    CGM.getCXXABI().EmitReturnFromThunk(*this, RV, ResultType);

  // Disable the final ARC autorelease.
  AutoreleaseResult = false;

  FinishFunction();
}
310 
/// Emit a thunk as a single musttail call, forwarding the thunk's own LLVM
/// arguments in place instead of re-emitting them through CGCall.  Used by
/// EmitCallAndReturnForThunk when the signature uses inalloca.
void CodeGenFunction::EmitMustTailThunk(const CXXMethodDecl *MD,
                                        llvm::Value *AdjustedThisPtr,
                                        llvm::Value *Callee) {
  // Emitting a musttail call thunk doesn't use any of the CGCall.cpp machinery
  // to translate AST arguments into LLVM IR arguments.  For thunks, we know
  // that the caller prototype more or less matches the callee prototype with
  // the exception of 'this'.
  SmallVector<llvm::Value *, 8> Args;
  for (llvm::Argument &A : CurFn->args())
    Args.push_back(&A);

  // Set the adjusted 'this' pointer.
  const ABIArgInfo &ThisAI = CurFnInfo->arg_begin()->info;
  if (ThisAI.isDirect()) {
    // 'this' is a direct argument: overwrite the forwarded value.  It
    // follows the sret pointer, if any (unless sret is placed after 'this').
    const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
    int ThisArgNo = RetAI.isIndirect() && !RetAI.isSRetAfterThis() ? 1 : 0;
    llvm::Type *ThisType = Args[ThisArgNo]->getType();
    if (ThisType != AdjustedThisPtr->getType())
      AdjustedThisPtr = Builder.CreateBitCast(AdjustedThisPtr, ThisType);
    Args[ThisArgNo] = AdjustedThisPtr;
  } else {
    // 'this' lives in the inalloca pack: store the adjusted value into its
    // slot so the forwarded pack carries it.
    assert(ThisAI.isInAlloca() && "this is passed directly or inalloca");
    llvm::Value *ThisAddr = GetAddrOfLocalVar(CXXABIThisDecl);
    llvm::Type *ThisType =
        cast<llvm::PointerType>(ThisAddr->getType())->getElementType();
    if (ThisType != AdjustedThisPtr->getType())
      AdjustedThisPtr = Builder.CreateBitCast(AdjustedThisPtr, ThisType);
    Builder.CreateStore(AdjustedThisPtr, ThisAddr);
  }

  // Emit the musttail call manually.  Even if the prologue pushed cleanups, we
  // don't actually want to run them.
  llvm::CallInst *Call = Builder.CreateCall(Callee, Args);
  Call->setTailCallKind(llvm::CallInst::TCK_MustTail);

  // Apply the standard set of call attributes.
  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(*CurFnInfo, MD, AttributeList, CallingConv,
                             /*AttrOnCallSite=*/true);
  llvm::AttributeSet Attrs =
      llvm::AttributeSet::get(getLLVMContext(), AttributeList);
  Call->setAttributes(Attrs);
  Call->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // A musttail call must be immediately followed by the return.
  if (Call->getType()->isVoidTy())
    Builder.CreateRetVoid();
  else
    Builder.CreateRet(Call);

  // Finish the function to maintain CodeGenFunction invariants.
  // FIXME: Don't emit unreachable code.
  EmitBlock(createBasicBlock());
  FinishFunction();
}
366 
367 void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
368                                     const CGFunctionInfo &FnInfo,
369                                     GlobalDecl GD, const ThunkInfo &Thunk) {
370   StartThunk(Fn, GD, FnInfo);
371 
372   // Get our callee.
373   llvm::Type *Ty =
374     CGM.getTypes().GetFunctionType(CGM.getTypes().arrangeGlobalDeclaration(GD));
375   llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
376 
377   // Make the call and return the result.
378   EmitCallAndReturnForThunk(Callee, &Thunk);
379 
380   // Set the right linkage.
381   CGM.setFunctionLinkage(GD, Fn);
382 
383   // Set the right visibility.
384   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
385   setThunkVisibility(CGM, MD, Thunk, Fn);
386 }
387 
/// Emit the definition of the thunk described by (GD, Thunk).
///
/// \param ForVTable true when the thunk is emitted on behalf of a vtable
///        definition (allowing available_externally emission in key-function
///        ABIs) rather than alongside the method's own definition.
void CodeGenVTables::emitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
                               bool ForVTable) {
  const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeGlobalDeclaration(GD);

  // FIXME: re-use FnInfo in this computation.
  llvm::Constant *C = CGM.GetAddrOfThunk(GD, Thunk);
  llvm::GlobalValue *Entry;

  // Strip off a bitcast if we got one back.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(C)) {
    assert(CE->getOpcode() == llvm::Instruction::BitCast);
    Entry = cast<llvm::GlobalValue>(CE->getOperand(0));
  } else {
    Entry = cast<llvm::GlobalValue>(C);
  }

  // There's already a declaration with the same name, check if it has the same
  // type or if we need to replace it.
  if (Entry->getType()->getElementType() !=
      CGM.getTypes().GetFunctionTypeForVTable(GD)) {
    llvm::GlobalValue *OldThunkFn = Entry;

    // If the types mismatch then we have to rewrite the definition.
    assert(OldThunkFn->isDeclaration() &&
           "Shouldn't replace non-declaration");

    // Remove the name from the old thunk function and get a new thunk.
    OldThunkFn->setName(StringRef());
    Entry = cast<llvm::GlobalValue>(CGM.GetAddrOfThunk(GD, Thunk));

    // If needed, replace the old thunk with a bitcast.
    if (!OldThunkFn->use_empty()) {
      llvm::Constant *NewPtrForOldDecl =
        llvm::ConstantExpr::getBitCast(Entry, OldThunkFn->getType());
      OldThunkFn->replaceAllUsesWith(NewPtrForOldDecl);
    }

    // Remove the old thunk.
    OldThunkFn->eraseFromParent();
  }

  llvm::Function *ThunkFn = cast<llvm::Function>(Entry);
  bool ABIHasKeyFunctions = CGM.getTarget().getCXXABI().hasKeyFunctions();
  bool UseAvailableExternallyLinkage = ForVTable && ABIHasKeyFunctions;

  if (!ThunkFn->isDeclaration()) {
    if (!ABIHasKeyFunctions || UseAvailableExternallyLinkage) {
      // There is already a thunk emitted for this function, do nothing.
      return;
    }

    // Change the linkage.  (The existing definition was presumably emitted
    // available_externally for a vtable; promote it to real linkage.)
    CGM.setFunctionLinkage(GD, ThunkFn);
    return;
  }

  CGM.SetLLVMFunctionAttributesForDefinition(GD.getDecl(), ThunkFn);

  if (ThunkFn->isVarArg()) {
    // Varargs thunks are special; we can't just generate a call because
    // we can't copy the varargs.  Our implementation is rather
    // expensive/sucky at the moment, so don't generate the thunk unless
    // we have to.
    // FIXME: Do something better here; GenerateVarArgsThunk is extremely ugly.
    if (UseAvailableExternallyLinkage)
      return;
    ThunkFn =
        CodeGenFunction(CGM).GenerateVarArgsThunk(ThunkFn, FnInfo, GD, Thunk);
  } else {
    // Normal thunk body generation.
    CodeGenFunction(CGM).GenerateThunk(ThunkFn, FnInfo, GD, Thunk);
  }

  CGM.getCXXABI().setThunkLinkage(ThunkFn, ForVTable, GD,
                                  !Thunk.Return.isEmpty());
  // Place weak thunks in a COMDAT so the linker can fold duplicates.
  if (CGM.supportsCOMDAT() && ThunkFn->isWeakForLinker())
    ThunkFn->setComdat(CGM.getModule().getOrInsertComdat(ThunkFn->getName()));
}
466 
467 void CodeGenVTables::maybeEmitThunkForVTable(GlobalDecl GD,
468                                              const ThunkInfo &Thunk) {
469   // If the ABI has key functions, only the TU with the key function should emit
470   // the thunk. However, we can allow inlining of thunks if we emit them with
471   // available_externally linkage together with vtables when optimizations are
472   // enabled.
473   if (CGM.getTarget().getCXXABI().hasKeyFunctions() &&
474       !CGM.getCodeGenOpts().OptimizationLevel)
475     return;
476 
477   // We can't emit thunks for member functions with incomplete types.
478   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
479   if (!CGM.getTypes().isFuncTypeConvertible(
480            MD->getType()->castAs<FunctionType>()))
481     return;
482 
483   emitThunk(GD, Thunk, /*ForVTable=*/true);
484 }
485 
486 void CodeGenVTables::EmitThunks(GlobalDecl GD)
487 {
488   const CXXMethodDecl *MD =
489     cast<CXXMethodDecl>(GD.getDecl())->getCanonicalDecl();
490 
491   // We don't need to generate thunks for the base destructor.
492   if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
493     return;
494 
495   const VTableContextBase::ThunkInfoVectorTy *ThunkInfoVector =
496       VTContext->getThunkInfo(GD);
497 
498   if (!ThunkInfoVector)
499     return;
500 
501   for (unsigned I = 0, E = ThunkInfoVector->size(); I != E; ++I)
502     emitThunk(GD, (*ThunkInfoVector)[I], /*ForVTable=*/false);
503 }
504 
505 llvm::Constant *CodeGenVTables::CreateVTableInitializer(
506     const CXXRecordDecl *RD, const VTableComponent *Components,
507     unsigned NumComponents, const VTableLayout::VTableThunkTy *VTableThunks,
508     unsigned NumVTableThunks, llvm::Constant *RTTI) {
509   SmallVector<llvm::Constant *, 64> Inits;
510 
511   llvm::Type *Int8PtrTy = CGM.Int8PtrTy;
512 
513   llvm::Type *PtrDiffTy =
514     CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
515 
516   unsigned NextVTableThunkIndex = 0;
517 
518   llvm::Constant *PureVirtualFn = nullptr, *DeletedVirtualFn = nullptr;
519 
520   for (unsigned I = 0; I != NumComponents; ++I) {
521     VTableComponent Component = Components[I];
522 
523     llvm::Constant *Init = nullptr;
524 
525     switch (Component.getKind()) {
526     case VTableComponent::CK_VCallOffset:
527       Init = llvm::ConstantInt::get(PtrDiffTy,
528                                     Component.getVCallOffset().getQuantity());
529       Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
530       break;
531     case VTableComponent::CK_VBaseOffset:
532       Init = llvm::ConstantInt::get(PtrDiffTy,
533                                     Component.getVBaseOffset().getQuantity());
534       Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
535       break;
536     case VTableComponent::CK_OffsetToTop:
537       Init = llvm::ConstantInt::get(PtrDiffTy,
538                                     Component.getOffsetToTop().getQuantity());
539       Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
540       break;
541     case VTableComponent::CK_RTTI:
542       Init = llvm::ConstantExpr::getBitCast(RTTI, Int8PtrTy);
543       break;
544     case VTableComponent::CK_FunctionPointer:
545     case VTableComponent::CK_CompleteDtorPointer:
546     case VTableComponent::CK_DeletingDtorPointer: {
547       GlobalDecl GD;
548 
549       // Get the right global decl.
550       switch (Component.getKind()) {
551       default:
552         llvm_unreachable("Unexpected vtable component kind");
553       case VTableComponent::CK_FunctionPointer:
554         GD = Component.getFunctionDecl();
555         break;
556       case VTableComponent::CK_CompleteDtorPointer:
557         GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Complete);
558         break;
559       case VTableComponent::CK_DeletingDtorPointer:
560         GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Deleting);
561         break;
562       }
563 
564       if (cast<CXXMethodDecl>(GD.getDecl())->isPure()) {
565         // We have a pure virtual member function.
566         if (!PureVirtualFn) {
567           llvm::FunctionType *Ty =
568             llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
569           StringRef PureCallName = CGM.getCXXABI().GetPureVirtualCallName();
570           PureVirtualFn = CGM.CreateRuntimeFunction(Ty, PureCallName);
571           PureVirtualFn = llvm::ConstantExpr::getBitCast(PureVirtualFn,
572                                                          CGM.Int8PtrTy);
573         }
574         Init = PureVirtualFn;
575       } else if (cast<CXXMethodDecl>(GD.getDecl())->isDeleted()) {
576         if (!DeletedVirtualFn) {
577           llvm::FunctionType *Ty =
578             llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
579           StringRef DeletedCallName =
580             CGM.getCXXABI().GetDeletedVirtualCallName();
581           DeletedVirtualFn = CGM.CreateRuntimeFunction(Ty, DeletedCallName);
582           DeletedVirtualFn = llvm::ConstantExpr::getBitCast(DeletedVirtualFn,
583                                                          CGM.Int8PtrTy);
584         }
585         Init = DeletedVirtualFn;
586       } else {
587         // Check if we should use a thunk.
588         if (NextVTableThunkIndex < NumVTableThunks &&
589             VTableThunks[NextVTableThunkIndex].first == I) {
590           const ThunkInfo &Thunk = VTableThunks[NextVTableThunkIndex].second;
591 
592           maybeEmitThunkForVTable(GD, Thunk);
593           Init = CGM.GetAddrOfThunk(GD, Thunk);
594 
595           NextVTableThunkIndex++;
596         } else {
597           llvm::Type *Ty = CGM.getTypes().GetFunctionTypeForVTable(GD);
598 
599           Init = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
600         }
601 
602         Init = llvm::ConstantExpr::getBitCast(Init, Int8PtrTy);
603       }
604       break;
605     }
606 
607     case VTableComponent::CK_UnusedFunctionPointer:
608       Init = llvm::ConstantExpr::getNullValue(Int8PtrTy);
609       break;
610     };
611 
612     Inits.push_back(Init);
613   }
614 
615   llvm::ArrayType *ArrayType = llvm::ArrayType::get(Int8PtrTy, NumComponents);
616   return llvm::ConstantArray::get(ArrayType, Inits);
617 }
618 
/// Emit the construction vtable for RD-in-Base, i.e. the vtable used while
/// a Base subobject of an RD object is being constructed or destroyed.
/// Fills \p AddressPoints with the layout's address points and returns the
/// new (or replaced) global variable.
llvm::GlobalVariable *
CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
                                      const BaseSubobject &Base,
                                      bool BaseIsVirtual,
                                   llvm::GlobalVariable::LinkageTypes Linkage,
                                      VTableAddressPointsMapTy& AddressPoints) {
  // Debug info needs the base class data to be complete.
  if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
    DI->completeClassData(Base.getBase());

  std::unique_ptr<VTableLayout> VTLayout(
      getItaniumVTableContext().createConstructionVTableLayout(
          Base.getBase(), Base.getBaseOffset(), BaseIsVirtual, RD));

  // Add the address points.
  AddressPoints = VTLayout->getAddressPoints();

  // Get the mangled construction vtable name.
  SmallString<256> OutName;
  llvm::raw_svector_ostream Out(OutName);
  cast<ItaniumMangleContext>(CGM.getCXXABI().getMangleContext())
      .mangleCXXCtorVTable(RD, Base.getBaseOffset().getQuantity(),
                           Base.getBase(), Out);
  Out.flush();
  StringRef Name = OutName.str();

  llvm::ArrayType *ArrayType =
    llvm::ArrayType::get(CGM.Int8PtrTy, VTLayout->getNumVTableComponents());

  // Construction vtable symbols are not part of the Itanium ABI, so we cannot
  // guarantee that they actually will be available externally. Instead, when
  // emitting an available_externally VTT, we provide references to an internal
  // linkage construction vtable. The ABI only requires complete-object vtables
  // to be the same for all instances of a type, not construction vtables.
  if (Linkage == llvm::GlobalVariable::AvailableExternallyLinkage)
    Linkage = llvm::GlobalVariable::InternalLinkage;

  // Create the variable that will hold the construction vtable.
  llvm::GlobalVariable *VTable =
    CGM.CreateOrReplaceCXXRuntimeVariable(Name, ArrayType, Linkage);
  CGM.setGlobalVisibility(VTable, RD);

  // V-tables are always unnamed_addr.
  VTable->setUnnamedAddr(true);

  llvm::Constant *RTTI = CGM.GetAddrOfRTTIDescriptor(
      CGM.getContext().getTagDeclType(Base.getBase()));

  // Create and set the initializer.
  llvm::Constant *Init = CreateVTableInitializer(
      Base.getBase(), VTLayout->vtable_component_begin(),
      VTLayout->getNumVTableComponents(), VTLayout->vtable_thunk_begin(),
      VTLayout->getNumVTableThunks(), RTTI);
  VTable->setInitializer(Init);

  CGM.EmitVTableBitSetEntries(VTable, *VTLayout.get());

  return VTable;
}
677 
/// Compute the required linkage of the v-table for the given class.
///
/// Note that we only call this at the end of the translation unit.
llvm::GlobalVariable::LinkageTypes
CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
  if (!RD->isExternallyVisible())
    return llvm::GlobalVariable::InternalLinkage;

  // We're at the end of the translation unit, so the current key
  // function is fully correct.
  const CXXMethodDecl *keyFunction = Context.getCurrentKeyFunction(RD);
  if (keyFunction && !RD->hasAttr<DLLImportAttr>()) {
    // If this class has a key function, use that to determine the
    // linkage of the vtable.
    const FunctionDecl *def = nullptr;
    if (keyFunction->hasBody(def))
      keyFunction = cast<CXXMethodDecl>(def);

    switch (keyFunction->getTemplateSpecializationKind()) {
      case TSK_Undeclared:
      case TSK_ExplicitSpecialization:
        assert(def && "Should not have been asked to emit this");
        // An inline key function gives the vtable vague (link-once)
        // linkage; -fapple-kext cannot use weak linkage, so use internal.
        if (keyFunction->isInlined())
          return !Context.getLangOpts().AppleKext ?
                   llvm::GlobalVariable::LinkOnceODRLinkage :
                   llvm::Function::InternalLinkage;

        return llvm::GlobalVariable::ExternalLinkage;

      case TSK_ImplicitInstantiation:
        return !Context.getLangOpts().AppleKext ?
                 llvm::GlobalVariable::LinkOnceODRLinkage :
                 llvm::Function::InternalLinkage;

      case TSK_ExplicitInstantiationDefinition:
        return !Context.getLangOpts().AppleKext ?
                 llvm::GlobalVariable::WeakODRLinkage :
                 llvm::Function::InternalLinkage;

      case TSK_ExplicitInstantiationDeclaration:
        llvm_unreachable("Should not have been asked to emit this");
    }
  }

  // -fapple-kext mode does not support weak linkage, so we must use
  // internal linkage.
  if (Context.getLangOpts().AppleKext)
    return llvm::Function::InternalLinkage;

  // No key function (or a dll-imported class): pick between a linkage the
  // linker may discard and one it must keep, based on dll attributes.
  llvm::GlobalVariable::LinkageTypes DiscardableODRLinkage =
      llvm::GlobalValue::LinkOnceODRLinkage;
  llvm::GlobalVariable::LinkageTypes NonDiscardableODRLinkage =
      llvm::GlobalValue::WeakODRLinkage;
  if (RD->hasAttr<DLLExportAttr>()) {
    // Cannot discard exported vtables.
    DiscardableODRLinkage = NonDiscardableODRLinkage;
  } else if (RD->hasAttr<DLLImportAttr>()) {
    // Imported vtables are available externally.
    DiscardableODRLinkage = llvm::GlobalVariable::AvailableExternallyLinkage;
    NonDiscardableODRLinkage = llvm::GlobalVariable::AvailableExternallyLinkage;
  }

  switch (RD->getTemplateSpecializationKind()) {
  case TSK_Undeclared:
  case TSK_ExplicitSpecialization:
  case TSK_ImplicitInstantiation:
    return DiscardableODRLinkage;

  case TSK_ExplicitInstantiationDeclaration:
    return llvm::GlobalVariable::ExternalLinkage;

  case TSK_ExplicitInstantiationDefinition:
    return NonDiscardableODRLinkage;
  }

  llvm_unreachable("Invalid TemplateSpecializationKind!");
}
755 
/// This is a callback from Sema to tell us that a particular v-table is
757 /// required to be emitted in this translation unit.
758 ///
759 /// This is only called for vtables that _must_ be emitted (mainly due to key
760 /// functions).  For weak vtables, CodeGen tracks when they are needed and
761 /// emits them as-needed.
void CodeGenModule::EmitVTable(CXXRecordDecl *theClass) {
  // Delegate to CodeGenVTables, which drives emission of the class's
  // vtable data for the current translation unit.
  VTables.GenerateClassData(theClass);
}
765 
/// Emit the vtable-related data structures for a dynamic class.
void
CodeGenVTables::GenerateClassData(const CXXRecordDecl *RD) {
  // Make sure the debug-info description of the class is completed before
  // its vtable data is emitted.
  if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
    DI->completeClassData(RD);

  // Classes with virtual bases also need the ABI's virtual-inheritance
  // tables (presumably VTTs under the Itanium ABI — ABI-specific).
  if (RD->getNumVBases())
    CGM.getCXXABI().emitVirtualInheritanceTables(RD);

  // Emit the vtable definition(s) themselves through the C++ ABI layer.
  CGM.getCXXABI().emitVTableDefinitions(*this, RD);
}
776 
/// At this point in the translation unit, does it appear that we can
778 /// rely on the vtable being defined elsewhere in the program?
779 ///
780 /// The response is really only definitive when called at the end of
781 /// the translation unit.
782 ///
783 /// The only semantic restriction here is that the object file should
784 /// not contain a v-table definition when that v-table is defined
785 /// strongly elsewhere.  Otherwise, we'd just like to avoid emitting
786 /// v-tables when unnecessary.
787 bool CodeGenVTables::isVTableExternal(const CXXRecordDecl *RD) {
788   assert(RD->isDynamicClass() && "Non-dynamic classes have no VTable.");
789 
790   // If we have an explicit instantiation declaration (and not a
791   // definition), the v-table is defined elsewhere.
792   TemplateSpecializationKind TSK = RD->getTemplateSpecializationKind();
793   if (TSK == TSK_ExplicitInstantiationDeclaration)
794     return true;
795 
796   // Otherwise, if the class is an instantiated template, the
797   // v-table must be defined here.
798   if (TSK == TSK_ImplicitInstantiation ||
799       TSK == TSK_ExplicitInstantiationDefinition)
800     return false;
801 
802   // Otherwise, if the class doesn't have a key function (possibly
803   // anymore), the v-table must be defined here.
804   const CXXMethodDecl *keyFunction = CGM.getContext().getCurrentKeyFunction(RD);
805   if (!keyFunction)
806     return false;
807 
808   // Otherwise, if we don't have a definition of the key function, the
809   // v-table must be defined somewhere else.
810   return !keyFunction->hasBody();
811 }
812 
813 /// Given that we're currently at the end of the translation unit, and
814 /// we've emitted a reference to the v-table for this class, should
815 /// we define that v-table?
816 static bool shouldEmitVTableAtEndOfTranslationUnit(CodeGenModule &CGM,
817                                                    const CXXRecordDecl *RD) {
818   return !CGM.getVTables().isVTableExternal(RD);
819 }
820 
821 /// Given that at some point we emitted a reference to one or more
822 /// v-tables, and that we are now at the end of the translation unit,
823 /// decide whether we should emit them.
824 void CodeGenModule::EmitDeferredVTables() {
825 #ifndef NDEBUG
826   // Remember the size of DeferredVTables, because we're going to assume
827   // that this entire operation doesn't modify it.
828   size_t savedSize = DeferredVTables.size();
829 #endif
830 
831   typedef std::vector<const CXXRecordDecl *>::const_iterator const_iterator;
832   for (const_iterator i = DeferredVTables.begin(),
833                       e = DeferredVTables.end(); i != e; ++i) {
834     const CXXRecordDecl *RD = *i;
835     if (shouldEmitVTableAtEndOfTranslationUnit(*this, RD))
836       VTables.GenerateClassData(RD);
837   }
838 
839   assert(savedSize == DeferredVTables.size() &&
840          "deferred extra v-tables during v-table emission?");
841   DeferredVTables.clear();
842 }
843 
bool CodeGenModule::IsCFIBlacklistedRecord(const CXXRecordDecl *RD) {
  // NOTE(review): records in namespace std are exempt from CFI bitset
  // generation — presumably because standard-library vtables may come from
  // code built without CFI; confirm the rationale before extending this.
  // FIXME: Make this user configurable.
  return RD->isInStdNamespace();
}
848 
849 void CodeGenModule::EmitVTableBitSetEntries(llvm::GlobalVariable *VTable,
850                                             const VTableLayout &VTLayout) {
851   if (!LangOpts.Sanitize.has(SanitizerKind::CFIVCall) &&
852       !LangOpts.Sanitize.has(SanitizerKind::CFINVCall) &&
853       !LangOpts.Sanitize.has(SanitizerKind::CFIDerivedCast) &&
854       !LangOpts.Sanitize.has(SanitizerKind::CFIUnrelatedCast))
855     return;
856 
857   CharUnits PointerWidth =
858       Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
859 
860   std::vector<llvm::MDTuple *> BitsetEntries;
861   // Create a bit set entry for each address point.
862   for (auto &&AP : VTLayout.getAddressPoints()) {
863     if (IsCFIBlacklistedRecord(AP.first.getBase()))
864       continue;
865 
866     BitsetEntries.push_back(CreateVTableBitSetEntry(
867         VTable, PointerWidth * AP.second, AP.first.getBase()));
868   }
869 
870   // Sort the bit set entries for determinism.
871   std::sort(BitsetEntries.begin(), BitsetEntries.end(), [](llvm::MDTuple *T1,
872                                                            llvm::MDTuple *T2) {
873     if (T1 == T2)
874       return false;
875 
876     StringRef S1 = cast<llvm::MDString>(T1->getOperand(0))->getString();
877     StringRef S2 = cast<llvm::MDString>(T2->getOperand(0))->getString();
878     if (S1 < S2)
879       return true;
880     if (S1 != S2)
881       return false;
882 
883     uint64_t Offset1 = cast<llvm::ConstantInt>(
884                            cast<llvm::ConstantAsMetadata>(T1->getOperand(2))
885                                ->getValue())->getZExtValue();
886     uint64_t Offset2 = cast<llvm::ConstantInt>(
887                            cast<llvm::ConstantAsMetadata>(T2->getOperand(2))
888                                ->getValue())->getZExtValue();
889     assert(Offset1 != Offset2);
890     return Offset1 < Offset2;
891   });
892 
893   llvm::NamedMDNode *BitsetsMD =
894       getModule().getOrInsertNamedMetadata("llvm.bitsets");
895   for (auto BitsetEntry : BitsetEntries)
896     BitsetsMD->addOperand(BitsetEntry);
897 }
898