//===--- CGDeclCXX.cpp - Emit LLVM Code for C++ declarations --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ declarations
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Path.h"

using namespace clang;
using namespace CodeGen;

/// Emit the dynamic initializer for a non-reference global variable whose
/// storage is at \p DeclPtr, dispatching on the evaluation kind of the
/// variable's type (scalar, complex, or aggregate).  References are handled
/// separately by EmitCXXGlobalVarDeclInit.
static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
                         ConstantAddress DeclPtr) {
  assert(D.hasGlobalStorage() && "VarDecl must have global storage!");
  assert(!D.getType()->isReferenceType() &&
         "Should not call EmitDeclInit on a reference!");

  QualType type = D.getType();
  LValue lv = CGF.MakeAddrLValue(DeclPtr, type);

  const Expr *Init = D.getInit();
  switch (CGF.getEvaluationKind(type)) {
  case TEK_Scalar: {
    CodeGenModule &CGM = CGF.CGM;
    // Objective-C GC strong/weak globals must go through the runtime's
    // assignment entry points rather than a plain store.
    if (lv.isObjCStrong())
      CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, CGF.EmitScalarExpr(Init),
                                                DeclPtr, D.getTLSKind());
    else if (lv.isObjCWeak())
      CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, CGF.EmitScalarExpr(Init),
                                              DeclPtr);
    else
      CGF.EmitScalarInit(Init, &D, lv, false);
    return;
  }
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, lv, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(Init, AggValueSlot::forLValue(lv,AggValueSlot::IsDestructed,
                                          AggValueSlot::DoesNotNeedGCBarriers,
                                                  AggValueSlot::IsNotAliased));
    return;
  }
  llvm_unreachable("bad evaluation kind");
}

/// Emit code to cause the destruction of the given variable with
/// static storage duration.
static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
                            ConstantAddress addr) {
  CodeGenModule &CGM = CGF.CGM;

  // FIXME: __attribute__((cleanup)) ?

  QualType type = D.getType();
  QualType::DestructionKind dtorKind = type.isDestructedType();

  switch (dtorKind) {
  case QualType::DK_none:
    // Trivially destructible: nothing to register.
    return;

  case QualType::DK_cxx_destructor:
    break;

  case QualType::DK_objc_strong_lifetime:
  case QualType::DK_objc_weak_lifetime:
    // We don't care about releasing objects during process teardown.
    assert(!D.getTLSKind() && "should have rejected this");
    return;
  }

  llvm::Constant *function;
  llvm::Constant *argument;

  // Special-case non-array C++ destructors, if they have the right signature.
  // Under some ABIs, destructors return this instead of void, and cannot be
  // passed directly to __cxa_atexit if the target does not allow this
  // mismatch.
  const CXXRecordDecl *Record = type->getAsCXXRecordDecl();
  bool CanRegisterDestructor =
      Record && (!CGM.getCXXABI().HasThisReturn(
                     GlobalDecl(Record->getDestructor(), Dtor_Complete)) ||
                 CGM.getCXXABI().canCallMismatchedFunctionType());
  // If __cxa_atexit is disabled via a flag, a different helper function is
  // generated elsewhere which uses atexit instead, and it takes the destructor
  // directly.
  bool UsingExternalHelper = !CGM.getCodeGenOpts().CXAAtExit;
  if (Record && (CanRegisterDestructor || UsingExternalHelper)) {
    assert(!Record->hasTrivialDestructor());
    CXXDestructorDecl *dtor = Record->getDestructor();

    // Register the complete-object destructor directly, with the variable's
    // address (bitcast to the destructor's expected pointer type) as the
    // argument.
    function = CGM.getAddrOfCXXStructor(dtor, StructorType::Complete);
    argument = llvm::ConstantExpr::getBitCast(
        addr.getPointer(), CGF.getTypes().ConvertType(type)->getPointerTo());

  // Otherwise, the standard logic requires a helper function.
  } else {
    function = CodeGenFunction(CGM)
        .generateDestroyHelper(addr, type, CGF.getDestroyer(dtorKind),
                               CGF.needsEHCleanup(dtorKind), &D);
    argument = llvm::Constant::getNullValue(CGF.Int8PtrTy);
  }

  CGM.getCXXABI().registerGlobalDtor(CGF, D, function, argument);
}

/// Emit code to cause the variable at the given address to be considered as
/// constant from this point onwards.
static void EmitDeclInvariant(CodeGenFunction &CGF, const VarDecl &D,
                              llvm::Constant *Addr) {
  // Do not emit the intrinsic if we're not optimizing.
  if (!CGF.CGM.getCodeGenOpts().OptimizationLevel)
    return;

  // Grab the llvm.invariant.start intrinsic.
  llvm::Intrinsic::ID InvStartID = llvm::Intrinsic::invariant_start;
  // Overloaded address space type.
  llvm::Type *ObjectPtr[1] = {CGF.Int8PtrTy};
  llvm::Constant *InvariantStart = CGF.CGM.getIntrinsic(InvStartID, ObjectPtr);

  // Emit a call with the size in bytes of the object.
  CharUnits WidthChars = CGF.getContext().getTypeSizeInChars(D.getType());
  uint64_t Width = WidthChars.getQuantity();
  llvm::Value *Args[2] = { llvm::ConstantInt::getSigned(CGF.Int64Ty, Width),
                           llvm::ConstantExpr::getBitCast(Addr, CGF.Int8PtrTy)};
  CGF.Builder.CreateCall(InvariantStart, Args);
}

/// Emit the initializer for a global variable: either bind a reference, or
/// run EmitDeclInit and then either mark the object invariant (if its type
/// is constant) or register its destructor.
void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
                                               llvm::Constant *DeclPtr,
                                               bool PerformInit) {

  const Expr *Init = D.getInit();
  QualType T = D.getType();

  // The address space of a static local variable (DeclPtr) may be different
  // from the address space of the "this" argument of the constructor. In that
  // case, we need an addrspacecast before calling the constructor.
  //
  //   struct StructWithCtor {
  //     __device__ StructWithCtor() {...}
  //   };
  //   __device__ void foo() {
  //     __shared__ StructWithCtor s;
  //     ...
  //   }
  //
  // For example, in the above CUDA code, the static local variable s has a
  // "shared" address space qualifier, but the constructor of StructWithCtor
  // expects "this" in the "generic" address space.
  unsigned ExpectedAddrSpace = getContext().getTargetAddressSpace(T);
  unsigned ActualAddrSpace = DeclPtr->getType()->getPointerAddressSpace();
  if (ActualAddrSpace != ExpectedAddrSpace) {
    llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(T);
    llvm::PointerType *PTy = llvm::PointerType::get(LTy, ExpectedAddrSpace);
    DeclPtr = llvm::ConstantExpr::getAddrSpaceCast(DeclPtr, PTy);
  }

  ConstantAddress DeclAddr(DeclPtr, getContext().getDeclAlign(&D));

  if (!T->isReferenceType()) {
    if (getLangOpts().OpenMP && D.hasAttr<OMPThreadPrivateDeclAttr>())
      (void)CGM.getOpenMPRuntime().emitThreadPrivateVarDefinition(
          &D, DeclAddr, D.getAttr<OMPThreadPrivateDeclAttr>()->getLocation(),
          PerformInit, this);
    if (PerformInit)
      EmitDeclInit(*this, D, DeclAddr);
    // A constant object never needs a destructor call, so mark it invariant
    // for the optimizer instead of registering a dtor.
    if (CGM.isTypeConstant(D.getType(), true))
      EmitDeclInvariant(*this, D, DeclPtr);
    else
      EmitDeclDestroy(*this, D, DeclAddr);
    return;
  }

  assert(PerformInit && "cannot have constant initializer which needs "
         "destruction for reference");
  RValue RV = EmitReferenceBindingToExpr(Init);
  EmitStoreOfScalar(RV.getScalarVal(), DeclAddr, false, T);
}

/// Create a stub function, suitable for being passed to atexit,
/// which passes the given address to the given destructor function.
llvm::Constant *CodeGenFunction::createAtExitStub(const VarDecl &VD,
                                                  llvm::Constant *dtor,
                                                  llvm::Constant *addr) {
  // Get the destructor function type, void(*)(void).
  llvm::FunctionType *ty = llvm::FunctionType::get(CGM.VoidTy, false);
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    CGM.getCXXABI().getMangleContext().mangleDynamicAtExitDestructor(&VD, Out);
  }

  const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
  llvm::Function *fn = CGM.CreateGlobalInitOrDestructFunction(ty, FnName.str(),
                                                              FI,
                                                              VD.getLocation());

  CodeGenFunction CGF(CGM);

  CGF.StartFunction(&VD, CGM.getContext().VoidTy, fn, FI, FunctionArgList());

  llvm::CallInst *call = CGF.Builder.CreateCall(dtor, addr);

  // Make sure the call and the callee agree on calling convention.
  if (llvm::Function *dtorFn =
        dyn_cast<llvm::Function>(dtor->stripPointerCasts()))
    call->setCallingConv(dtorFn->getCallingConv());

  CGF.FinishFunction();

  return fn;
}

/// Register a global destructor using the C atexit runtime function.
void CodeGenFunction::registerGlobalDtorWithAtExit(const VarDecl &VD,
                                                   llvm::Constant *dtor,
                                                   llvm::Constant *addr) {
  // Create a function which calls the destructor.
  llvm::Constant *dtorStub = createAtExitStub(VD, dtor, addr);

  // extern "C" int atexit(void (*f)(void));
  llvm::FunctionType *atexitTy =
    llvm::FunctionType::get(IntTy, dtorStub->getType(), false);

  llvm::Constant *atexit =
      CGM.CreateRuntimeFunction(atexitTy, "atexit", llvm::AttributeSet(),
                                /*Local=*/true);
  if (llvm::Function *atexitFn = dyn_cast<llvm::Function>(atexit))
    atexitFn->setDoesNotThrow();

  EmitNounwindRuntimeCall(atexit, dtorStub);
}

/// Emit a guarded (run-once) dynamic initialization for the given variable,
/// delegating the guard strategy to the C++ ABI.
void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
                                         llvm::GlobalVariable *DeclPtr,
                                         bool PerformInit) {
  // If we've been asked to forbid guard variables, emit an error now.
  // This diagnostic is hard-coded for Darwin's use case; we can find
  // better phrasing if someone else needs it.
  if (CGM.getCodeGenOpts().ForbidGuardVariables)
    CGM.Error(D.getLocation(),
              "this initialization requires a guard variable, which "
              "the kernel does not support");

  CGM.getCXXABI().EmitGuardedInit(*this, D, DeclPtr, PerformInit);
}

/// Create an internal-linkage function for global initialization or
/// destruction code, applying the static-init section, runtime calling
/// convention, nounwind, and sanitizer attributes such functions need.
llvm::Function *CodeGenModule::CreateGlobalInitOrDestructFunction(
    llvm::FunctionType *FTy, const Twine &Name, const CGFunctionInfo &FI,
    SourceLocation Loc, bool TLS) {
  llvm::Function *Fn =
    llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
                           Name, &getModule());
  if (!getLangOpts().AppleKext && !TLS) {
    // Set the section if needed.
    if (const char *Section = getTarget().getStaticInitSectionSpecifier())
      Fn->setSection(Section);
  }

  SetInternalFunctionAttributes(nullptr, Fn, FI);

  Fn->setCallingConv(getRuntimeCC());

  if (!getLangOpts().Exceptions)
    Fn->setDoesNotThrow();

  // Sanitizer instrumentation is applied unless this location is blacklisted.
  if (!isInSanitizerBlacklist(Fn, Loc)) {
    if (getLangOpts().Sanitize.hasOneOf(SanitizerKind::Address |
                                        SanitizerKind::KernelAddress))
      Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
    if (getLangOpts().Sanitize.has(SanitizerKind::Thread))
      Fn->addFnAttr(llvm::Attribute::SanitizeThread);
    if (getLangOpts().Sanitize.has(SanitizerKind::Memory))
      Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
    if (getLangOpts().Sanitize.has(SanitizerKind::SafeStack))
      Fn->addFnAttr(llvm::Attribute::SafeStack);
  }

  return Fn;
}

/// Create a global pointer to a function that will initialize a global
/// variable. The user has requested that this pointer be emitted in a specific
/// section.
void CodeGenModule::EmitPointerToInitFunc(const VarDecl *D,
                                          llvm::GlobalVariable *GV,
                                          llvm::Function *InitFunc,
                                          InitSegAttr *ISA) {
  llvm::GlobalVariable *PtrArray = new llvm::GlobalVariable(
      TheModule, InitFunc->getType(), /*isConstant=*/true,
      llvm::GlobalValue::PrivateLinkage, InitFunc, "__cxx_init_fn_ptr");
  PtrArray->setSection(ISA->getSection());
  // Keep the pointer alive through optimization/linking.
  addUsedGlobal(PtrArray);

  // If the GV is already in a comdat group, then we have to join it.
  if (llvm::Comdat *C = GV->getComdat())
    PtrArray->setComdat(C);
}

/// Create the dynamic initializer function for the given global variable and
/// arrange for it to run: via the thread_local list, an init_seg section, the
/// init_priority list, llvm.global_ctors (for unordered/selectany inits), or
/// the TU's ordered CXXGlobalInits list.
void
CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
                                            llvm::GlobalVariable *Addr,
                                            bool PerformInit) {

  // According to E.2.3.1 in CUDA-7.5 Programming guide: __device__,
  // __constant__ and __shared__ variables defined in namespace scope,
  // that are of class type, cannot have a non-empty constructor. All
  // the checks have been done in Sema by now. Whatever initializers
  // are allowed are empty and we just need to ignore them here.
  if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
      (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
       D->hasAttr<CUDASharedAttr>()))
    return;

  // Check if we've already initialized this decl (~0U is the "emitted"
  // sentinel stored at the bottom of this function).
  auto I = DelayedCXXInitPosition.find(D);
  if (I != DelayedCXXInitPosition.end() && I->second == ~0U)
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    getCXXABI().getMangleContext().mangleDynamicInitializer(D, Out);
  }

  // Create a variable initialization function.
  llvm::Function *Fn =
      CreateGlobalInitOrDestructFunction(FTy, FnName.str(),
                                         getTypes().arrangeNullaryFunction(),
                                         D->getLocation());

  auto *ISA = D->getAttr<InitSegAttr>();
  CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D, Addr,
                                                          PerformInit);

  llvm::GlobalVariable *COMDATKey =
      supportsCOMDAT() && D->isExternallyVisible() ? Addr : nullptr;

  if (D->getTLSKind()) {
    // FIXME: Should we support init_priority for thread_local?
    // FIXME: Ideally, initialization of instantiated thread_local static data
    // members of class templates should not trigger initialization of other
    // entities in the TU.
    // FIXME: We only need to register one __cxa_thread_atexit function for the
    // entire TU.
    CXXThreadLocalInits.push_back(Fn);
    CXXThreadLocalInitVars.push_back(D);
  } else if (PerformInit && ISA) {
    EmitPointerToInitFunc(D, Addr, Fn, ISA);
  } else if (auto *IPA = D->getAttr<InitPriorityAttr>()) {
    OrderGlobalInits Key(IPA->getPriority(), PrioritizedCXXGlobalInits.size());
    PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn));
  } else if (isTemplateInstantiation(D->getTemplateSpecializationKind())) {
    // C++ [basic.start.init]p2:
    //   Definitions of explicitly specialized class template static data
    //   members have ordered initialization. Other class template static data
    //   members (i.e., implicitly or explicitly instantiated specializations)
    //   have unordered initialization.
    //
    // As a consequence, we can put them into their own llvm.global_ctors entry.
    //
    // If the global is externally visible, put the initializer into a COMDAT
    // group with the global being initialized. On most platforms, this is a
    // minor startup time optimization. In the MS C++ ABI, there are no guard
    // variables, so this COMDAT key is required for correctness.
    AddGlobalCtor(Fn, 65535, COMDATKey);
  } else if (D->hasAttr<SelectAnyAttr>()) {
    // SelectAny globals will be comdat-folded. Put the initializer into a
    // COMDAT group associated with the global, so the initializers get folded
    // too.
    AddGlobalCtor(Fn, 65535, COMDATKey);
  } else {
    I = DelayedCXXInitPosition.find(D); // Re-do lookup in case of re-hash.
    if (I == DelayedCXXInitPosition.end()) {
      CXXGlobalInits.push_back(Fn);
    } else if (I->second != ~0U) {
      // A slot was reserved for this initializer earlier; fill it in now.
      assert(I->second < CXXGlobalInits.size() &&
             CXXGlobalInits[I->second] == nullptr);
      CXXGlobalInits[I->second] = Fn;
    }
  }

  // Remember that we already emitted the initializer for this global.
  DelayedCXXInitPosition[D] = ~0U;
}

/// Hand the collected thread_local variables and their initializers over to
/// the C++ ABI, then drop this module's lists.
void CodeGenModule::EmitCXXThreadLocalInitFunc() {
  getCXXABI().EmitThreadLocalInitFuncs(
      *this, CXXThreadLocals, CXXThreadLocalInits, CXXThreadLocalInitVars);

  CXXThreadLocalInits.clear();
  CXXThreadLocalInitVars.clear();
  CXXThreadLocals.clear();
}

/// Emit one _GLOBAL__I_<priority> function per distinct init_priority value,
/// plus the _GLOBAL__sub_I_<filename> function that runs the remaining
/// (unprioritized) dynamic initializers of this TU, and register them all as
/// global constructors.
void
CodeGenModule::EmitCXXGlobalInitFunc() {
  // Trailing null slots are reserved-but-unfilled entries; discard them.
  while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
    CXXGlobalInits.pop_back();

  if (CXXGlobalInits.empty() && PrioritizedCXXGlobalInits.empty())
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();

  // Create our global initialization function.
  if (!PrioritizedCXXGlobalInits.empty()) {
    SmallVector<llvm::Function *, 8> LocalCXXGlobalInits;
    llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
                         PrioritizedCXXGlobalInits.end());
    // Iterate over "chunks" of ctors with same priority and emit each chunk
    // into separate function. Note - everything is sorted first by priority,
    // second - by lex order, so we emit ctor functions in proper order.
    for (SmallVectorImpl<GlobalInitData >::iterator
           I = PrioritizedCXXGlobalInits.begin(),
           E = PrioritizedCXXGlobalInits.end(); I != E; ) {
      SmallVectorImpl<GlobalInitData >::iterator
        PrioE = std::upper_bound(I + 1, E, *I, GlobalInitPriorityCmp());

      LocalCXXGlobalInits.clear();
      unsigned Priority = I->first.priority;
      // Compute the function suffix from priority. Prepend with zeroes to make
      // sure the function names are also ordered as priorities.
      std::string PrioritySuffix = llvm::utostr(Priority);
      // Priority is always <= 65535 (enforced by sema).
      PrioritySuffix = std::string(6-PrioritySuffix.size(), '0')+PrioritySuffix;
      llvm::Function *Fn = CreateGlobalInitOrDestructFunction(
          FTy, "_GLOBAL__I_" + PrioritySuffix, FI);

      for (; I < PrioE; ++I)
        LocalCXXGlobalInits.push_back(I->second);

      CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, LocalCXXGlobalInits);
      AddGlobalCtor(Fn, Priority);
    }
    PrioritizedCXXGlobalInits.clear();
  }

  SmallString<128> FileName;
  SourceManager &SM = Context.getSourceManager();
  if (const FileEntry *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
    // Include the filename in the symbol name. Including "sub_" matches gcc and
    // makes sure these symbols appear lexicographically behind the symbols with
    // priority emitted above.
    FileName = llvm::sys::path::filename(MainFile->getName());
  } else {
    FileName = "<null>";
  }

  for (size_t i = 0; i < FileName.size(); ++i) {
    // Replace everything that's not [a-zA-Z0-9._] with a _. This set happens
    // to be the set of C preprocessing numbers.
    if (!isPreprocessingNumberBody(FileName[i]))
      FileName[i] = '_';
  }

  llvm::Function *Fn = CreateGlobalInitOrDestructFunction(
      FTy, llvm::Twine("_GLOBAL__sub_I_", FileName), FI);

  CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, CXXGlobalInits);
  AddGlobalCtor(Fn);

  CXXGlobalInits.clear();
}

/// Emit _GLOBAL__D_a, the function that runs this TU's static destructors,
/// and register it with the global destructor list.
void CodeGenModule::EmitCXXGlobalDtorFunc() {
  if (CXXGlobalDtors.empty())
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);

  // Create our global destructor function.
  const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();
  llvm::Function *Fn =
      CreateGlobalInitOrDestructFunction(FTy, "_GLOBAL__D_a", FI);

  CodeGenFunction(*this).GenerateCXXGlobalDtorsFunc(Fn, CXXGlobalDtors);
  AddGlobalDtor(Fn);
}

/// Emit the code necessary to initialize the given global variable.
void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
                                                       const VarDecl *D,
                                                 llvm::GlobalVariable *Addr,
                                                       bool PerformInit) {
  // Check if we need to emit debug info for variable initializer.
  if (D->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  CurEHLocation = D->getLocStart();

  StartFunction(GlobalDecl(D), getContext().VoidTy, Fn,
                getTypes().arrangeNullaryFunction(),
                FunctionArgList(), D->getLocation(),
                D->getInit()->getExprLoc());

  // Use guarded initialization if the global variable is weak. This
  // occurs for, e.g., instantiated static data members and
  // definitions explicitly marked weak.
  if (Addr->hasWeakLinkage() || Addr->hasLinkOnceLinkage()) {
    EmitCXXGuardedInit(*D, Addr, PerformInit);
  } else {
    EmitCXXGlobalVarDeclInit(*D, Addr, PerformInit);
  }

  FinishFunction();
}

/// Fill in the body of a global-init function: call each non-null function in
/// \p Decls in order, optionally protected by a load/store of \p Guard (used
/// for TLS init functions that may run more than once).
void
CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
                                           ArrayRef<llvm::Function *> Decls,
                                           Address Guard) {
  {
    auto NL = ApplyDebugLocation::CreateEmpty(*this);
    StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
                  getTypes().arrangeNullaryFunction(), FunctionArgList());
    // Emit an artificial location for this function.
    auto AL = ApplyDebugLocation::CreateArtificial(*this);

    llvm::BasicBlock *ExitBlock = nullptr;
    if (Guard.isValid()) {
      // If we have a guard variable, check whether we've already performed
      // these initializations. This happens for TLS initialization functions.
      llvm::Value *GuardVal = Builder.CreateLoad(Guard);
      llvm::Value *Uninit = Builder.CreateIsNull(GuardVal,
                                                 "guard.uninitialized");
      llvm::BasicBlock *InitBlock = createBasicBlock("init");
      ExitBlock = createBasicBlock("exit");
      Builder.CreateCondBr(Uninit, InitBlock, ExitBlock);
      EmitBlock(InitBlock);
      // Mark as initialized before initializing anything else. If the
      // initializers use previously-initialized thread_local vars, that's
      // probably supposed to be OK, but the standard doesn't say.
      Builder.CreateStore(llvm::ConstantInt::get(GuardVal->getType(),1), Guard);
    }

    RunCleanupsScope Scope(*this);

    // When building in Objective-C++ ARC mode, create an autorelease pool
    // around the global initializers.
    if (getLangOpts().ObjCAutoRefCount && getLangOpts().CPlusPlus) {
      llvm::Value *token = EmitObjCAutoreleasePoolPush();
      EmitObjCAutoreleasePoolCleanup(token);
    }

    for (unsigned i = 0, e = Decls.size(); i != e; ++i)
      if (Decls[i])
        EmitRuntimeCall(Decls[i]);

    Scope.ForceCleanup();

    if (ExitBlock) {
      Builder.CreateBr(ExitBlock);
      EmitBlock(ExitBlock);
    }
  }

  FinishFunction();
}

/// Fill in the body of the global-dtors function: call each registered
/// destructor on its object, in reverse order of registration.
void CodeGenFunction::GenerateCXXGlobalDtorsFunc(llvm::Function *Fn,
                  const std::vector<std::pair<llvm::WeakVH, llvm::Constant*> >
                                                &DtorsAndObjects) {
  {
    auto NL = ApplyDebugLocation::CreateEmpty(*this);
    StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
                  getTypes().arrangeNullaryFunction(), FunctionArgList());
    // Emit an artificial location for this function.
    auto AL = ApplyDebugLocation::CreateArtificial(*this);

    // Emit the dtors, in reverse order from construction.
    for (unsigned i = 0, e = DtorsAndObjects.size(); i != e; ++i) {
      llvm::Value *Callee = DtorsAndObjects[e - i - 1].first;
      llvm::CallInst *CI = Builder.CreateCall(Callee,
                                              DtorsAndObjects[e - i - 1].second);
      // Make sure the call and the callee agree on calling convention.
      if (llvm::Function *F = dyn_cast<llvm::Function>(Callee))
        CI->setCallingConv(F->getCallingConv());
    }
  }

  FinishFunction();
}

/// generateDestroyHelper - Generates a helper function which, when
/// invoked, destroys the given object. The address of the object
/// should be in global memory.
604 llvm::Function *CodeGenFunction::generateDestroyHelper( 605 Address addr, QualType type, Destroyer *destroyer, 606 bool useEHCleanupForArray, const VarDecl *VD) { 607 FunctionArgList args; 608 ImplicitParamDecl dst(getContext(), nullptr, SourceLocation(), nullptr, 609 getContext().VoidPtrTy); 610 args.push_back(&dst); 611 612 const CGFunctionInfo &FI = 613 CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, args); 614 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI); 615 llvm::Function *fn = CGM.CreateGlobalInitOrDestructFunction( 616 FTy, "__cxx_global_array_dtor", FI, VD->getLocation()); 617 618 CurEHLocation = VD->getLocStart(); 619 620 StartFunction(VD, getContext().VoidTy, fn, FI, args); 621 622 emitDestroy(addr, type, destroyer, useEHCleanupForArray); 623 624 FinishFunction(); 625 626 return fn; 627 } 628