//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Decl nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGBlocks.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGOpenCLRuntime.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"

using namespace clang;
using namespace CodeGen;

void CodeGenFunction::EmitDecl(const Decl &D) {
  switch (D.getKind()) {
  case Decl::BuiltinTemplate:
  case Decl::TranslationUnit:
  case Decl::ExternCContext:
  case Decl::Namespace:
  case Decl::UnresolvedUsingTypename:
  case Decl::ClassTemplateSpecialization:
  case Decl::ClassTemplatePartialSpecialization:
  case Decl::VarTemplateSpecialization:
  case Decl::VarTemplatePartialSpecialization:
  case Decl::TemplateTypeParm:
  case Decl::UnresolvedUsingValue:
  case Decl::NonTypeTemplateParm:
  case Decl::CXXMethod:
  case Decl::CXXConstructor:
  case Decl::CXXDestructor:
  case Decl::CXXConversion:
  case Decl::Field:
  case Decl::MSProperty:
  case Decl::IndirectField:
  case Decl::ObjCIvar:
  case Decl::ObjCAtDefsField:
  case Decl::ParmVar:
  case Decl::ImplicitParam:
  case Decl::ClassTemplate:
  case Decl::VarTemplate:
  case Decl::FunctionTemplate:
  case Decl::TypeAliasTemplate:
  case Decl::TemplateTemplateParm:
  case Decl::ObjCMethod:
  case Decl::ObjCCategory:
  case Decl::ObjCProtocol:
  case Decl::ObjCInterface:
  case Decl::ObjCCategoryImpl:
  case Decl::ObjCImplementation:
  case Decl::ObjCProperty:
  case Decl::ObjCCompatibleAlias:
  case Decl::AccessSpec:
  case Decl::LinkageSpec:
  case Decl::ObjCPropertyImpl:
  case Decl::FileScopeAsm:
  case Decl::Friend:
  case Decl::FriendTemplate:
  case Decl::Block:
  case Decl::Captured:
  case Decl::ClassScopeFunctionSpecialization:
  case Decl::UsingShadow:
  case Decl::ObjCTypeParam:
    llvm_unreachable("Declaration should not be in declstmts!");
  case Decl::Function:     // void X();
  case Decl::Record:       // struct/union/class X;
  case Decl::Enum:         // enum X;
  case Decl::EnumConstant: // enum ? { X = ? }
  case Decl::CXXRecord:    // struct/union/class X; [C++]
  case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
  case Decl::Label:        // __label__ x;
  case Decl::Import:
  case Decl::OMPThreadPrivate:
  case Decl::OMPCapturedExpr:
  case Decl::Empty:
    // None of these decls require codegen support.
    return;

  case Decl::NamespaceAlias:
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(D));
    return;
  case Decl::Using:          // using X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingDecl(cast<UsingDecl>(D));
    return;
  case Decl::UsingDirective: // using namespace X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingDirective(cast<UsingDirectiveDecl>(D));
    return;
  case Decl::Var: {
    const VarDecl &VD = cast<VarDecl>(D);
    assert(VD.isLocalVarDecl() &&
           "Should not see file-scope variables inside a function!");
    return EmitVarDecl(VD);
  }

  case Decl::Typedef:     // typedef int X;
  case Decl::TypeAlias: { // using X = int; [C++0x]
    const TypedefNameDecl &TD = cast<TypedefNameDecl>(D);
    QualType Ty = TD.getUnderlyingType();

    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
  }
  }
}
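
// For illustration (a sketch, not part of the upstream source): a local
// declaration statement such as
//   void f(unsigned n) {
//     typedef int VLA[n];   // variably-modified typedef: VLA sizes emitted
//     static int counter;   // static local: handled by EmitVarDecl
//   }
// is dispatched through the switch above; file-scope declarations never
// reach this function.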

/// EmitVarDecl - This method handles emission of any variable declaration
/// inside a function, including static vars etc.
void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
  if (D.isStaticLocal()) {
    llvm::GlobalValue::LinkageTypes Linkage =
        CGM.getLLVMLinkageVarDefinition(&D, /*isConstant=*/false);

    // FIXME: We need to force the emission/use of a guard variable for
    // some variables even if we can constant-evaluate them because
    // we can't guarantee every translation unit will constant-evaluate them.

    return EmitStaticVarDecl(D, Linkage);
  }

  if (D.hasExternalStorage())
    // Don't emit it now, allow it to be emitted lazily on its first use.
    return;

  if (D.getType().getAddressSpace() == LangAS::opencl_local)
    return CGM.getOpenCLRuntime().EmitWorkGroupLocalVarDecl(*this, D);

  assert(D.hasLocalStorage());
  return EmitAutoVarDecl(D);
}

static std::string getStaticDeclName(CodeGenModule &CGM, const VarDecl &D) {
  if (CGM.getLangOpts().CPlusPlus)
    return CGM.getMangledName(&D).str();

  // If this isn't C++, we don't need a mangled name, just a pretty one.
  assert(!D.isExternallyVisible() && "name shouldn't matter");
  std::string ContextName;
  const DeclContext *DC = D.getDeclContext();
  if (auto *CD = dyn_cast<CapturedDecl>(DC))
    DC = cast<DeclContext>(CD->getNonClosureContext());
  if (const auto *FD = dyn_cast<FunctionDecl>(DC))
    ContextName = CGM.getMangledName(FD);
  else if (const auto *BD = dyn_cast<BlockDecl>(DC))
    ContextName = CGM.getBlockMangledName(GlobalDecl(), BD);
  else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(DC))
    ContextName = OMD->getSelector().getAsString();
  else
    llvm_unreachable("Unknown context for static var decl");

  ContextName += "." + D.getNameAsString();
  return ContextName;
}
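
// For illustration (an assumption about the resulting names, not from the
// upstream source): in C mode a static local `count` inside a function
// `tick` gets the pretty name "tick.count"; in C++ mode the mangled name
// (e.g. "_ZZ4tickvE5count" under the Itanium ABI) is used instead.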

llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
    const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage) {
  // In general, we don't always emit static var decls once before we reference
  // them. It is possible to reference them before emitting the function that
  // contains them, and it is possible to emit the containing function multiple
  // times.
  if (llvm::Constant *ExistingGV = StaticLocalDeclMap[&D])
    return ExistingGV;

  QualType Ty = D.getType();
  assert(Ty->isConstantSizeType() && "VLAs can't be static");

  // Use the label if the variable is renamed with the asm-label extension.
  std::string Name;
  if (D.hasAttr<AsmLabelAttr>())
    Name = getMangledName(&D);
  else
    Name = getStaticDeclName(*this, D);

  llvm::Type *LTy = getTypes().ConvertTypeForMem(Ty);
  unsigned AddrSpace =
      GetGlobalVarAddressSpace(&D, getContext().getTargetAddressSpace(Ty));

  // Local address space cannot have an initializer.
  llvm::Constant *Init = nullptr;
  if (Ty.getAddressSpace() != LangAS::opencl_local)
    Init = EmitNullConstant(Ty);
  else
    Init = llvm::UndefValue::get(LTy);

  llvm::GlobalVariable *GV =
      new llvm::GlobalVariable(getModule(), LTy,
                               Ty.isConstant(getContext()), Linkage,
                               Init, Name, nullptr,
                               llvm::GlobalVariable::NotThreadLocal,
                               AddrSpace);
  GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());
  setGlobalVisibility(GV, &D);

  if (supportsCOMDAT() && GV->isWeakForLinker())
    GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));

  if (D.getTLSKind())
    setTLSMode(GV, D);

  if (D.isExternallyVisible()) {
    if (D.hasAttr<DLLImportAttr>())
      GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
    else if (D.hasAttr<DLLExportAttr>())
      GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
  }

  // Make sure the result is of the correct type.
  unsigned ExpectedAddrSpace = getContext().getTargetAddressSpace(Ty);
  llvm::Constant *Addr = GV;
  if (AddrSpace != ExpectedAddrSpace) {
    llvm::PointerType *PTy = llvm::PointerType::get(LTy, ExpectedAddrSpace);
    Addr = llvm::ConstantExpr::getAddrSpaceCast(GV, PTy);
  }

  setStaticLocalDeclAddress(&D, Addr);

  // Ensure that the static local gets initialized by making sure the parent
  // function gets emitted eventually.
  const Decl *DC = cast<Decl>(D.getDeclContext());

  // We can't name blocks or captured statements directly, so try to emit their
  // parents.
  if (isa<BlockDecl>(DC) || isa<CapturedDecl>(DC)) {
    DC = DC->getNonClosureContext();
    // FIXME: Ensure that global blocks get emitted.
    if (!DC)
      return Addr;
  }

  GlobalDecl GD;
  if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
    GD = GlobalDecl(CD, Ctor_Base);
  else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
    GD = GlobalDecl(DD, Dtor_Base);
  else if (const auto *FD = dyn_cast<FunctionDecl>(DC))
    GD = GlobalDecl(FD);
  else {
    // Don't do anything for Obj-C method decls or global closures. We should
    // never defer them.
    assert(isa<ObjCMethodDecl>(DC) && "unexpected parent code decl");
  }
  if (GD.getDecl())
    (void)GetAddrOfGlobal(GD);

  return Addr;
}

/// hasNontrivialDestruction - Determine whether a type's destruction is
/// non-trivial. If so, and the variable uses static initialization, we must
/// register its destructor to run on exit.
static bool hasNontrivialDestruction(QualType T) {
  CXXRecordDecl *RD = T->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
  return RD && !RD->hasTrivialDestructor();
}
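
// Illustrative example (not from the upstream source): for
//   struct S { ~S(); };
//   void f() { static S s; }
// hasNontrivialDestruction(S) is true, so even a statically-initialized `s`
// still needs a guarded "initialization" that registers ~S to run at exit
// (via __cxa_atexit or the ABI's equivalent).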

/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
/// global variable that has already been created for it. If the initializer
/// has a different type than GV does, this may free GV and return a different
/// one. Otherwise it just returns GV.
llvm::GlobalVariable *
CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
                                               llvm::GlobalVariable *GV) {
  llvm::Constant *Init = CGM.EmitConstantInit(D, this);

  // If constant emission failed, then this should be a C++ static
  // initializer.
  if (!Init) {
    if (!getLangOpts().CPlusPlus)
      CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
    else if (Builder.GetInsertBlock()) {
      // Since we have a static initializer, this global variable can't
      // be constant.
      GV->setConstant(false);

      EmitCXXGuardedInit(D, GV, /*PerformInit*/true);
    }
    return GV;
  }

  // The initializer may differ in type from the global. Rewrite
  // the global to match the initializer. (We have to do this
  // because some types, like unions, can't be completely represented
  // in the LLVM type system.)
  if (GV->getType()->getElementType() != Init->getType()) {
    llvm::GlobalVariable *OldGV = GV;

    GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
                                  OldGV->isConstant(),
                                  OldGV->getLinkage(), Init, "",
                                  /*InsertBefore*/ OldGV,
                                  OldGV->getThreadLocalMode(),
                          CGM.getContext().getTargetAddressSpace(D.getType()));
    GV->setVisibility(OldGV->getVisibility());
    GV->setComdat(OldGV->getComdat());

    // Steal the name of the old global
    GV->takeName(OldGV);

    // Replace all uses of the old global with the new global
    llvm::Constant *NewPtrForOldDecl =
        llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
    OldGV->replaceAllUsesWith(NewPtrForOldDecl);

    // Erase the old global, since it is no longer used.
    OldGV->eraseFromParent();
  }

  GV->setConstant(CGM.isTypeConstant(D.getType(), true));
  GV->setInitializer(Init);

  if (hasNontrivialDestruction(D.getType())) {
    // We have a constant initializer, but a nontrivial destructor. We still
    // need to perform a guarded "initialization" in order to register the
    // destructor.
    EmitCXXGuardedInit(D, GV, /*PerformInit*/false);
  }

  return GV;
}
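
// Illustrative example (a sketch of the type-rewrite case above, not from
// the upstream source): for
//   union U { int i; float f; };
//   static U u = {.f = 1.0f};   // C, inside a function
// the global is created with the LLVM type chosen for U, but the emitted
// constant initializer is shaped like { float }, so the global is replaced
// with one of the initializer's type and old uses are bitcast to the new
// pointer.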

void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
                                        llvm::GlobalValue::LinkageTypes Linkage) {
  // Check to see if we already have a global variable for this
  // declaration. This can happen when double-emitting function
  // bodies, e.g. with complete and base constructors.
  llvm::Constant *addr = CGM.getOrCreateStaticVarDecl(D, Linkage);
  CharUnits alignment = getContext().getDeclAlign(&D);

  // Store into LocalDeclMap before generating initializer to handle
  // circular references.
  setAddrOfLocalVar(&D, Address(addr, alignment));

  // We can't have a VLA here, but we can have a pointer to a VLA,
  // even though that doesn't really make any sense.
  // Make sure to evaluate VLA bounds now so that we have them for later.
  if (D.getType()->isVariablyModifiedType())
    EmitVariablyModifiedType(D.getType());

  // Save the type in case adding the initializer forces a type change.
  llvm::Type *expectedType = addr->getType();

  llvm::GlobalVariable *var =
      cast<llvm::GlobalVariable>(addr->stripPointerCasts());

  // If this value has an initializer, emit it.
  if (D.getInit())
    var = AddInitializerToStaticVarDecl(D, var);

  var->setAlignment(alignment.getQuantity());

  if (D.hasAttr<AnnotateAttr>())
    CGM.AddGlobalAnnotations(&D, var);

  if (const SectionAttr *SA = D.getAttr<SectionAttr>())
    var->setSection(SA->getName());

  if (D.hasAttr<UsedAttr>())
    CGM.addUsedGlobal(var);

  // We may have to cast the constant because of the initializer
  // mismatch above.
  //
  // FIXME: It is really dangerous to store this in the map; if anyone
  // RAUW's the GV uses of this constant will be invalid.
  llvm::Constant *castedAddr =
      llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(var, expectedType);
  if (var != castedAddr)
    LocalDeclMap.find(&D)->second = Address(castedAddr, alignment);
  CGM.setStaticLocalDeclAddress(&D, castedAddr);

  CGM.getSanitizerMetadata()->reportGlobalToASan(var, D);

  // Emit global variable debug descriptor for static vars.
  CGDebugInfo *DI = getDebugInfo();
  if (DI &&
      CGM.getCodeGenOpts().getDebugInfo() >= codegenoptions::LimitedDebugInfo) {
    DI->setLocation(D.getLocation());
    DI->EmitGlobalVariable(var, &D);
  }
}
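
// Illustrative sketch of the emitted IR (an assumption, not from the
// upstream source): in C, `void f(void) { static int x = 3; }` yields
// roughly
//   @f.x = internal global i32 3, align 4
// with no runtime initialization, while a C++ static local with a
// non-constant initializer keeps its guard variable and runs the code
// emitted by EmitCXXGuardedInit on first entry.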

namespace {
  struct DestroyObject final : EHScopeStack::Cleanup {
    DestroyObject(Address addr, QualType type,
                  CodeGenFunction::Destroyer *destroyer,
                  bool useEHCleanupForArray)
      : addr(addr), type(type), destroyer(destroyer),
        useEHCleanupForArray(useEHCleanupForArray) {}

    Address addr;
    QualType type;
    CodeGenFunction::Destroyer *destroyer;
    bool useEHCleanupForArray;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Don't use an EH cleanup recursively from an EH cleanup.
      bool useEHCleanupForArray =
        flags.isForNormalCleanup() && this->useEHCleanupForArray;

      CGF.emitDestroy(addr, type, destroyer, useEHCleanupForArray);
    }
  };

  struct DestroyNRVOVariable final : EHScopeStack::Cleanup {
    DestroyNRVOVariable(Address addr,
                        const CXXDestructorDecl *Dtor,
                        llvm::Value *NRVOFlag)
      : Dtor(Dtor), NRVOFlag(NRVOFlag), Loc(addr) {}

    const CXXDestructorDecl *Dtor;
    llvm::Value *NRVOFlag;
    Address Loc;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Along the exceptions path we always execute the dtor.
      bool NRVO = flags.isForNormalCleanup() && NRVOFlag;

      llvm::BasicBlock *SkipDtorBB = nullptr;
      if (NRVO) {
        // If we exited via NRVO, we skip the destructor call.
        llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
        SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
        llvm::Value *DidNRVO =
          CGF.Builder.CreateFlagLoad(NRVOFlag, "nrvo.val");
        CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
        CGF.EmitBlock(RunDtorBB);
      }

      CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                                /*ForVirtualBase=*/false,
                                /*Delegating=*/false,
                                Loc);

      if (NRVO) CGF.EmitBlock(SkipDtorBB);
    }
  };

  struct CallStackRestore final : EHScopeStack::Cleanup {
    Address Stack;
    CallStackRestore(Address Stack) : Stack(Stack) {}
    void Emit(CodeGenFunction &CGF, Flags flags) override {
      llvm::Value *V = CGF.Builder.CreateLoad(Stack);
      llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
      CGF.Builder.CreateCall(F, V);
    }
  };

  struct ExtendGCLifetime final : EHScopeStack::Cleanup {
    const VarDecl &Var;
    ExtendGCLifetime(const VarDecl *var) : Var(*var) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Compute the address of the local variable, in case it's a
      // byref or something.
      DeclRefExpr DRE(const_cast<VarDecl*>(&Var), false,
                      Var.getType(), VK_LValue, SourceLocation());
      llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE),
                                                SourceLocation());
      CGF.EmitExtendGCLifetime(value);
    }
  };

  struct CallCleanupFunction final : EHScopeStack::Cleanup {
    llvm::Constant *CleanupFn;
    const CGFunctionInfo &FnInfo;
    const VarDecl &Var;

    CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info,
                        const VarDecl *Var)
      : CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      DeclRefExpr DRE(const_cast<VarDecl*>(&Var), false,
                      Var.getType(), VK_LValue, SourceLocation());
      // Compute the address of the local variable, in case it's a byref
      // or something.
      llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getPointer();

      // In some cases, the type of the function argument will be different
      // from the type of the pointer. An example of this is
      //   void f(void* arg);
      //   __attribute__((cleanup(f))) void *g;
      //
      // To fix this we insert a bitcast here.
      QualType ArgTy = FnInfo.arg_begin()->type;
      llvm::Value *Arg =
        CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy));

      CallArgList Args;
      Args.add(RValue::get(Arg),
               CGF.getContext().getPointerType(Var.getType()));
      CGF.EmitCall(FnInfo, CleanupFn, ReturnValueSlot(), Args);
    }
  };

  /// A cleanup to call @llvm.lifetime.end.
  class CallLifetimeEnd final : public EHScopeStack::Cleanup {
    llvm::Value *Addr;
    llvm::Value *Size;
  public:
    CallLifetimeEnd(Address addr, llvm::Value *size)
      : Addr(addr.getPointer()), Size(size) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitLifetimeEnd(Size, Addr);
    }
  };
} // end anonymous namespace

/// EmitAutoVarWithLifetime - Does the setup required for an automatic
/// variable with lifetime.
static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
                                    Address addr,
                                    Qualifiers::ObjCLifetime lifetime) {
  switch (lifetime) {
  case Qualifiers::OCL_None:
    llvm_unreachable("present but none");

  case Qualifiers::OCL_ExplicitNone:
    // nothing to do
    break;

  case Qualifiers::OCL_Strong: {
    CodeGenFunction::Destroyer *destroyer =
      (var.hasAttr<ObjCPreciseLifetimeAttr>()
       ? CodeGenFunction::destroyARCStrongPrecise
       : CodeGenFunction::destroyARCStrongImprecise);

    CleanupKind cleanupKind = CGF.getARCCleanupKind();
    CGF.pushDestroy(cleanupKind, addr, var.getType(), destroyer,
                    cleanupKind & EHCleanup);
    break;
  }
  case Qualifiers::OCL_Autoreleasing:
    // nothing to do
    break;

  case Qualifiers::OCL_Weak:
    // __weak objects always get EH cleanups; otherwise, exceptions
    // could cause really nasty crashes instead of mere leaks.
    CGF.pushDestroy(NormalAndEHCleanup, addr, var.getType(),
                    CodeGenFunction::destroyARCWeak,
                    /*useEHCleanup*/ true);
    break;
  }
}

static bool isAccessedBy(const VarDecl &var, const Stmt *s) {
  if (const Expr *e = dyn_cast<Expr>(s)) {
    // Skip the most common kinds of expressions that make
    // hierarchy-walking expensive.
    s = e = e->IgnoreParenCasts();

    if (const DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e))
      return (ref->getDecl() == &var);
    if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
      const BlockDecl *block = be->getBlockDecl();
      for (const auto &I : block->captures()) {
        if (I.getVariable() == &var)
          return true;
      }
    }
  }

  for (const Stmt *SubStmt : s->children())
    // SubStmt might be null, as in a missing decl or the condition of an
    // if-statement.
    if (SubStmt && isAccessedBy(var, SubStmt))
      return true;

  return false;
}

static bool isAccessedBy(const ValueDecl *decl, const Expr *e) {
  if (!decl) return false;
  if (!isa<VarDecl>(decl)) return false;
  const VarDecl *var = cast<VarDecl>(decl);
  return isAccessedBy(*var, e);
}

static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF,
                                   const LValue &destLV, const Expr *init) {
  bool needsCast = false;

  while (auto castExpr = dyn_cast<CastExpr>(init->IgnoreParens())) {
    switch (castExpr->getCastKind()) {
    // Look through casts that don't require representation changes.
    case CK_NoOp:
    case CK_BitCast:
    case CK_BlockPointerToObjCPointerCast:
      needsCast = true;
      break;

    // If we find an l-value to r-value cast from a __weak variable,
    // emit this operation as a copy or move.
    case CK_LValueToRValue: {
      const Expr *srcExpr = castExpr->getSubExpr();
      if (srcExpr->getType().getObjCLifetime() != Qualifiers::OCL_Weak)
        return false;

      // Emit the source l-value.
      LValue srcLV = CGF.EmitLValue(srcExpr);

      // Handle a formal type change to avoid asserting.
      auto srcAddr = srcLV.getAddress();
      if (needsCast) {
        srcAddr = CGF.Builder.CreateElementBitCast(srcAddr,
                                     destLV.getAddress().getElementType());
      }

      // If it was an l-value, use objc_copyWeak.
      if (srcExpr->getValueKind() == VK_LValue) {
        CGF.EmitARCCopyWeak(destLV.getAddress(), srcAddr);
      } else {
        assert(srcExpr->getValueKind() == VK_XValue);
        CGF.EmitARCMoveWeak(destLV.getAddress(), srcAddr);
      }
      return true;
    }

    // Stop at anything else.
    default:
      return false;
    }

    init = castExpr->getSubExpr();
  }
  return false;
}

static void drillIntoBlockVariable(CodeGenFunction &CGF,
                                   LValue &lvalue,
                                   const VarDecl *var) {
  lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(), var));
}

void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
                                     LValue lvalue, bool capturedByInit) {
  Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
  if (!lifetime) {
    llvm::Value *value = EmitScalarExpr(init);
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitStoreThroughLValue(RValue::get(value), lvalue, true);
    return;
  }

  if (const CXXDefaultInitExpr *DIE = dyn_cast<CXXDefaultInitExpr>(init))
    init = DIE->getExpr();

  // If we're emitting a value with lifetime, we have to do the
  // initialization *before* we leave the cleanup scopes.
  if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(init)) {
    enterFullExpression(ewc);
    init = ewc->getSubExpr();
  }
  CodeGenFunction::RunCleanupsScope Scope(*this);

  // We have to maintain the illusion that the variable is
  // zero-initialized. If the variable might be accessed in its
  // initializer, zero-initialize before running the initializer, then
  // actually perform the initialization with an assign.
  bool accessedByInit = false;
  if (lifetime != Qualifiers::OCL_ExplicitNone)
    accessedByInit = (capturedByInit || isAccessedBy(D, init));
  if (accessedByInit) {
    LValue tempLV = lvalue;
    // Drill down to the __block object if necessary.
    if (capturedByInit) {
      // We can use a simple GEP for this because it can't have been
      // moved yet.
      tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(),
                                              cast<VarDecl>(D),
                                              /*follow*/ false));
    }

    auto ty = cast<llvm::PointerType>(tempLV.getAddress().getElementType());
    llvm::Value *zero = llvm::ConstantPointerNull::get(ty);

    // If __weak, we want to use a barrier under certain conditions.
    if (lifetime == Qualifiers::OCL_Weak)
      EmitARCInitWeak(tempLV.getAddress(), zero);

    // Otherwise just do a simple store.
    else
      EmitStoreOfScalar(zero, tempLV, /* isInitialization */ true);
  }

  // Emit the initializer.
  llvm::Value *value = nullptr;

  switch (lifetime) {
  case Qualifiers::OCL_None:
    llvm_unreachable("present but none");

  case Qualifiers::OCL_ExplicitNone:
    value = EmitARCUnsafeUnretainedScalarExpr(init);
    break;

  case Qualifiers::OCL_Strong: {
    value = EmitARCRetainScalarExpr(init);
    break;
  }

  case Qualifiers::OCL_Weak: {
    // If it's not accessed by the initializer, try to emit the
    // initialization with a copy or move.
    if (!accessedByInit && tryEmitARCCopyWeakInit(*this, lvalue, init)) {
      return;
    }

    // No way to optimize a producing initializer into this. It's not
    // worth optimizing for, because the value will immediately
    // disappear in the common case.
    value = EmitScalarExpr(init);

    if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    if (accessedByInit)
      EmitARCStoreWeak(lvalue.getAddress(), value, /*ignored*/ true);
    else
      EmitARCInitWeak(lvalue.getAddress(), value);
    return;
  }

  case Qualifiers::OCL_Autoreleasing:
    value = EmitARCRetainAutoreleaseScalarExpr(init);
    break;
  }

  if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));

  // If the variable might have been accessed by its initializer, we
  // might have to initialize with a barrier. We have to do this for
  // both __weak and __strong, but __weak got filtered out above.
  if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
    llvm::Value *oldValue = EmitLoadOfScalar(lvalue, init->getExprLoc());
    EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
    EmitARCRelease(oldValue, ARCImpreciseLifetime);
    return;
  }

  EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
}
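
// Illustrative example (not from the upstream source): under ARC, an
// initializer that mentions the variable itself, e.g.
//   __strong id x = g(x);   // g is a hypothetical function
// is caught by isAccessedBy, so the code above zero-initializes x first,
// evaluates the initializer against that null value, and then stores the
// result with assignment semantics (releasing the old value for __strong).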

/// EmitScalarInit - Initialize the given lvalue with the given object.
void CodeGenFunction::EmitScalarInit(llvm::Value *init, LValue lvalue) {
  Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
  if (!lifetime)
    return EmitStoreThroughLValue(RValue::get(init), lvalue, true);

  switch (lifetime) {
  case Qualifiers::OCL_None:
    llvm_unreachable("present but none");

  case Qualifiers::OCL_ExplicitNone:
    // nothing to do
    break;

  case Qualifiers::OCL_Strong:
    init = EmitARCRetain(lvalue.getType(), init);
    break;

  case Qualifiers::OCL_Weak:
    // Initialize and then skip the primitive store.
    EmitARCInitWeak(lvalue.getAddress(), init);
    return;

  case Qualifiers::OCL_Autoreleasing:
    init = EmitARCRetainAutorelease(lvalue.getType(), init);
    break;
  }

  EmitStoreOfScalar(init, lvalue, /* isInitialization */ true);
}

/// canEmitInitWithFewStoresAfterMemset - Decide whether we can emit the
/// non-zero parts of the specified initializer with equal or fewer than
/// NumStores scalar stores.
static bool canEmitInitWithFewStoresAfterMemset(llvm::Constant *Init,
                                                unsigned &NumStores) {
  // Zero and Undef never require any extra stores.
  if (isa<llvm::ConstantAggregateZero>(Init) ||
      isa<llvm::ConstantPointerNull>(Init) ||
      isa<llvm::UndefValue>(Init))
    return true;
  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init))
    return Init->isNullValue() || NumStores--;

  // See if we can emit each element.
  if (isa<llvm::ConstantArray>(Init) || isa<llvm::ConstantStruct>(Init)) {
    for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
      llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
      if (!canEmitInitWithFewStoresAfterMemset(Elt, NumStores))
        return false;
    }
    return true;
  }

  if (llvm::ConstantDataSequential *CDS =
        dyn_cast<llvm::ConstantDataSequential>(Init)) {
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      llvm::Constant *Elt = CDS->getElementAsConstant(i);
      if (!canEmitInitWithFewStoresAfterMemset(Elt, NumStores))
        return false;
    }
    return true;
  }

  // Anything else is hard and scary.
  return false;
}
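
// Worked example (not from the upstream source): with the default budget of
// 6 stores, the initializer for
//   int a[16] = {0, 0, 1, 0, 2};
// needs only two non-zero scalar stores (a[2] and a[4]), so the function
// returns true; a 16-element array with all-distinct non-zero elements
// would exhaust the budget and return false.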

/// emitStoresForInitAfterMemset - For inits that
/// canEmitInitWithFewStoresAfterMemset returned true for, emit the scalar
/// stores that would be required.
static void emitStoresForInitAfterMemset(llvm::Constant *Init, llvm::Value *Loc,
                                         bool isVolatile, CGBuilderTy &Builder) {
  assert(!Init->isNullValue() && !isa<llvm::UndefValue>(Init) &&
         "called emitStoresForInitAfterMemset for zero or undef value.");

  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init)) {
    Builder.CreateDefaultAlignedStore(Init, Loc, isVolatile);
    return;
  }

  if (llvm::ConstantDataSequential *CDS =
        dyn_cast<llvm::ConstantDataSequential>(Init)) {
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      llvm::Constant *Elt = CDS->getElementAsConstant(i);

      // If necessary, get a pointer to the element and emit it.
      if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
        emitStoresForInitAfterMemset(
            Elt, Builder.CreateConstGEP2_32(Init->getType(), Loc, 0, i),
            isVolatile, Builder);
    }
    return;
  }

  assert((isa<llvm::ConstantStruct>(Init) || isa<llvm::ConstantArray>(Init)) &&
         "Unknown value type!");

  for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
    llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));

    // If necessary, get a pointer to the element and emit it.
    if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
      emitStoresForInitAfterMemset(
          Elt, Builder.CreateConstGEP2_32(Init->getType(), Loc, 0, i),
          isVolatile, Builder);
  }
}

/// shouldUseMemSetPlusStoresToInitialize - Decide whether we should use memset
/// plus some stores to initialize a local variable instead of using a memcpy
/// from a constant global. It is beneficial to use memset if the global is all
/// zeros, or mostly zeros and large.
static bool shouldUseMemSetPlusStoresToInitialize(llvm::Constant *Init,
                                                  uint64_t GlobalSize) {
  // If a global is all zeros, always use a memset.
  if (isa<llvm::ConstantAggregateZero>(Init)) return true;

  // If a non-zero global is <= 32 bytes, always use a memcpy. If it is large,
  // do it if it will require 6 or fewer scalar stores.
  // TODO: Should the budget depend on the size? Avoiding a large global
  // warrants plopping in more stores.
  unsigned StoreBudget = 6;
  uint64_t SizeLimit = 32;

  return GlobalSize > SizeLimit &&
         canEmitInitWithFewStoresAfterMemset(Init, StoreBudget);
}
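
// Worked example (not from the upstream source): for
//   int big[64] = {1};
// the 256-byte initializer is mostly zero, so this returns true and the
// variable is initialized with one memset plus a single scalar store. A
// non-zero int[8] initializer is only 32 bytes, which fails the
// `GlobalSize > SizeLimit` test, so it is copied from a private constant
// global with memcpy instead.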

/// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) {
  AutoVarEmission emission = EmitAutoVarAlloca(D);
  EmitAutoVarInit(emission);
  EmitAutoVarCleanups(emission);
}

/// Emit a lifetime.begin marker if some criteria are satisfied.
/// \return a pointer to the temporary size Value if a marker was emitted, null
/// otherwise
llvm::Value *CodeGenFunction::EmitLifetimeStart(uint64_t Size,
                                                llvm::Value *Addr) {
  // For now, only in optimized builds.
  if (CGM.getCodeGenOpts().OptimizationLevel == 0)
    return nullptr;

  // Disable lifetime markers in msan builds.
  // FIXME: Remove this when msan works with lifetime markers.
  if (getLangOpts().Sanitize.has(SanitizerKind::Memory))
    return nullptr;

  llvm::Value *SizeV = llvm::ConstantInt::get(Int64Ty, Size);
  Addr = Builder.CreateBitCast(Addr, Int8PtrTy);
  llvm::CallInst *C =
      Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {SizeV, Addr});
  C->setDoesNotThrow();
  return SizeV;
}

void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr) {
  Addr = Builder.CreateBitCast(Addr, Int8PtrTy);
  llvm::CallInst *C =
      Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Size, Addr});
  C->setDoesNotThrow();
}
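
// Illustrative sketch of the emitted IR (assuming the intrinsic signatures
// of this LLVM era, not taken from the upstream source):
//   %0 = bitcast i32* %x to i8*
//   call void @llvm.lifetime.start(i64 4, i8* %0)
//   ...
//   call void @llvm.lifetime.end(i64 4, i8* %0)
// The markers let stack coloring reuse one stack slot for locals whose
// lifetimes do not overlap.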

/// EmitAutoVarAlloca - Emit the alloca and debug information for a
/// local variable. Does not emit initialization or destruction.
CodeGenFunction::AutoVarEmission
CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
  QualType Ty = D.getType();

  AutoVarEmission emission(D);

  bool isByRef = D.hasAttr<BlocksAttr>();
  emission.IsByRef = isByRef;

  CharUnits alignment = getContext().getDeclAlign(&D);

  // If the type is variably-modified, emit all the VLA sizes for it.
  if (Ty->isVariablyModifiedType())
    EmitVariablyModifiedType(Ty);

  Address address = Address::invalid();
  if (Ty->isConstantSizeType()) {
    bool NRVO = getLangOpts().ElideConstructors &&
                D.isNRVOVariable();

    // If this value is an array or struct with a statically determinable
    // constant initializer, there are optimizations we can do.
    //
    // TODO: We should constant-evaluate the initializer of any variable,
    // as long as it is initialized by a constant expression. Currently,
    // isConstantInitializer produces wrong answers for structs with
    // reference or bitfield members, and a few other cases, and checking
    // for POD-ness protects us from some of these.
    if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) &&
        (D.isConstexpr() ||
         ((Ty.isPODType(getContext()) ||
           getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
          D.getInit()->isConstantInitializer(getContext(), false)))) {

      // If the variable's a const type, and it's neither an NRVO
      // candidate nor a __block variable and has no mutable members,
      // emit it as a global instead.
      if (CGM.getCodeGenOpts().MergeAllConstants && !NRVO && !isByRef &&
          CGM.isTypeConstant(Ty, true)) {
        EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);

        // Signal this condition to later callbacks.
        emission.Addr = Address::invalid();
        assert(emission.wasEmittedAsGlobal());
        return emission;
      }

      // Otherwise, tell the initialization code that we're in this case.
      emission.IsConstantAggregate = true;
    }

    // A normal fixed sized variable becomes an alloca in the entry block,
    // unless it's an NRVO variable.

    if (NRVO) {
      // The named return value optimization: allocate this variable in the
      // return slot, so that we can elide the copy when returning this
      // variable (C++0x [class.copy]p34).
      address = ReturnValue;

      if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
        if (!cast<CXXRecordDecl>(RecordTy->getDecl())->hasTrivialDestructor()) {
          // Create a flag that is used to indicate when the NRVO was applied
          // to this variable. Set it to zero to indicate that NRVO was not
          // applied.
          llvm::Value *Zero = Builder.getFalse();
          Address NRVOFlag =
            CreateTempAlloca(Zero->getType(), CharUnits::One(), "nrvo");
          EnsureInsertPoint();
          Builder.CreateStore(Zero, NRVOFlag);

          // Record the NRVO flag for this variable.
          NRVOFlags[&D] = NRVOFlag.getPointer();
          emission.NRVOFlag = NRVOFlag.getPointer();
        }
      }
    } else {
      CharUnits allocaAlignment;
      llvm::Type *allocaTy;
      if (isByRef) {
        auto &byrefInfo = getBlockByrefInfo(&D);
        allocaTy = byrefInfo.Type;
        allocaAlignment = byrefInfo.ByrefAlignment;
      } else {
        allocaTy = ConvertTypeForMem(Ty);
        allocaAlignment = alignment;
      }

      // Create the alloca. Note that we set the name separately from
      // building the instruction so that it's there even in no-asserts
      // builds.
      address = CreateTempAlloca(allocaTy, allocaAlignment);
      address.getPointer()->setName(D.getName());

      // Don't emit lifetime markers for MSVC catch parameters. The lifetime of
      // the catch parameter starts in the catchpad instruction, and we can't
      // insert code in those basic blocks.
      bool IsMSCatchParam =
          D.isExceptionVariable() && getTarget().getCXXABI().isMicrosoft();

      // Emit a lifetime intrinsic if meaningful. There's no point
      // in doing this if we don't have a valid insertion point (?).
      if (HaveInsertPoint() && !IsMSCatchParam) {
        uint64_t size = CGM.getDataLayout().getTypeAllocSize(allocaTy);
        emission.SizeForLifetimeMarkers =
          EmitLifetimeStart(size, address.getPointer());
      } else {
        assert(!emission.useLifetimeMarkers());
      }
    }
  } else {
    EnsureInsertPoint();

    if (!DidCallStackSave) {
      // Save the stack.
      Address Stack =
        CreateTempAlloca(Int8PtrTy, getPointerAlign(), "saved_stack");

      llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
      llvm::Value *V = Builder.CreateCall(F);
      Builder.CreateStore(V, Stack);

      DidCallStackSave = true;

      // Push a cleanup block and restore the stack there.
      // FIXME: in general circumstances, this should be an EH cleanup.
      pushStackRestore(NormalCleanup, Stack);
    }

    llvm::Value *elementCount;
    QualType elementType;
    std::tie(elementCount, elementType) = getVLASize(Ty);

    llvm::Type *llvmTy = ConvertTypeForMem(elementType);

    // Allocate memory for the array.
    llvm::AllocaInst *vla = Builder.CreateAlloca(llvmTy, elementCount, "vla");
    vla->setAlignment(alignment.getQuantity());

    address = Address(vla, alignment);
  }

  setAddrOfLocalVar(&D, address);
  emission.Addr = address;

  // Emit debug info for local var declaration.
  if (HaveInsertPoint())
    if (CGDebugInfo *DI = getDebugInfo()) {
      if (CGM.getCodeGenOpts().getDebugInfo() >=
          codegenoptions::LimitedDebugInfo) {
        DI->setLocation(D.getLocation());
        DI->EmitDeclareOfAutoVariable(&D, address.getPointer(), Builder);
      }
    }

  if (D.hasAttr<AnnotateAttr>())
    EmitVarAnnotations(&D, address.getPointer());

  return emission;
}
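
// Illustrative example (not from the upstream source): in
//   struct S { S(); S(const S &); ~S(); };
//   S make() { S s; /* ... */ return s; }
// `s` is an NRVO variable, so the code above places it directly in the
// return slot and materializes the "nrvo" flag; DestroyNRVOVariable later
// consults that flag to skip ~S() when the copy was elided.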

/// Determines whether the given __block variable is potentially
/// captured by the given expression.
static bool isCapturedBy(const VarDecl &var, const Expr *e) {
  // Skip the most common kinds of expressions that make
  // hierarchy-walking expensive.
  e = e->IgnoreParenCasts();

  if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
    const BlockDecl *block = be->getBlockDecl();
    for (const auto &I : block->captures()) {
      if (I.getVariable() == &var)
        return true;
    }

    // No need to walk into the subexpressions.
    return false;
  }

  if (const StmtExpr *SE = dyn_cast<StmtExpr>(e)) {
    const CompoundStmt *CS = SE->getSubStmt();
    for (const auto *BI : CS->body())
      if (const auto *E = dyn_cast<Expr>(BI)) {
        if (isCapturedBy(var, E))
          return true;
      }
      else if (const auto *DS = dyn_cast<DeclStmt>(BI)) {
        // special case declarations
        for (const auto *I : DS->decls()) {
          if (const auto *VD = dyn_cast<VarDecl>(I)) {
            const Expr *Init = VD->getInit();
            if (Init && isCapturedBy(var, Init))
              return true;
          }
        }
      }
      else
        // FIXME: Make the safe assumption that arbitrary statements cause
        // capturing; later, provide code to poke into statements for capture
        // analysis.
        return true;
    return false;
  }

  for (const Stmt *SubStmt : e->children())
    if (isCapturedBy(var, cast<Expr>(SubStmt)))
      return true;

  return false;
}
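
// Illustrative example (not from the upstream source; use() is a
// hypothetical function):
//   __block id x = ^{ use(x); };
// The block in the initializer captures x itself, so isCapturedBy returns
// true and EmitAutoVarInit evaluates the initializer first, against the
// original stack object, before copying into the (possibly moved) variable.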

/// \brief Determine whether the given initializer is trivial in the sense
/// that it requires no code to be generated.
bool CodeGenFunction::isTrivialInitializer(const Expr *Init) {
  if (!Init)
    return true;

  if (const CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init))
    if (CXXConstructorDecl *Constructor = Construct->getConstructor())
      if (Constructor->isTrivial() &&
          Constructor->isDefaultConstructor() &&
          !Construct->requiresZeroInitialization())
        return true;

  return false;
}
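
// Illustrative example (not from the upstream source): given
//   struct P { int x; };
//   void f() { P p; }
// the implicit default construction of `p` is trivial, so no stores are
// emitted for it; `P p{};` would instead require zero-initialization and
// is not considered trivial here.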

void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
  assert(emission.Variable && "emission was not valid!");

  // If this was emitted as a global constant, we're done.
  if (emission.wasEmittedAsGlobal()) return;

  const VarDecl &D = *emission.Variable;
  auto DL = ApplyDebugLocation::CreateDefaultArtificial(*this, D.getLocation());
  QualType type = D.getType();

  // If this local has an initializer, emit it now.
  const Expr *Init = D.getInit();

  // If we are at an unreachable point, we don't need to emit the initializer
  // unless it contains a label.
  if (!HaveInsertPoint()) {
    if (!Init || !ContainsLabel(Init)) return;
    EnsureInsertPoint();
  }

  // Initialize the structure of a __block variable.
  if (emission.IsByRef)
    emitByrefStructureInit(emission);

  if (isTrivialInitializer(Init))
    return;

  // Check whether this is a byref variable that's potentially
  // captured and moved by its own initializer. If so, we'll need to
  // emit the initializer first, then copy into the variable.
  bool capturedByInit = emission.IsByRef && isCapturedBy(D, Init);

  Address Loc =
    capturedByInit ? emission.Addr : emission.getObjectAddress(*this);

  llvm::Constant *constant = nullptr;
  if (emission.IsConstantAggregate || D.isConstexpr()) {
    assert(!capturedByInit && "constant init contains a capturing block?");
    constant = CGM.EmitConstantInit(D, this);
  }

  if (!constant) {
    LValue lv = MakeAddrLValue(Loc, type);
    lv.setNonGC(true);
    return EmitExprAsInit(Init, &D, lv, capturedByInit);
  }

  if (!emission.IsConstantAggregate) {
    // For simple scalar/complex initialization, store the value directly.
    LValue lv = MakeAddrLValue(Loc, type);
    lv.setNonGC(true);
    return EmitStoreThroughLValue(RValue::get(constant), lv, true);
  }

  // If this is a simple aggregate initialization, we can optimize it
  // in various ways.
  bool isVolatile = type.isVolatileQualified();

  llvm::Value *SizeVal =
    llvm::ConstantInt::get(IntPtrTy,
                           getContext().getTypeSizeInChars(type).getQuantity());

  llvm::Type *BP = Int8PtrTy;
  if (Loc.getType() != BP)
    Loc = Builder.CreateBitCast(Loc, BP);

  // If the initializer is all or mostly zeros, codegen with memset then do
  // a few stores afterward.
  if (shouldUseMemSetPlusStoresToInitialize(constant,
                CGM.getDataLayout().getTypeAllocSize(constant->getType()))) {
    Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), SizeVal,
                         isVolatile);
    // Zero and undef don't require a store.
    if (!constant->isNullValue() && !isa<llvm::UndefValue>(constant)) {
      Loc = Builder.CreateBitCast(Loc, constant->getType()->getPointerTo());
      emitStoresForInitAfterMemset(constant, Loc.getPointer(),
                                   isVolatile, Builder);
    }
  } else {
    // Otherwise, create a temporary global with the initializer then
    // memcpy from the global to the alloca.
    std::string Name = getStaticDeclName(CGM, D);
    llvm::GlobalVariable *GV =
      new llvm::GlobalVariable(CGM.getModule(), constant->getType(), true,
                               llvm::GlobalValue::PrivateLinkage,
                               constant, Name);
    GV->setAlignment(Loc.getAlignment().getQuantity());
    GV->setUnnamedAddr(true);

    Address SrcPtr = Address(GV, Loc.getAlignment());
    if (SrcPtr.getType() != BP)
      SrcPtr = Builder.CreateBitCast(SrcPtr, BP);

    Builder.CreateMemCpy(Loc, SrcPtr, SizeVal, isVolatile);
  }
}

/// Emit an expression as an initializer for a variable at the given
/// location. The expression is not necessarily the normal
/// initializer for the variable, and the address is not necessarily
/// its normal location.
///
/// \param init the initializing expression
/// \param D the variable to act as if we're initializing
/// \param lvalue the lvalue to initialize; its address points to the
///   LLVM mapping of the variable's type
/// \param capturedByInit true if the variable is a __block variable
///   whose address is potentially changed by the initializer
void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
                                     LValue lvalue, bool capturedByInit) {
  QualType type = D->getType();

  if (type->isReferenceType()) {
    RValue rvalue = EmitReferenceBindingToExpr(init);
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitStoreThroughLValue(rvalue, lvalue, true);
    return;
  }
  switch (getEvaluationKind(type)) {
  case TEK_Scalar:
    EmitScalarInit(init, D, lvalue, capturedByInit);
    return;
  case TEK_Complex: {
    ComplexPairTy complex = EmitComplexExpr(init);
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitStoreOfComplex(complex, lvalue, /*init*/ true);
    return;
  }
  case TEK_Aggregate:
    if (type->isAtomicType()) {
      EmitAtomicInit(const_cast<Expr*>(init), lvalue);
    } else {
      // TODO: how can we delay here if D is captured by its initializer?
      EmitAggExpr(init, AggValueSlot::forLValue(lvalue,
                                           AggValueSlot::IsDestructed,
                                           AggValueSlot::DoesNotNeedGCBarriers,
                                           AggValueSlot::IsNotAliased));
    }
    return;
  }
  llvm_unreachable("bad evaluation kind");
}

/// Enter a destroy cleanup for the given local variable.
void CodeGenFunction::emitAutoVarTypeCleanup(
                            const CodeGenFunction::AutoVarEmission &emission,
                            QualType::DestructionKind dtorKind) {
  assert(dtorKind != QualType::DK_none);

  // Note that for __block variables, we want to destroy the
  // original stack object, not the possibly forwarded object.
  Address addr = emission.getObjectAddress(*this);

  const VarDecl *var = emission.Variable;
  QualType type = var->getType();

  CleanupKind cleanupKind = NormalAndEHCleanup;
  CodeGenFunction::Destroyer *destroyer = nullptr;

  switch (dtorKind) {
  case QualType::DK_none:
    llvm_unreachable("no cleanup for trivially-destructible variable");

  case QualType::DK_cxx_destructor:
    // If there's an NRVO flag on the emission, we need a different
    // cleanup.
    if (emission.NRVOFlag) {
      assert(!type->isArrayType());
      CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
      EHStack.pushCleanup<DestroyNRVOVariable>(cleanupKind, addr,
                                               dtor, emission.NRVOFlag);
      return;
    }
    break;

  case QualType::DK_objc_strong_lifetime:
    // Suppress cleanups for pseudo-strong variables.
    if (var->isARCPseudoStrong()) return;

    // Otherwise, consider whether to use an EH cleanup or not.
    cleanupKind = getARCCleanupKind();

    // Use the imprecise destroyer by default.
    if (!var->hasAttr<ObjCPreciseLifetimeAttr>())
      destroyer = CodeGenFunction::destroyARCStrongImprecise;
    break;

  case QualType::DK_objc_weak_lifetime:
    break;
  }

  // If we haven't chosen a more specific destroyer, use the default.
  if (!destroyer) destroyer = getDestroyer(dtorKind);

  // Use an EH cleanup in array destructors iff the destructor itself
  // is being pushed as an EH cleanup.
  bool useEHCleanup = (cleanupKind & EHCleanup);
  EHStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer,
                                     useEHCleanup);
}

void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
  assert(emission.Variable && "emission was not valid!");

  // If this was emitted as a global constant, we're done.
  if (emission.wasEmittedAsGlobal()) return;

  // If we don't have an insertion point, we're done. Sema prevents
  // us from jumping into any of these scopes anyway.
  if (!HaveInsertPoint()) return;

  const VarDecl &D = *emission.Variable;

  // Make sure we call @llvm.lifetime.end. This needs to happen
  // *last*, so the cleanup needs to be pushed *first*.
  if (emission.useLifetimeMarkers()) {
    EHStack.pushCleanup<CallLifetimeEnd>(NormalCleanup,
                                         emission.getAllocatedAddress(),
                                         emission.getSizeForLifetimeMarkers());
    EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin());
    cleanup.setLifetimeMarker();
  }

  // Check the type for a cleanup.
  if (QualType::DestructionKind dtorKind = D.getType().isDestructedType())
    emitAutoVarTypeCleanup(emission, dtorKind);

  // In GC mode, honor objc_precise_lifetime.
  if (getLangOpts().getGC() != LangOptions::NonGC &&
      D.hasAttr<ObjCPreciseLifetimeAttr>()) {
    EHStack.pushCleanup<ExtendGCLifetime>(NormalCleanup, &D);
  }

  // Handle the cleanup attribute.
  if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
    const FunctionDecl *FD = CA->getFunctionDecl();

    llvm::Constant *F = CGM.GetAddrOfFunction(FD);
    assert(F && "Could not find function!");

    const CGFunctionInfo &Info = CGM.getTypes().arrangeFunctionDeclaration(FD);
    EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup, F, &Info, &D);
  }

  // If this is a block variable, call _Block_object_destroy
  // (on the unforwarded address).
  if (emission.IsByRef)
    enterByrefCleanup(emission);
}

CodeGenFunction::Destroyer *
CodeGenFunction::getDestroyer(QualType::DestructionKind kind) {
  switch (kind) {
  case QualType::DK_none: llvm_unreachable("no destroyer for trivial dtor");
  case QualType::DK_cxx_destructor:
    return destroyCXXObject;
  case QualType::DK_objc_strong_lifetime:
    return destroyARCStrongPrecise;
  case QualType::DK_objc_weak_lifetime:
    return destroyARCWeak;
  }
  llvm_unreachable("Unknown DestructionKind");
}

/// pushEHDestroy - Push the standard destructor for the given type as
/// an EH-only cleanup.
void CodeGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind,
                                    Address addr, QualType type) {
  assert(dtorKind && "cannot push destructor for trivial type");
  assert(needsEHCleanup(dtorKind));

  pushDestroy(EHCleanup, addr, type, getDestroyer(dtorKind), true);
}

/// pushDestroy - Push the standard destructor for the given type as
/// at least a normal cleanup.
void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
                                  Address addr, QualType type) {
  assert(dtorKind && "cannot push destructor for trivial type");

  CleanupKind cleanupKind = getCleanupKind(dtorKind);
  pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind),
              cleanupKind & EHCleanup);
}

void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
                                  QualType type, Destroyer *destroyer,
                                  bool useEHCleanupForArray) {
  pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type,
                                     destroyer, useEHCleanupForArray);
}

void CodeGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) {
  EHStack.pushCleanup<CallStackRestore>(Kind, SPMem);
}

void CodeGenFunction::pushLifetimeExtendedDestroy(
    CleanupKind cleanupKind, Address addr, QualType type,
    Destroyer *destroyer, bool useEHCleanupForArray) {
  assert(!isInConditionalBranch() &&
         "performing lifetime extension from within conditional");

  // Push an EH-only cleanup for the object now.
  // FIXME: When popping normal cleanups, we need to keep this EH cleanup
  // around in case a temporary's destructor throws an exception.
  if (cleanupKind & EHCleanup)
    EHStack.pushCleanup<DestroyObject>(
        static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), addr, type,
        destroyer, useEHCleanupForArray);

  // Remember that we need to push a full cleanup for the object at the
  // end of the full-expression.
  pushCleanupAfterFullExpr<DestroyObject>(
      cleanupKind, addr, type, destroyer, useEHCleanupForArray);
}

/// emitDestroy - Immediately perform the destruction of the given
/// object.
///
/// \param addr - the address of the object; a type*
/// \param type - the type of the object; if an array type, all
///   objects are destroyed in reverse order
/// \param destroyer - the function to call to destroy individual
///   elements
/// \param useEHCleanupForArray - whether an EH cleanup should be
///   used when destroying array elements, in case one of the
///   destructions throws an exception
void CodeGenFunction::emitDestroy(Address addr, QualType type,
                                  Destroyer *destroyer,
                                  bool useEHCleanupForArray) {
  const ArrayType *arrayType = getContext().getAsArrayType(type);
  if (!arrayType)
    return destroyer(*this, addr, type);

  llvm::Value *length = emitArrayLength(arrayType, type, addr);

  CharUnits elementAlign =
    addr.getAlignment()
        .alignmentOfArrayElement(getContext().getTypeSizeInChars(type));

  // Normally we have to check whether the array is zero-length.
  bool checkZeroLength = true;

  // But if the array length is constant, we can suppress that.
  if (llvm::ConstantInt *constLength = dyn_cast<llvm::ConstantInt>(length)) {
    // ...and if it's constant zero, we can just skip the entire thing.
    if (constLength->isZero()) return;
    checkZeroLength = false;
  }

  llvm::Value *begin = addr.getPointer();
  llvm::Value *end = Builder.CreateInBoundsGEP(begin, length);
  emitArrayDestroy(begin, end, type, elementAlign, destroyer,
                   checkZeroLength, useEHCleanupForArray);
}
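
// Illustrative example (not from the upstream source): destroying
//   S arr[3];
// walks from arr+3 down to arr, invoking the destroyer on each element in
// reverse construction order; since the length is a compile-time constant,
// the zero-length check is suppressed.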

/// emitArrayDestroy - Destroys all the elements of the given array,
/// beginning from last to first. The array cannot be zero-length.
///
/// \param begin - a type* denoting the first element of the array
/// \param end - a type* denoting one past the end of the array
/// \param elementType - the element type of the array
/// \param destroyer - the function to call to destroy elements
/// \param useEHCleanup - whether to push an EH cleanup to destroy
///   the remaining elements in case the destruction of a single
///   element throws
void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
                                       llvm::Value *end,
                                       QualType elementType,
                                       CharUnits elementAlign,
                                       Destroyer *destroyer,
                                       bool checkZeroLength,
                                       bool useEHCleanup) {
  assert(!elementType->isArrayType());

  // The basic structure here is a do-while loop, because we don't
  // need to check for the zero-element case.
  llvm::BasicBlock *bodyBB = createBasicBlock("arraydestroy.body");
  llvm::BasicBlock *doneBB = createBasicBlock("arraydestroy.done");

  if (checkZeroLength) {
    llvm::Value *isEmpty = Builder.CreateICmpEQ(begin, end,
                                                "arraydestroy.isempty");
    Builder.CreateCondBr(isEmpty, doneBB, bodyBB);
  }

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  EmitBlock(bodyBB);
  llvm::PHINode *elementPast =
    Builder.CreatePHI(begin->getType(), 2, "arraydestroy.elementPast");
  elementPast->addIncoming(end, entryBB);

  // Shift the address back by one element.
  llvm::Value *negativeOne = llvm::ConstantInt::get(SizeTy, -1, true);
  llvm::Value *element = Builder.CreateInBoundsGEP(elementPast, negativeOne,
                                                   "arraydestroy.element");

  if (useEHCleanup)
    pushRegularPartialArrayCleanup(begin, element, elementType, elementAlign,
                                   destroyer);

  // Perform the actual destruction there.
  destroyer(*this, Address(element, elementAlign), elementType);

  if (useEHCleanup)
    PopCleanupBlock();

  // Check whether we've reached the end.
  llvm::Value *done = Builder.CreateICmpEQ(element, begin, "arraydestroy.done");
  Builder.CreateCondBr(done, doneBB, bodyBB);
  elementPast->addIncoming(element, Builder.GetInsertBlock());

  // Done.
  EmitBlock(doneBB);
}

/// Perform partial array destruction as if in an EH cleanup. Unlike
/// emitArrayDestroy, the element type here may still be an array type.
static void emitPartialArrayDestroy(CodeGenFunction &CGF,
                                    llvm::Value *begin, llvm::Value *end,
                                    QualType type, CharUnits elementAlign,
                                    CodeGenFunction::Destroyer *destroyer) {
  // If the element type is itself an array, drill down.
  unsigned arrayDepth = 0;
  while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(type)) {
    // VLAs don't require a GEP index to walk into.
    if (!isa<VariableArrayType>(arrayType))
      arrayDepth++;
    type = arrayType->getElementType();
  }

  if (arrayDepth) {
    llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);

    SmallVector<llvm::Value*,4> gepIndices(arrayDepth+1, zero);
    begin = CGF.Builder.CreateInBoundsGEP(begin, gepIndices, "pad.arraybegin");
    end = CGF.Builder.CreateInBoundsGEP(end, gepIndices, "pad.arrayend");
  }

  // Destroy the array. We don't ever need an EH cleanup because we
  // assume that we're in an EH cleanup ourselves, so a throwing
  // destructor causes an immediate terminate.
  CGF.emitArrayDestroy(begin, end, type, elementAlign, destroyer,
                       /*checkZeroLength*/ true, /*useEHCleanup*/ false);
}
/// Perform partial array destruction as if in an EH cleanup.  Unlike
/// emitArrayDestroy, the element type here may still be an array type.
static void emitPartialArrayDestroy(CodeGenFunction &CGF,
                                    llvm::Value *begin, llvm::Value *end,
                                    QualType type, CharUnits elementAlign,
                                    CodeGenFunction::Destroyer *destroyer) {
  // If the element type is itself an array, drill down.
  unsigned arrayDepth = 0;
  while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(type)) {
    // VLAs don't require a GEP index to walk into.
    if (!isa<VariableArrayType>(arrayType))
      arrayDepth++;
    type = arrayType->getElementType();
  }

  if (arrayDepth) {
    llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);

    SmallVector<llvm::Value*,4> gepIndices(arrayDepth+1, zero);
    begin = CGF.Builder.CreateInBoundsGEP(begin, gepIndices, "pad.arraybegin");
    end = CGF.Builder.CreateInBoundsGEP(end, gepIndices, "pad.arrayend");
  }

  // Destroy the array.  We don't ever need an EH cleanup because we
  // assume that we're in an EH cleanup ourselves, so a throwing
  // destructor causes an immediate terminate.
  CGF.emitArrayDestroy(begin, end, type, elementAlign, destroyer,
                       /*checkZeroLength*/ true, /*useEHCleanup*/ false);
}

namespace {
  /// RegularPartialArrayDestroy - a cleanup which performs a partial
  /// array destroy where the end pointer is regularly determined and
  /// does not need to be loaded from a local.
  class RegularPartialArrayDestroy final : public EHScopeStack::Cleanup {
    llvm::Value *ArrayBegin;
    llvm::Value *ArrayEnd;
    QualType ElementType;
    CodeGenFunction::Destroyer *Destroyer;
    CharUnits ElementAlign;
  public:
    RegularPartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEnd,
                               QualType elementType, CharUnits elementAlign,
                               CodeGenFunction::Destroyer *destroyer)
      : ArrayBegin(arrayBegin), ArrayEnd(arrayEnd),
        ElementType(elementType), Destroyer(destroyer),
        ElementAlign(elementAlign) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      emitPartialArrayDestroy(CGF, ArrayBegin, ArrayEnd,
                              ElementType, ElementAlign, Destroyer);
    }
  };

  /// IrregularPartialArrayDestroy - a cleanup which performs a
  /// partial array destroy where the end pointer is irregularly
  /// determined and must be loaded from a local.
  class IrregularPartialArrayDestroy final : public EHScopeStack::Cleanup {
    llvm::Value *ArrayBegin;
    Address ArrayEndPointer;
    QualType ElementType;
    CodeGenFunction::Destroyer *Destroyer;
    CharUnits ElementAlign;
  public:
    IrregularPartialArrayDestroy(llvm::Value *arrayBegin,
                                 Address arrayEndPointer,
                                 QualType elementType,
                                 CharUnits elementAlign,
                                 CodeGenFunction::Destroyer *destroyer)
      : ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
        ElementType(elementType), Destroyer(destroyer),
        ElementAlign(elementAlign) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      llvm::Value *arrayEnd = CGF.Builder.CreateLoad(ArrayEndPointer);
      emitPartialArrayDestroy(CGF, ArrayBegin, arrayEnd,
                              ElementType, ElementAlign, Destroyer);
    }
  };
} // end anonymous namespace

/// pushIrregularPartialArrayCleanup - Push an EH cleanup to destroy
/// already-constructed elements of the given array.  The cleanup
/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
///
/// \param elementType - the immediate element type of the array;
///   possibly still an array type
void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
                                                       Address arrayEndPointer,
                                                       QualType elementType,
                                                       CharUnits elementAlign,
                                                       Destroyer *destroyer) {
  pushFullExprCleanup<IrregularPartialArrayDestroy>(EHCleanup,
                                                    arrayBegin, arrayEndPointer,
                                                    elementType, elementAlign,
                                                    destroyer);
}
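// For illustration (a sketch, not part of the original source): when such a
// cleanup fires for a partially-constructed `S buf[4][3]`, the begin/end
// pointers have IR type [3 x %struct.S]* and the element type is S[3].
// emitPartialArrayDestroy above drills down to element type S
// (arrayDepth == 1) and rewrites both pointers with two zero indices,
// roughly
//
//   %pad.arraybegin = getelementptr inbounds [3 x %struct.S],
//                                   [3 x %struct.S]* %begin, i64 0, i64 0
//
// so that emitArrayDestroy can walk individual %struct.S elements.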
/// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
/// already-constructed elements of the given array.  The cleanup
/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
///
/// \param elementType - the immediate element type of the array;
///   possibly still an array type
void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
                                                     llvm::Value *arrayEnd,
                                                     QualType elementType,
                                                     CharUnits elementAlign,
                                                     Destroyer *destroyer) {
  pushFullExprCleanup<RegularPartialArrayDestroy>(EHCleanup,
                                                  arrayBegin, arrayEnd,
                                                  elementType, elementAlign,
                                                  destroyer);
}

/// Lazily declare the @llvm.lifetime.start intrinsic.
llvm::Constant *CodeGenModule::getLLVMLifetimeStartFn() {
  if (LifetimeStartFn) return LifetimeStartFn;
  LifetimeStartFn = llvm::Intrinsic::getDeclaration(&getModule(),
                                              llvm::Intrinsic::lifetime_start);
  return LifetimeStartFn;
}

/// Lazily declare the @llvm.lifetime.end intrinsic.
llvm::Constant *CodeGenModule::getLLVMLifetimeEndFn() {
  if (LifetimeEndFn) return LifetimeEndFn;
  LifetimeEndFn = llvm::Intrinsic::getDeclaration(&getModule(),
                                                llvm::Intrinsic::lifetime_end);
  return LifetimeEndFn;
}

namespace {
  /// A cleanup to perform a release of an object at the end of a
  /// function.  This is used to balance out the incoming +1 of a
  /// ns_consumed argument when we can't reasonably do that just by
  /// not doing the initial retain for a __block argument.
  struct ConsumeARCParameter final : EHScopeStack::Cleanup {
    ConsumeARCParameter(llvm::Value *param,
                        ARCPreciseLifetime_t precise)
      : Param(param), Precise(precise) {}

    llvm::Value *Param;
    ARCPreciseLifetime_t Precise;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitARCRelease(Param, Precise);
    }
  };
} // end anonymous namespace
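// For illustration (a sketch, not part of the original source): a parameter
// declared like
//
//   void take(__attribute__((ns_consumed)) id obj);
//
// arrives at +1.  When its lifetime is __strong, EmitParmDecl below balances
// the +1 simply by skipping the usual initial retain; for other lifetimes it
// pushes a ConsumeARCParameter cleanup so an objc_release runs on function
// exit, precise or imprecise depending on objc_precise_lifetime.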
/// Emit an alloca (or GlobalValue depending on target)
/// for the specified parameter and set up LocalDeclMap.
void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
                                   unsigned ArgNo) {
  // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
  assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
         "Invalid argument to EmitParmDecl");

  Arg.getAnyValue()->setName(D.getName());

  QualType Ty = D.getType();

  // Use better IR generation for certain implicit parameters.
  if (auto IPD = dyn_cast<ImplicitParamDecl>(&D)) {
    // The only implicit argument a block has is its literal.
    // We assume this is always passed directly.
    if (BlockInfo) {
      setBlockContextParameter(IPD, ArgNo, Arg.getDirectValue());
      return;
    }
  }

  Address DeclPtr = Address::invalid();
  bool DoStore = false;
  bool IsScalar = hasScalarEvaluationKind(Ty);
  // If we already have a pointer to the argument, reuse the input pointer.
  if (Arg.isIndirect()) {
    DeclPtr = Arg.getIndirectAddress();
    // If we have a prettier pointer type at this point, bitcast to that.
    unsigned AS = DeclPtr.getType()->getAddressSpace();
    llvm::Type *IRTy = ConvertTypeForMem(Ty)->getPointerTo(AS);
    if (DeclPtr.getType() != IRTy)
      DeclPtr = Builder.CreateBitCast(DeclPtr, IRTy, D.getName());

    // Push a destructor cleanup for this parameter if the ABI requires it.
    // Don't push a cleanup in a thunk for a method that will also emit a
    // cleanup.
    if (!IsScalar && !CurFuncIsThunk &&
        getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
      const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
      if (RD && RD->hasNonTrivialDestructor())
        pushDestroy(QualType::DK_cxx_destructor, DeclPtr, Ty);
    }
  } else {
    // Otherwise, create a temporary to hold the value.
    DeclPtr = CreateMemTemp(Ty, getContext().getDeclAlign(&D),
                            D.getName() + ".addr");
    DoStore = true;
  }

  llvm::Value *ArgVal = (DoStore ? Arg.getDirectValue() : nullptr);

  LValue lv = MakeAddrLValue(DeclPtr, Ty);
  if (IsScalar) {
    Qualifiers qs = Ty.getQualifiers();
    if (Qualifiers::ObjCLifetime lt = qs.getObjCLifetime()) {
      // We honor __attribute__((ns_consumed)) for types with lifetime.
      // For __strong, it's handled by just skipping the initial retain;
      // otherwise we have to balance out the initial +1 with an extra
      // cleanup to do the release at the end of the function.
      bool isConsumed = D.hasAttr<NSConsumedAttr>();

      // 'self' is always formally __strong, but if this is not an
      // init method then we don't want to retain it.
      if (D.isARCPseudoStrong()) {
        const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CurCodeDecl);
        assert(&D == method->getSelfDecl());
        assert(lt == Qualifiers::OCL_Strong);
        assert(qs.hasConst());
        assert(method->getMethodFamily() != OMF_init);
        (void) method;
        lt = Qualifiers::OCL_ExplicitNone;
      }

      if (lt == Qualifiers::OCL_Strong) {
        if (!isConsumed) {
          if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
            // Use objc_storeStrong(&dest, value) for retaining the
            // object.  But first, store a null into 'dest' because
            // objc_storeStrong attempts to release its old value.
            llvm::Value *Null = CGM.EmitNullConstant(D.getType());
            EmitStoreOfScalar(Null, lv, /* isInitialization */ true);
            EmitARCStoreStrongCall(lv.getAddress(), ArgVal, true);
            DoStore = false;
          } else {
            // Don't use objc_retainBlock for block pointers, because we
            // don't want to Block_copy something just because we got it
            // as a parameter.
            ArgVal = EmitARCRetainNonBlock(ArgVal);
          }
        }
      } else {
        // Push the cleanup for a consumed parameter.
        if (isConsumed) {
          ARCPreciseLifetime_t precise = (D.hasAttr<ObjCPreciseLifetimeAttr>()
                                            ? ARCPreciseLifetime
                                            : ARCImpreciseLifetime);
          EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), ArgVal,
                                                   precise);
        }

        if (lt == Qualifiers::OCL_Weak) {
          EmitARCInitWeak(DeclPtr, ArgVal);
          DoStore = false; // The weak init is a store, no need to do two.
        }
      }

      // Enter the cleanup scope.
      EmitAutoVarWithLifetime(*this, D, DeclPtr, lt);
    }
  }

  // Store the initial value into the alloca.
  if (DoStore)
    EmitStoreOfScalar(ArgVal, lv, /* isInitialization */ true);

  setAddrOfLocalVar(&D, DeclPtr);

  // Emit debug info for param declaration.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().getDebugInfo() >=
        codegenoptions::LimitedDebugInfo) {
      DI->EmitDeclareOfArgVariable(&D, DeclPtr.getPointer(), ArgNo, Builder);
    }
  }

  if (D.hasAttr<AnnotateAttr>())
    EmitVarAnnotations(&D, DeclPtr.getPointer());
}
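// For illustration (a sketch, not part of the original source): for a direct
// scalar argument such as `void f(int x)`, the path above creates a temporary
// named from the parameter and stores into it, producing roughly
//
//   define void @f(i32 %x) {
//     %x.addr = alloca i32, align 4
//     store i32 %x, i32* %x.addr, align 4
//     ...
//   }
//
// plus an llvm.dbg.declare on %x.addr when debug info is enabled, whereas an
// indirectly-passed aggregate reuses the caller-provided pointer (possibly
// bitcast to a prettier type) instead of allocating a new slot.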