//===--- CGCleanup.cpp - Bookkeeping and code emission for cleanups ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains code dealing with the IR generation for cleanups
// and related information.
//
// A "cleanup" is a piece of code which needs to be executed whenever
// control transfers out of a particular scope.  This can be
// conditionalized to occur only on exceptional control flow, only on
// normal control flow, or both.
//
//===----------------------------------------------------------------------===//

#include "CGCleanup.h"
#include "CodeGenFunction.h"
#include "llvm/Support/SaveAndRestore.h"

using namespace clang;
using namespace CodeGen;

bool DominatingValue<RValue>::saved_type::needsSaving(RValue rv) {
  if (rv.isScalar())
    return DominatingLLVMValue::needsSaving(rv.getScalarVal());
  if (rv.isAggregate())
    return DominatingLLVMValue::needsSaving(rv.getAggregatePointer());
  return true;
}

DominatingValue<RValue>::saved_type
DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
  if (rv.isScalar()) {
    llvm::Value *V = rv.getScalarVal();

    // These automatically dominate and don't need to be saved.
    if (!DominatingLLVMValue::needsSaving(V))
      return saved_type(V, ScalarLiteral);

    // Everything else needs an alloca.
    Address addr =
        CGF.CreateDefaultAlignTempAlloca(V->getType(), "saved-rvalue");
    CGF.Builder.CreateStore(V, addr);
    return saved_type(addr.getPointer(), ScalarAddress);
  }

  if (rv.isComplex()) {
    CodeGenFunction::ComplexPairTy V = rv.getComplexVal();
    llvm::Type *ComplexTy =
        llvm::StructType::get(V.first->getType(), V.second->getType());
    Address addr = CGF.CreateDefaultAlignTempAlloca(ComplexTy, "saved-complex");
    CGF.Builder.CreateStore(V.first, CGF.Builder.CreateStructGEP(addr, 0));
    CGF.Builder.CreateStore(V.second, CGF.Builder.CreateStructGEP(addr, 1));
    return saved_type(addr.getPointer(), ComplexAddress);
  }

  assert(rv.isAggregate());
  Address V = rv.getAggregateAddress(); // TODO: volatile?
  if (!DominatingLLVMValue::needsSaving(V.getPointer()))
    return saved_type(V.getPointer(), AggregateLiteral,
                      V.getAlignment().getQuantity());

  Address addr =
      CGF.CreateTempAlloca(V.getType(), CGF.getPointerAlign(), "saved-rvalue");
  CGF.Builder.CreateStore(V.getPointer(), addr);
  return saved_type(addr.getPointer(), AggregateAddress,
                    V.getAlignment().getQuantity());
}

/// Given a saved r-value produced by saved_type::save, perform the code
/// necessary to restore it to usability at the current insertion
/// point.
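///
/// For example (illustrative only): a value saved as ScalarAddress is
/// restored with a single load from the saving alloca, while a
/// ComplexAddress reloads the real and imaginary components through the
/// same struct GEPs that save() used to store them.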
RValue DominatingValue<RValue>::saved_type::restore(CodeGenFunction &CGF) {
  auto getSavingAddress = [&](llvm::Value *value) {
    auto alignment = cast<llvm::AllocaInst>(value)->getAlignment();
    return Address(value, CharUnits::fromQuantity(alignment));
  };
  switch (K) {
  case ScalarLiteral:
    return RValue::get(Value);
  case ScalarAddress:
    return RValue::get(CGF.Builder.CreateLoad(getSavingAddress(Value)));
  case AggregateLiteral:
    return RValue::getAggregate(Address(Value, CharUnits::fromQuantity(Align)));
  case AggregateAddress: {
    auto addr = CGF.Builder.CreateLoad(getSavingAddress(Value));
    return RValue::getAggregate(Address(addr, CharUnits::fromQuantity(Align)));
  }
  case ComplexAddress: {
    Address address = getSavingAddress(Value);
    llvm::Value *real =
        CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(address, 0));
    llvm::Value *imag =
        CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(address, 1));
    return RValue::getComplex(real, imag);
  }
  }

  llvm_unreachable("bad saved r-value kind");
}

/// Push an entry of the given size onto this protected-scope stack.
char *EHScopeStack::allocate(size_t Size) {
  Size = llvm::alignTo(Size, ScopeStackAlignment);
  if (!StartOfBuffer) {
    unsigned Capacity = 1024;
    while (Capacity < Size) Capacity *= 2;
    StartOfBuffer = new char[Capacity];
    StartOfData = EndOfBuffer = StartOfBuffer + Capacity;
  } else if (static_cast<size_t>(StartOfData - StartOfBuffer) < Size) {
    unsigned CurrentCapacity = EndOfBuffer - StartOfBuffer;
    unsigned UsedCapacity = CurrentCapacity - (StartOfData - StartOfBuffer);

    unsigned NewCapacity = CurrentCapacity;
    do {
      NewCapacity *= 2;
    } while (NewCapacity < UsedCapacity + Size);

    char *NewStartOfBuffer = new char[NewCapacity];
    char *NewEndOfBuffer = NewStartOfBuffer + NewCapacity;
    char *NewStartOfData = NewEndOfBuffer - UsedCapacity;
    memcpy(NewStartOfData, StartOfData, UsedCapacity);
    delete [] StartOfBuffer;
    StartOfBuffer = NewStartOfBuffer;
    EndOfBuffer = NewEndOfBuffer;
    StartOfData = NewStartOfData;
  }

  assert(StartOfBuffer + Size <= StartOfData);
  StartOfData -= Size;
  return StartOfData;
}

void EHScopeStack::deallocate(size_t Size) {
  StartOfData += llvm::alignTo(Size, ScopeStackAlignment);
}

bool EHScopeStack::containsOnlyLifetimeMarkers(
    EHScopeStack::stable_iterator Old) const {
  for (EHScopeStack::iterator it = begin(); stabilize(it) != Old; it++) {
    EHCleanupScope *cleanup = dyn_cast<EHCleanupScope>(&*it);
    if (!cleanup || !cleanup->isLifetimeMarker())
      return false;
  }

  return true;
}

bool EHScopeStack::requiresLandingPad() const {
  for (stable_iterator si = getInnermostEHScope(); si != stable_end(); ) {
    // Skip lifetime markers.
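    // (A lifetime-marker cleanup only emits lifetime-end markers, which
    // cannot throw, so on its own it never forces a landing pad.)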
    if (auto *cleanup = dyn_cast<EHCleanupScope>(&*find(si)))
      if (cleanup->isLifetimeMarker()) {
        si = cleanup->getEnclosingEHScope();
        continue;
      }
    return true;
  }

  return false;
}

EHScopeStack::stable_iterator
EHScopeStack::getInnermostActiveNormalCleanup() const {
  for (stable_iterator si = getInnermostNormalCleanup(), se = stable_end();
         si != se; ) {
    EHCleanupScope &cleanup = cast<EHCleanupScope>(*find(si));
    if (cleanup.isActive()) return si;
    si = cleanup.getEnclosingNormalCleanup();
  }
  return stable_end();
}


void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) {
  char *Buffer = allocate(EHCleanupScope::getSizeForCleanupSize(Size));
  bool IsNormalCleanup = Kind & NormalCleanup;
  bool IsEHCleanup = Kind & EHCleanup;
  bool IsActive = !(Kind & InactiveCleanup);
  bool IsLifetimeMarker = Kind & LifetimeMarker;
  EHCleanupScope *Scope =
    new (Buffer) EHCleanupScope(IsNormalCleanup,
                                IsEHCleanup,
                                IsActive,
                                Size,
                                BranchFixups.size(),
                                InnermostNormalCleanup,
                                InnermostEHScope);
  if (IsNormalCleanup)
    InnermostNormalCleanup = stable_begin();
  if (IsEHCleanup)
    InnermostEHScope = stable_begin();
  if (IsLifetimeMarker)
    Scope->setLifetimeMarker();

  return Scope->getCleanupBuffer();
}

void EHScopeStack::popCleanup() {
  assert(!empty() && "popping exception stack when empty");

  assert(isa<EHCleanupScope>(*begin()));
  EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin());
  InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup();
  InnermostEHScope = Cleanup.getEnclosingEHScope();
  deallocate(Cleanup.getAllocatedSize());

  // Destroy the cleanup.
  Cleanup.Destroy();

  // Check whether we can shrink the branch-fixups stack.
  if (!BranchFixups.empty()) {
    // If we no longer have any normal cleanups, all the fixups are
    // complete.
    if (!hasNormalCleanups())
      BranchFixups.clear();

    // Otherwise we can still trim out unnecessary nulls.
    else
      popNullFixups();
  }
}

EHFilterScope *EHScopeStack::pushFilter(unsigned numFilters) {
  assert(getInnermostEHScope() == stable_end());
  char *buffer = allocate(EHFilterScope::getSizeForNumFilters(numFilters));
  EHFilterScope *filter = new (buffer) EHFilterScope(numFilters);
  InnermostEHScope = stable_begin();
  return filter;
}

void EHScopeStack::popFilter() {
  assert(!empty() && "popping exception stack when empty");

  EHFilterScope &filter = cast<EHFilterScope>(*begin());
  deallocate(EHFilterScope::getSizeForNumFilters(filter.getNumFilters()));

  InnermostEHScope = filter.getEnclosingEHScope();
}

EHCatchScope *EHScopeStack::pushCatch(unsigned numHandlers) {
  char *buffer = allocate(EHCatchScope::getSizeForNumHandlers(numHandlers));
  EHCatchScope *scope =
    new (buffer) EHCatchScope(numHandlers, InnermostEHScope);
  InnermostEHScope = stable_begin();
  return scope;
}

void EHScopeStack::pushTerminate() {
  char *Buffer = allocate(EHTerminateScope::getSize());
  new (Buffer) EHTerminateScope(InnermostEHScope);
  InnermostEHScope = stable_begin();
}

/// Remove any 'null' fixups on the stack.  However, we can't pop more
/// fixups than the fixup depth on the innermost normal cleanup, or
/// else fixups that we try to add to that cleanup will end up in the
/// wrong place.
/// We *could* try to shrink fixup depths, but that's
/// actually a lot of work for little benefit.
void EHScopeStack::popNullFixups() {
  // We expect this to only be called when there's still an innermost
  // normal cleanup; otherwise there really shouldn't be any fixups.
  assert(hasNormalCleanups());

  EHScopeStack::iterator it = find(InnermostNormalCleanup);
  unsigned MinSize = cast<EHCleanupScope>(*it).getFixupDepth();
  assert(BranchFixups.size() >= MinSize && "fixup stack out of order");

  while (BranchFixups.size() > MinSize &&
         BranchFixups.back().Destination == nullptr)
    BranchFixups.pop_back();
}

Address CodeGenFunction::createCleanupActiveFlag() {
  // Create a variable to decide whether the cleanup needs to be run.
  Address active = CreateTempAllocaWithoutCast(
      Builder.getInt1Ty(), CharUnits::One(), "cleanup.cond");

  // Initialize it to false at a site that's guaranteed to be run
  // before each evaluation.
  setBeforeOutermostConditional(Builder.getFalse(), active);

  // Initialize it to true at the current location.
  Builder.CreateStore(Builder.getTrue(), active);

  return active;
}

void CodeGenFunction::initFullExprCleanupWithFlag(Address ActiveFlag) {
  // Set that as the active flag in the cleanup.
  EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin());
  assert(!cleanup.hasActiveFlag() && "cleanup already has active flag?");
  cleanup.setActiveFlag(ActiveFlag);

  if (cleanup.isNormalCleanup()) cleanup.setTestFlagInNormalCleanup();
  if (cleanup.isEHCleanup()) cleanup.setTestFlagInEHCleanup();
}

void EHScopeStack::Cleanup::anchor() {}

static void createStoreInstBefore(llvm::Value *value, Address addr,
                                  llvm::Instruction *beforeInst) {
  auto store = new llvm::StoreInst(value, addr.getPointer(), beforeInst);
  store->setAlignment(addr.getAlignment().getAsAlign());
}

static llvm::LoadInst *createLoadInstBefore(Address addr, const Twine &name,
                                            llvm::Instruction *beforeInst) {
  return new llvm::LoadInst(addr.getElementType(), addr.getPointer(), name,
                            false, addr.getAlignment().getAsAlign(),
                            beforeInst);
}

/// All the branch fixups on the EH stack have propagated out past the
/// outermost normal cleanup; resolve them all by adding cases to the
/// given switch instruction.
static void ResolveAllBranchFixups(CodeGenFunction &CGF,
                                   llvm::SwitchInst *Switch,
                                   llvm::BasicBlock *CleanupEntry) {
  llvm::SmallPtrSet<llvm::BasicBlock*, 4> CasesAdded;

  for (unsigned I = 0, E = CGF.EHStack.getNumBranchFixups(); I != E; ++I) {
    // Skip this fixup if its destination isn't set.
    BranchFixup &Fixup = CGF.EHStack.getBranchFixup(I);
    if (Fixup.Destination == nullptr) continue;

    // If there isn't an OptimisticBranchBlock, then InitialBranch is
    // still pointing directly to its destination; forward it to the
    // appropriate cleanup entry.  This is required in the specific
    // case of
    //   { std::string s; goto lbl; }
    //   lbl:
    // i.e. where there's an unresolved fixup inside a single cleanup
    // entry which we're currently popping.
    if (Fixup.OptimisticBranchBlock == nullptr) {
      createStoreInstBefore(CGF.Builder.getInt32(Fixup.DestinationIndex),
                            CGF.getNormalCleanupDestSlot(),
                            Fixup.InitialBranch);
      Fixup.InitialBranch->setSuccessor(0, CleanupEntry);
    }

    // Don't add this case to the switch statement twice.
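    // (Several fixups may share a single destination block, e.g. two
    // gotos to the same label, and the switch needs only one case for it.)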
    if (!CasesAdded.insert(Fixup.Destination).second)
      continue;

    Switch->addCase(CGF.Builder.getInt32(Fixup.DestinationIndex),
                    Fixup.Destination);
  }

  CGF.EHStack.clearFixups();
}

/// Transitions the terminator of the given exit-block of a cleanup to
/// be a cleanup switch.
static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF,
                                                   llvm::BasicBlock *Block) {
  // If it's a branch, turn it into a switch whose default
  // destination is its original target.
  llvm::Instruction *Term = Block->getTerminator();
  assert(Term && "can't transition block without terminator");

  if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
    assert(Br->isUnconditional());
    auto Load = createLoadInstBefore(CGF.getNormalCleanupDestSlot(),
                                     "cleanup.dest", Term);
    llvm::SwitchInst *Switch =
      llvm::SwitchInst::Create(Load, Br->getSuccessor(0), 4, Block);
    Br->eraseFromParent();
    return Switch;
  } else {
    return cast<llvm::SwitchInst>(Term);
  }
}

void CodeGenFunction::ResolveBranchFixups(llvm::BasicBlock *Block) {
  assert(Block && "resolving a null target block");
  if (!EHStack.getNumBranchFixups()) return;

  assert(EHStack.hasNormalCleanups() &&
         "branch fixups exist with no normal cleanups on stack");

  llvm::SmallPtrSet<llvm::BasicBlock*, 4> ModifiedOptimisticBlocks;
  bool ResolvedAny = false;

  for (unsigned I = 0, E = EHStack.getNumBranchFixups(); I != E; ++I) {
    // Skip this fixup if its destination doesn't match.
    BranchFixup &Fixup = EHStack.getBranchFixup(I);
    if (Fixup.Destination != Block) continue;

    Fixup.Destination = nullptr;
    ResolvedAny = true;

    // If it doesn't have an optimistic branch block, InitialBranch is
    // already pointing to the right place.
    llvm::BasicBlock *BranchBB = Fixup.OptimisticBranchBlock;
    if (!BranchBB)
      continue;

    // Don't process the same optimistic branch block twice.
    if (!ModifiedOptimisticBlocks.insert(BranchBB).second)
      continue;

    llvm::SwitchInst *Switch = TransitionToCleanupSwitch(*this, BranchBB);

    // Add a case to the switch.
    Switch->addCase(Builder.getInt32(Fixup.DestinationIndex), Block);
  }

  if (ResolvedAny)
    EHStack.popNullFixups();
}

/// Pops cleanup blocks until the given savepoint is reached.
void CodeGenFunction::PopCleanupBlocks(
    EHScopeStack::stable_iterator Old,
    std::initializer_list<llvm::Value **> ValuesToReload) {
  assert(Old.isValid());

  bool HadBranches = false;
  while (EHStack.stable_begin() != Old) {
    EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
    HadBranches |= Scope.hasBranches();

    // As long as Old strictly encloses the scope's enclosing normal
    // cleanup, we're going to emit another normal cleanup which
    // fallthrough can propagate through.
    bool FallThroughIsBranchThrough =
        Old.strictlyEncloses(Scope.getEnclosingNormalCleanup());

    PopCleanupBlock(FallThroughIsBranchThrough);
  }

  // If we didn't have any branches, the insertion point before cleanups must
  // dominate the current insertion point and we don't need to reload any
  // values.
  if (!HadBranches)
    return;

  // Spill and reload all values that the caller wants to be live at the
  // current insertion point.
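  //
  // A rough sketch of the resulting IR for one reloaded value %v (all
  // names illustrative):
  //   %tmp.exprcleanup = alloca <ty>
  //   store <ty> %v, <ty>* %tmp.exprcleanup      ; right after %v's definition
  //   ; ...cleanup blocks...
  //   %reload = load <ty>, <ty>* %tmp.exprcleanup ; at the current IP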
  for (llvm::Value **ReloadedValue : ValuesToReload) {
    auto *Inst = dyn_cast_or_null<llvm::Instruction>(*ReloadedValue);
    if (!Inst)
      continue;

    // Don't spill static allocas, they dominate all cleanups. These are created
    // by binding a reference to a local variable or temporary.
    auto *AI = dyn_cast<llvm::AllocaInst>(Inst);
    if (AI && AI->isStaticAlloca())
      continue;

    Address Tmp =
        CreateDefaultAlignTempAlloca(Inst->getType(), "tmp.exprcleanup");

    // Find an insertion point after Inst and spill it to the temporary.
    llvm::BasicBlock::iterator InsertBefore;
    if (auto *Invoke = dyn_cast<llvm::InvokeInst>(Inst))
      InsertBefore = Invoke->getNormalDest()->getFirstInsertionPt();
    else
      InsertBefore = std::next(Inst->getIterator());
    CGBuilderTy(CGM, &*InsertBefore).CreateStore(Inst, Tmp);

    // Reload the value at the current insertion point.
    *ReloadedValue = Builder.CreateLoad(Tmp);
  }
}

/// Pops cleanup blocks until the given savepoint is reached, then adds the
/// cleanups from the given savepoint in the lifetime-extended cleanups stack.
void CodeGenFunction::PopCleanupBlocks(
    EHScopeStack::stable_iterator Old, size_t OldLifetimeExtendedSize,
    std::initializer_list<llvm::Value **> ValuesToReload) {
  PopCleanupBlocks(Old, ValuesToReload);

  // Move our deferred cleanups onto the EH stack.
  for (size_t I = OldLifetimeExtendedSize,
              E = LifetimeExtendedCleanupStack.size(); I != E; /**/) {
    // Alignment should be guaranteed by the vptrs in the individual cleanups.
    assert((I % alignof(LifetimeExtendedCleanupHeader) == 0) &&
           "misaligned cleanup stack entry");

    LifetimeExtendedCleanupHeader &Header =
        reinterpret_cast<LifetimeExtendedCleanupHeader&>(
            LifetimeExtendedCleanupStack[I]);
    I += sizeof(Header);

    EHStack.pushCopyOfCleanup(Header.getKind(),
                              &LifetimeExtendedCleanupStack[I],
                              Header.getSize());
    I += Header.getSize();

    if (Header.isConditional()) {
      Address ActiveFlag =
          reinterpret_cast<Address &>(LifetimeExtendedCleanupStack[I]);
      initFullExprCleanupWithFlag(ActiveFlag);
      I += sizeof(ActiveFlag);
    }
  }
  LifetimeExtendedCleanupStack.resize(OldLifetimeExtendedSize);
}

static llvm::BasicBlock *CreateNormalEntry(CodeGenFunction &CGF,
                                           EHCleanupScope &Scope) {
  assert(Scope.isNormalCleanup());
  llvm::BasicBlock *Entry = Scope.getNormalBlock();
  if (!Entry) {
    Entry = CGF.createBasicBlock("cleanup");
    Scope.setNormalBlock(Entry);
  }
  return Entry;
}

/// Attempts to reduce a cleanup's entry block to a fallthrough.  This
/// is basically llvm::MergeBlockIntoPredecessor, except
/// simplified/optimized for the tighter constraints on cleanup blocks.
///
/// Returns the new block, whatever it is.
static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF,
                                              llvm::BasicBlock *Entry) {
  llvm::BasicBlock *Pred = Entry->getSinglePredecessor();
  if (!Pred) return Entry;

  llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator());
  if (!Br || Br->isConditional()) return Entry;
  assert(Br->getSuccessor(0) == Entry);

  // If we were previously inserting at the end of the cleanup entry
  // block, we'll need to continue inserting at the end of the
  // predecessor.
  bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry;
  assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end());

  // Kill the branch.
  Br->eraseFromParent();

  // Replace all uses of the entry with the predecessor, in case there
  // are phis in the cleanup.
  Entry->replaceAllUsesWith(Pred);

  // Merge the blocks.
  Pred->getInstList().splice(Pred->end(), Entry->getInstList());

  // Kill the entry block.
  Entry->eraseFromParent();

  if (WasInsertBlock)
    CGF.Builder.SetInsertPoint(Pred);

  return Pred;
}

static void EmitCleanup(CodeGenFunction &CGF,
                        EHScopeStack::Cleanup *Fn,
                        EHScopeStack::Cleanup::Flags flags,
                        Address ActiveFlag) {
  // If there's an active flag, load it and skip the cleanup if it's
  // false.
  llvm::BasicBlock *ContBB = nullptr;
  if (ActiveFlag.isValid()) {
    ContBB = CGF.createBasicBlock("cleanup.done");
    llvm::BasicBlock *CleanupBB = CGF.createBasicBlock("cleanup.action");
    llvm::Value *IsActive
      = CGF.Builder.CreateLoad(ActiveFlag, "cleanup.is_active");
    CGF.Builder.CreateCondBr(IsActive, CleanupBB, ContBB);
    CGF.EmitBlock(CleanupBB);
  }

  // Ask the cleanup to emit itself.
  Fn->Emit(CGF, flags);
  assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");

  // Emit the continuation block if there was an active flag.
  if (ActiveFlag.isValid())
    CGF.EmitBlock(ContBB);
}

static void ForwardPrebranchedFallthrough(llvm::BasicBlock *Exit,
                                          llvm::BasicBlock *From,
                                          llvm::BasicBlock *To) {
  // Exit is the exit block of a cleanup, so it always terminates in
  // an unconditional branch or a switch.
  llvm::Instruction *Term = Exit->getTerminator();

  if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
    assert(Br->isUnconditional() && Br->getSuccessor(0) == From);
    Br->setSuccessor(0, To);
  } else {
    llvm::SwitchInst *Switch = cast<llvm::SwitchInst>(Term);
    for (unsigned I = 0, E = Switch->getNumSuccessors(); I != E; ++I)
      if (Switch->getSuccessor(I) == From)
        Switch->setSuccessor(I, To);
  }
}

/// We don't need a normal entry block for the given cleanup.
/// Optimistic fixup branches can cause these blocks to come into
/// existence anyway; if so, destroy them.
///
/// The validity of this transformation is very much specific to the
/// exact ways in which we form branches to cleanup entries.
static void destroyOptimisticNormalEntry(CodeGenFunction &CGF,
                                         EHCleanupScope &scope) {
  llvm::BasicBlock *entry = scope.getNormalBlock();
  if (!entry) return;

  // Replace all the uses with unreachable.
  llvm::BasicBlock *unreachableBB = CGF.getUnreachableBlock();
  for (llvm::BasicBlock::use_iterator
         i = entry->use_begin(), e = entry->use_end(); i != e; ) {
    llvm::Use &use = *i;
    ++i;

    use.set(unreachableBB);

    // The only uses should be fixup switches.
    llvm::SwitchInst *si = cast<llvm::SwitchInst>(use.getUser());
    if (si->getNumCases() == 1 && si->getDefaultDest() == unreachableBB) {
      // Replace the switch with a branch.
      llvm::BranchInst::Create(si->case_begin()->getCaseSuccessor(), si);

      // The switch operand is a load from the cleanup-dest alloca.
      llvm::LoadInst *condition = cast<llvm::LoadInst>(si->getCondition());

      // Destroy the switch.
      si->eraseFromParent();

      // Destroy the load.
      assert(condition->getOperand(0) == CGF.NormalCleanupDest.getPointer());
      assert(condition->use_empty());
      condition->eraseFromParent();
    }
  }

  assert(entry->use_empty());
  delete entry;
}

/// Pops a cleanup block.  If the block includes a normal cleanup, the
/// current insertion point is threaded through the cleanup, as are
/// any branch fixups on the cleanup.
void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
  assert(!EHStack.empty() && "cleanup stack is empty!");
  assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
  assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());

  // Remember activation information.
  bool IsActive = Scope.isActive();
  Address NormalActiveFlag =
      Scope.shouldTestFlagInNormalCleanup() ? Scope.getActiveFlag()
                                            : Address::invalid();
  Address EHActiveFlag =
      Scope.shouldTestFlagInEHCleanup() ? Scope.getActiveFlag()
                                        : Address::invalid();

  // Check whether we need an EH cleanup.  This is only true if we've
  // generated a lazy EH cleanup block.
  llvm::BasicBlock *EHEntry = Scope.getCachedEHDispatchBlock();
  assert(Scope.hasEHBranches() == (EHEntry != nullptr));
  bool RequiresEHCleanup = (EHEntry != nullptr);
  EHScopeStack::stable_iterator EHParent = Scope.getEnclosingEHScope();

  // Check the three conditions which might require a normal cleanup:

  // - whether there are branch fix-ups through this cleanup
  unsigned FixupDepth = Scope.getFixupDepth();
  bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth;

  // - whether there are branch-throughs or branch-afters
  bool HasExistingBranches = Scope.hasBranches();

  // - whether there's a fallthrough
  llvm::BasicBlock *FallthroughSource = Builder.GetInsertBlock();
  bool HasFallthrough = (FallthroughSource != nullptr && IsActive);

  // Branch-through fall-throughs leave the insertion point set to the
  // end of the last cleanup, which points to the current scope.  The
  // rest of IR gen doesn't need to worry about this; it only happens
  // during the execution of PopCleanupBlocks().
  bool HasPrebranchedFallthrough =
      (FallthroughSource && FallthroughSource->getTerminator());

  // If this is a normal cleanup, then having a prebranched
  // fallthrough implies that the fallthrough source unconditionally
  // jumps here.
  assert(!Scope.isNormalCleanup() || !HasPrebranchedFallthrough ||
         (Scope.getNormalBlock() &&
          FallthroughSource->getTerminator()->getSuccessor(0)
            == Scope.getNormalBlock()));

  bool RequiresNormalCleanup = false;
  if (Scope.isNormalCleanup() &&
      (HasFixups || HasExistingBranches || HasFallthrough)) {
    RequiresNormalCleanup = true;
  }

  // If we have a prebranched fallthrough into an inactive normal
  // cleanup, rewrite it so that it leads to the appropriate place.
  if (Scope.isNormalCleanup() && HasPrebranchedFallthrough && !IsActive) {
    llvm::BasicBlock *prebranchDest;

    // If the prebranch is semantically branching through the next
    // cleanup, just forward it to the next block, leaving the
    // insertion point in the prebranched block.
    if (FallthroughIsBranchThrough) {
      EHScope &enclosing = *EHStack.find(Scope.getEnclosingNormalCleanup());
      prebranchDest = CreateNormalEntry(*this, cast<EHCleanupScope>(enclosing));

    // Otherwise, we need to make a new block.  If the normal cleanup
    // isn't being used at all, we could actually reuse the normal
    // entry block, but this is simpler, and it avoids conflicts with
    // dead optimistic fixup branches.
    } else {
      prebranchDest = createBasicBlock("forwarded-prebranch");
      EmitBlock(prebranchDest);
    }

    llvm::BasicBlock *normalEntry = Scope.getNormalBlock();
    assert(normalEntry && !normalEntry->use_empty());

    ForwardPrebranchedFallthrough(FallthroughSource,
                                  normalEntry, prebranchDest);
  }

  // If we don't need the cleanup at all, we're done.
  if (!RequiresNormalCleanup && !RequiresEHCleanup) {
    destroyOptimisticNormalEntry(*this, Scope);
    EHStack.popCleanup(); // safe because there are no fixups
    assert(EHStack.getNumBranchFixups() == 0 ||
           EHStack.hasNormalCleanups());
    return;
  }

  // Copy the cleanup emission data out.  This uses either a stack
  // array or malloc'd memory, depending on the size, which is
  // behavior that SmallVector would provide, if we could use it
  // here. Unfortunately, if you ask for a SmallVector<char>, the
  // alignment isn't sufficient.
  auto *CleanupSource = reinterpret_cast<char *>(Scope.getCleanupBuffer());
  alignas(EHScopeStack::ScopeStackAlignment) char
      CleanupBufferStack[8 * sizeof(void *)];
  std::unique_ptr<char[]> CleanupBufferHeap;
  size_t CleanupSize = Scope.getCleanupSize();
  EHScopeStack::Cleanup *Fn;

  if (CleanupSize <= sizeof(CleanupBufferStack)) {
    memcpy(CleanupBufferStack, CleanupSource, CleanupSize);
    Fn = reinterpret_cast<EHScopeStack::Cleanup *>(CleanupBufferStack);
  } else {
    CleanupBufferHeap.reset(new char[CleanupSize]);
    memcpy(CleanupBufferHeap.get(), CleanupSource, CleanupSize);
    Fn = reinterpret_cast<EHScopeStack::Cleanup *>(CleanupBufferHeap.get());
  }

  EHScopeStack::Cleanup::Flags cleanupFlags;
  if (Scope.isNormalCleanup())
    cleanupFlags.setIsNormalCleanupKind();
  if (Scope.isEHCleanup())
    cleanupFlags.setIsEHCleanupKind();

  if (!RequiresNormalCleanup) {
    destroyOptimisticNormalEntry(*this, Scope);
    EHStack.popCleanup();
  } else {
    // If we have a fallthrough and no other need for the cleanup,
    // emit it directly.
    if (HasFallthrough && !HasPrebranchedFallthrough &&
        !HasFixups && !HasExistingBranches) {

      destroyOptimisticNormalEntry(*this, Scope);
      EHStack.popCleanup();

      EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);

    // Otherwise, the best approach is to thread everything through
    // the cleanup block and then try to clean up after ourselves.
    } else {
      // Force the entry block to exist.
      llvm::BasicBlock *NormalEntry = CreateNormalEntry(*this, Scope);

      // I.  Set up the fallthrough edge in.

      CGBuilderTy::InsertPoint savedInactiveFallthroughIP;

      // If there's a fallthrough, we need to store the cleanup
      // destination index.  For fall-throughs this is always zero.
      if (HasFallthrough) {
        if (!HasPrebranchedFallthrough)
          Builder.CreateStore(Builder.getInt32(0), getNormalCleanupDestSlot());

      // Otherwise, save and clear the IP if we don't have fallthrough
      // because the cleanup is inactive.
      } else if (FallthroughSource) {
        assert(!IsActive && "source without fallthrough for active cleanup");
        savedInactiveFallthroughIP = Builder.saveAndClearIP();
      }

      // II.  Emit the entry block.  This implicitly branches to it if
      // we have fallthrough.  All the fixups and existing branches
      // should already be branched to it.
      EmitBlock(NormalEntry);

      // III.  Figure out where we're going and build the cleanup
      // epilogue.

      bool HasEnclosingCleanups =
          (Scope.getEnclosingNormalCleanup() != EHStack.stable_end());

      // Compute the branch-through dest if we need it:
      //   - if there are branch-throughs threaded through the scope
      //   - if fall-through is a branch-through
      //   - if there are fixups that will be optimistically forwarded
      //     to the enclosing cleanup
      llvm::BasicBlock *BranchThroughDest = nullptr;
      if (Scope.hasBranchThroughs() ||
          (FallthroughSource && FallthroughIsBranchThrough) ||
          (HasFixups && HasEnclosingCleanups)) {
        assert(HasEnclosingCleanups);
        EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup());
        BranchThroughDest = CreateNormalEntry(*this, cast<EHCleanupScope>(S));
      }

      llvm::BasicBlock *FallthroughDest = nullptr;
      SmallVector<llvm::Instruction*, 2> InstsToAppend;

      // If there's exactly one branch-after and no other threads,
      // we can route it without a switch.
      if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough &&
          Scope.getNumBranchAfters() == 1) {
        assert(!BranchThroughDest || !IsActive);

        // Clean up the possibly dead store to the cleanup dest slot.
        llvm::Instruction *NormalCleanupDestSlot =
            cast<llvm::Instruction>(getNormalCleanupDestSlot().getPointer());
        if (NormalCleanupDestSlot->hasOneUse()) {
          NormalCleanupDestSlot->user_back()->eraseFromParent();
          NormalCleanupDestSlot->eraseFromParent();
          NormalCleanupDest = Address::invalid();
        }

        llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0);
        InstsToAppend.push_back(llvm::BranchInst::Create(BranchAfter));

      // Build a switch-out if we need it:
      //   - if there are branch-afters threaded through the scope
      //   - if fall-through is a branch-after
      //   - if there are fixups that have nowhere left to go and
      //     so must be immediately resolved
      } else if (Scope.getNumBranchAfters() ||
                 (HasFallthrough && !FallthroughIsBranchThrough) ||
                 (HasFixups && !HasEnclosingCleanups)) {

        llvm::BasicBlock *Default =
            (BranchThroughDest ? BranchThroughDest : getUnreachableBlock());

        // TODO: base this on the number of branch-afters and fixups
        const unsigned SwitchCapacity = 10;

        // pass the abnormal exit flag to Fn (SEH cleanup)
        cleanupFlags.setHasExitSwitch();

        llvm::LoadInst *Load =
            createLoadInstBefore(getNormalCleanupDestSlot(), "cleanup.dest",
                                 nullptr);
        llvm::SwitchInst *Switch =
            llvm::SwitchInst::Create(Load, Default, SwitchCapacity);

        InstsToAppend.push_back(Load);
        InstsToAppend.push_back(Switch);

        // Branch-after fallthrough.
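        // (Fall-through always uses cleanup destination index 0, so routing
        // it just means giving the switch a case for 0 that targets a fresh
        // continuation block.)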
        if (FallthroughSource && !FallthroughIsBranchThrough) {
          FallthroughDest = createBasicBlock("cleanup.cont");
          if (HasFallthrough)
            Switch->addCase(Builder.getInt32(0), FallthroughDest);
        }

        for (unsigned I = 0, E = Scope.getNumBranchAfters(); I != E; ++I) {
          Switch->addCase(Scope.getBranchAfterIndex(I),
                          Scope.getBranchAfterBlock(I));
        }

        // If there aren't any enclosing cleanups, we can resolve all
        // the fixups now.
        if (HasFixups && !HasEnclosingCleanups)
          ResolveAllBranchFixups(*this, Switch, NormalEntry);
      } else {
        // We should always have a branch-through destination in this case.
        assert(BranchThroughDest);
        InstsToAppend.push_back(llvm::BranchInst::Create(BranchThroughDest));
      }

      // IV.  Pop the cleanup and emit it.
      EHStack.popCleanup();
      assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);

      EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);

      // Append the prepared cleanup prologue from above.
      llvm::BasicBlock *NormalExit = Builder.GetInsertBlock();
      for (unsigned I = 0, E = InstsToAppend.size(); I != E; ++I)
        NormalExit->getInstList().push_back(InstsToAppend[I]);

      // Optimistically hope that any fixups will continue falling through.
      for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
           I < E; ++I) {
        BranchFixup &Fixup = EHStack.getBranchFixup(I);
        if (!Fixup.Destination) continue;
        if (!Fixup.OptimisticBranchBlock) {
          createStoreInstBefore(Builder.getInt32(Fixup.DestinationIndex),
                                getNormalCleanupDestSlot(),
                                Fixup.InitialBranch);
          Fixup.InitialBranch->setSuccessor(0, NormalEntry);
        }
        Fixup.OptimisticBranchBlock = NormalExit;
      }

      // V.  Set up the fallthrough edge out.

      // Case 1: a fallthrough source exists but doesn't branch to the
      // cleanup because the cleanup is inactive.
      if (!HasFallthrough && FallthroughSource) {
        // Prebranched fallthrough was forwarded earlier.
        // Non-prebranched fallthrough doesn't need to be forwarded.
        // Either way, all we need to do is restore the IP we cleared before.
        assert(!IsActive);
        Builder.restoreIP(savedInactiveFallthroughIP);

      // Case 2: a fallthrough source exists and should branch to the
      // cleanup, but we're not supposed to branch through to the next
      // cleanup.
      } else if (HasFallthrough && FallthroughDest) {
        assert(!FallthroughIsBranchThrough);
        EmitBlock(FallthroughDest);

      // Case 3: a fallthrough source exists and should branch to the
      // cleanup and then through to the next.
      } else if (HasFallthrough) {
        // Everything is already set up for this.

      // Case 4: no fallthrough source exists.
      } else {
        Builder.ClearInsertionPoint();
      }

      // VI.  Assorted cleaning.

      // Check whether we can merge NormalEntry into a single predecessor.
      // This might invalidate (non-IR) pointers to NormalEntry.
      llvm::BasicBlock *NewNormalEntry =
          SimplifyCleanupEntry(*this, NormalEntry);

      // If it did invalidate those pointers, and NormalEntry was the same
      // as NormalExit, go back and patch up the fixups.
      if (NewNormalEntry != NormalEntry && NormalEntry == NormalExit)
        for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
             I < E; ++I)
          EHStack.getBranchFixup(I).OptimisticBranchBlock = NewNormalEntry;
    }
  }

  assert(EHStack.hasNormalCleanups() || EHStack.getNumBranchFixups() == 0);

  // Emit the EH cleanup if required.
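  // (The EH path is emitted out of line: the normal insertion point is
  // saved, the cleanup body is re-emitted in the EH entry block, and
  // control then continues at the enclosing EH dispatch block.)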
  if (RequiresEHCleanup) {
    CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();

    EmitBlock(EHEntry);

    llvm::BasicBlock *NextAction = getEHDispatchBlock(EHParent);

    // Push a terminate scope or cleanupendpad scope around the potentially
    // throwing cleanups. For funclet EH personalities, the cleanupendpad models
    // program termination when cleanups throw.
    bool PushedTerminate = false;
    SaveAndRestore<llvm::Instruction *> RestoreCurrentFuncletPad(
        CurrentFuncletPad);
    llvm::CleanupPadInst *CPI = nullptr;

    const EHPersonality &Personality = EHPersonality::get(*this);
    if (Personality.usesFuncletPads()) {
      llvm::Value *ParentPad = CurrentFuncletPad;
      if (!ParentPad)
        ParentPad = llvm::ConstantTokenNone::get(CGM.getLLVMContext());
      CurrentFuncletPad = CPI = Builder.CreateCleanupPad(ParentPad);
    }

    // Non-MSVC personalities need to terminate when an EH cleanup throws.
    if (!Personality.isMSVCPersonality()) {
      EHStack.pushTerminate();
      PushedTerminate = true;
    }

    // We only actually emit the cleanup code if the cleanup is either
    // active or was used before it was deactivated.
    if (EHActiveFlag.isValid() || IsActive) {
      cleanupFlags.setIsForEHCleanup();
      EmitCleanup(*this, Fn, cleanupFlags, EHActiveFlag);
    }

    if (CPI)
      Builder.CreateCleanupRet(CPI, NextAction);
    else
      Builder.CreateBr(NextAction);

    // Leave the terminate scope.
    if (PushedTerminate)
      EHStack.popTerminate();

    Builder.restoreIP(SavedIP);

    SimplifyCleanupEntry(*this, EHEntry);
  }
}

/// isObviouslyBranchWithoutCleanups - Return true if a branch to the
/// specified destination obviously has no cleanups to run.  'false' is always
/// a conservatively correct answer for this method.
bool CodeGenFunction::isObviouslyBranchWithoutCleanups(JumpDest Dest) const {
  assert(Dest.getScopeDepth().encloses(EHStack.stable_begin())
         && "stale jump destination");

  // Calculate the innermost active normal cleanup.
  EHScopeStack::stable_iterator TopCleanup =
      EHStack.getInnermostActiveNormalCleanup();

  // If we're not in an active normal cleanup scope, or if the
  // destination scope is within the innermost active normal cleanup
  // scope, we don't need to worry about fixups.
  if (TopCleanup == EHStack.stable_end() ||
      TopCleanup.encloses(Dest.getScopeDepth())) // works for invalid
    return true;

  // Otherwise, we might need some cleanups.
  return false;
}


/// Terminate the current block by emitting a branch which might leave
/// the current cleanup-protected scope.  The target scope may not yet
/// be known, in which case this will require a fixup.
///
/// As a side-effect, this method clears the insertion point.
void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
  assert(Dest.getScopeDepth().encloses(EHStack.stable_begin())
         && "stale jump destination");

  if (!HaveInsertPoint())
    return;

  // Create the branch.
  llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());

  // Calculate the innermost active normal cleanup.
  EHScopeStack::stable_iterator
    TopCleanup = EHStack.getInnermostActiveNormalCleanup();

  // If we're not in an active normal cleanup scope, or if the
  // destination scope is within the innermost active normal cleanup
  // scope, we don't need to worry about fixups.
  if (TopCleanup == EHStack.stable_end() ||
      TopCleanup.encloses(Dest.getScopeDepth())) { // works for invalid
    Builder.ClearInsertionPoint();
    return;
  }

  // If we can't resolve the destination cleanup scope, just add this
  // to the current cleanup scope as a branch fixup.
  if (!Dest.getScopeDepth().isValid()) {
    BranchFixup &Fixup = EHStack.addBranchFixup();
    Fixup.Destination = Dest.getBlock();
    Fixup.DestinationIndex = Dest.getDestIndex();
    Fixup.InitialBranch = BI;
    Fixup.OptimisticBranchBlock = nullptr;

    Builder.ClearInsertionPoint();
    return;
  }

  // Otherwise, thread through all the normal cleanups in scope.

  // Store the index at the start.
  llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
  createStoreInstBefore(Index, getNormalCleanupDestSlot(), BI);

  // Adjust BI to point to the first cleanup block.
  {
    EHCleanupScope &Scope =
        cast<EHCleanupScope>(*EHStack.find(TopCleanup));
    BI->setSuccessor(0, CreateNormalEntry(*this, Scope));
  }

  // Add this destination to all the scopes involved.
  EHScopeStack::stable_iterator I = TopCleanup;
  EHScopeStack::stable_iterator E = Dest.getScopeDepth();
  if (E.strictlyEncloses(I)) {
    while (true) {
      EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
      assert(Scope.isNormalCleanup());
      I = Scope.getEnclosingNormalCleanup();

      // If this is the last cleanup we're propagating through, tell it
      // that there's a resolved jump moving through it.
      if (!E.strictlyEncloses(I)) {
        Scope.addBranchAfter(Index, Dest.getBlock());
        break;
      }

      // Otherwise, tell the scope that there's a jump propagating
      // through it.  If this isn't new information, all the rest of
      // the work has been done before.
      if (!Scope.addBranchThrough(Dest.getBlock()))
        break;
    }
  }

  Builder.ClearInsertionPoint();
}

static bool IsUsedAsNormalCleanup(EHScopeStack &EHStack,
                                  EHScopeStack::stable_iterator C) {
  // If we needed a normal block for any reason, that counts.
  if (cast<EHCleanupScope>(*EHStack.find(C)).getNormalBlock())
    return true;

  // Check whether any enclosed cleanups were needed.
  for (EHScopeStack::stable_iterator
         I = EHStack.getInnermostNormalCleanup();
       I != C; ) {
    assert(C.strictlyEncloses(I));
    EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
    if (S.getNormalBlock()) return true;
    I = S.getEnclosingNormalCleanup();
  }

  return false;
}

static bool IsUsedAsEHCleanup(EHScopeStack &EHStack,
                              EHScopeStack::stable_iterator cleanup) {
  // If we needed an EH block for any reason, that counts.
  if (EHStack.find(cleanup)->hasEHBranches())
    return true;

  // Check whether any enclosed cleanups were needed.
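  // (An EH branch out of an enclosed scope has to unwind through this
  // cleanup as well, so EH branches recorded there count as uses here.)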
  for (EHScopeStack::stable_iterator
         i = EHStack.getInnermostEHScope(); i != cleanup; ) {
    assert(cleanup.strictlyEncloses(i));

    EHScope &scope = *EHStack.find(i);
    if (scope.hasEHBranches())
      return true;

    i = scope.getEnclosingEHScope();
  }

  return false;
}

enum ForActivation_t {
  ForActivation,
  ForDeactivation
};

/// The given cleanup block is changing activation state.  Configure a
/// cleanup variable if necessary.
///
/// It would be good if we had some way of determining if there were
/// extra uses *after* the change-over point.
static void SetupCleanupBlockActivation(CodeGenFunction &CGF,
                                        EHScopeStack::stable_iterator C,
                                        ForActivation_t kind,
                                        llvm::Instruction *dominatingIP) {
  EHCleanupScope &Scope = cast<EHCleanupScope>(*CGF.EHStack.find(C));

  // We always need the flag if we're activating the cleanup in a
  // conditional context, because we have to assume that the current
  // location doesn't necessarily dominate the cleanup's code.
  bool isActivatedInConditional =
      (kind == ForActivation && CGF.isInConditionalBranch());

  bool needFlag = false;

  // Calculate whether the cleanup was used:

  //  - as a normal cleanup
  if (Scope.isNormalCleanup() &&
      (isActivatedInConditional || IsUsedAsNormalCleanup(CGF.EHStack, C))) {
    Scope.setTestFlagInNormalCleanup();
    needFlag = true;
  }

  //  - as an EH cleanup
  if (Scope.isEHCleanup() &&
      (isActivatedInConditional || IsUsedAsEHCleanup(CGF.EHStack, C))) {
    Scope.setTestFlagInEHCleanup();
    needFlag = true;
  }

  // If it hasn't yet been used as either, we're done.
  if (!needFlag) return;

  Address var = Scope.getActiveFlag();
  if (!var.isValid()) {
    var = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), CharUnits::One(),
                               "cleanup.isactive");
    Scope.setActiveFlag(var);

    assert(dominatingIP && "no existing variable and no dominating IP!");

    // Initialize to true or false depending on whether it was
    // active up to this point.
    llvm::Constant *value = CGF.Builder.getInt1(kind == ForDeactivation);

    // If we're in a conditional block, ignore the dominating IP and
    // use the outermost conditional branch.
    if (CGF.isInConditionalBranch()) {
      CGF.setBeforeOutermostConditional(value, var);
    } else {
      createStoreInstBefore(value, var, dominatingIP);
    }
  }

  CGF.Builder.CreateStore(CGF.Builder.getInt1(kind == ForActivation), var);
}

/// Activate a cleanup that was created in an inactivated state.
void CodeGenFunction::ActivateCleanupBlock(EHScopeStack::stable_iterator C,
                                           llvm::Instruction *dominatingIP) {
  assert(C != EHStack.stable_end() && "activating bottom of stack?");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
  assert(!Scope.isActive() && "double activation");

  SetupCleanupBlockActivation(*this, C, ForActivation, dominatingIP);

  Scope.setActive(true);
}

/// Deactivate a cleanup that was created in an active state.
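///
/// A typical use (illustrative): a cleanup pushed to destroy a partially
/// constructed object can be deactivated once construction completes and
/// responsibility for the object passes elsewhere.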
void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C,
                                             llvm::Instruction *dominatingIP) {
  assert(C != EHStack.stable_end() && "deactivating bottom of stack?");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
  assert(Scope.isActive() && "double deactivation");

  // If it's the top of the stack, just pop it, but do so only if it belongs
  // to the current RunCleanupsScope.
  if (C == EHStack.stable_begin() &&
      CurrentCleanupScopeDepth.strictlyEncloses(C)) {
    // If it's a normal cleanup, we need to pretend that the
    // fallthrough is unreachable.
    CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
    PopCleanupBlock();
    Builder.restoreIP(SavedIP);
    return;
  }

  // Otherwise, follow the general case.
  SetupCleanupBlockActivation(*this, C, ForDeactivation, dominatingIP);

  Scope.setActive(false);
}

Address CodeGenFunction::getNormalCleanupDestSlot() {
  if (!NormalCleanupDest.isValid())
    NormalCleanupDest =
        CreateDefaultAlignTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot");
  return NormalCleanupDest;
}

/// Emits all the code to cause the given temporary to be cleaned up.
void CodeGenFunction::EmitCXXTemporary(const CXXTemporary *Temporary,
                                       QualType TempType,
                                       Address Ptr) {
  pushDestroy(NormalAndEHCleanup, Ptr, TempType, destroyCXXObject,
              /*useEHCleanup*/ true);
}