//===--- CGCleanup.cpp - Bookkeeping and code emission for cleanups ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains code dealing with the IR generation for cleanups
// and related information.
//
// A "cleanup" is a piece of code which needs to be executed whenever
// control transfers out of a particular scope.  This can be
// conditionalized to occur only on exceptional control flow, only on
// normal control flow, or both.
//
//===----------------------------------------------------------------------===//

#include "CGCleanup.h"
#include "CodeGenFunction.h"
#include "llvm/Support/SaveAndRestore.h"

using namespace clang;
using namespace CodeGen;

bool DominatingValue<RValue>::saved_type::needsSaving(RValue rv) {
  if (rv.isScalar())
    return DominatingLLVMValue::needsSaving(rv.getScalarVal());
  if (rv.isAggregate())
    return DominatingLLVMValue::needsSaving(rv.getAggregatePointer());
  return true;
}

DominatingValue<RValue>::saved_type
DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
  if (rv.isScalar()) {
    llvm::Value *V = rv.getScalarVal();

    // These automatically dominate and don't need to be saved.
    if (!DominatingLLVMValue::needsSaving(V))
      return saved_type(V, ScalarLiteral);

    // Everything else needs an alloca.
    Address addr =
        CGF.CreateDefaultAlignTempAlloca(V->getType(), "saved-rvalue");
    CGF.Builder.CreateStore(V, addr);
    return saved_type(addr.getPointer(), ScalarAddress);
  }

  if (rv.isComplex()) {
    CodeGenFunction::ComplexPairTy V = rv.getComplexVal();
    llvm::Type *ComplexTy =
        llvm::StructType::get(V.first->getType(), V.second->getType(),
                              (void*) nullptr);
    Address addr = CGF.CreateDefaultAlignTempAlloca(ComplexTy, "saved-complex");
    CGF.Builder.CreateStore(V.first,
                            CGF.Builder.CreateStructGEP(addr, 0, CharUnits()));
    CharUnits offset = CharUnits::fromQuantity(
        CGF.CGM.getDataLayout().getTypeAllocSize(V.first->getType()));
    CGF.Builder.CreateStore(V.second,
                            CGF.Builder.CreateStructGEP(addr, 1, offset));
    return saved_type(addr.getPointer(), ComplexAddress);
  }

  assert(rv.isAggregate());
  Address V = rv.getAggregateAddress(); // TODO: volatile?
  if (!DominatingLLVMValue::needsSaving(V.getPointer()))
    return saved_type(V.getPointer(), AggregateLiteral,
                      V.getAlignment().getQuantity());

  Address addr =
      CGF.CreateTempAlloca(V.getType(), CGF.getPointerAlign(), "saved-rvalue");
  CGF.Builder.CreateStore(V.getPointer(), addr);
  return saved_type(addr.getPointer(), AggregateAddress,
                    V.getAlignment().getQuantity());
}
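
// A note on when saving fires in practice: values typically need to be
// saved when they feed a cleanup pushed from within a conditionally-
// evaluated expression, e.g. roughly
//   use(b ? make_temporary() : fallback());
// where the temporary's destructor cleanup must be emitted on paths
// that the value computed inside the branch does not dominate, so
// save() spills it to an alloca and restore() reloads it at the point
// where the cleanup is finally emitted.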

/// Given a saved r-value produced by SaveRValue, perform the code
/// necessary to restore it to usability at the current insertion
/// point.
RValue DominatingValue<RValue>::saved_type::restore(CodeGenFunction &CGF) {
  auto getSavingAddress = [&](llvm::Value *value) {
    auto alignment = cast<llvm::AllocaInst>(value)->getAlignment();
    return Address(value, CharUnits::fromQuantity(alignment));
  };
  switch (K) {
  case ScalarLiteral:
    return RValue::get(Value);
  case ScalarAddress:
    return RValue::get(CGF.Builder.CreateLoad(getSavingAddress(Value)));
  case AggregateLiteral:
    return RValue::getAggregate(Address(Value, CharUnits::fromQuantity(Align)));
  case AggregateAddress: {
    auto addr = CGF.Builder.CreateLoad(getSavingAddress(Value));
    return RValue::getAggregate(Address(addr, CharUnits::fromQuantity(Align)));
  }
  case ComplexAddress: {
    Address address = getSavingAddress(Value);
    llvm::Value *real = CGF.Builder.CreateLoad(
        CGF.Builder.CreateStructGEP(address, 0, CharUnits()));
    CharUnits offset = CharUnits::fromQuantity(
        CGF.CGM.getDataLayout().getTypeAllocSize(real->getType()));
    llvm::Value *imag = CGF.Builder.CreateLoad(
        CGF.Builder.CreateStructGEP(address, 1, offset));
    return RValue::getComplex(real, imag);
  }
  }

  llvm_unreachable("bad saved r-value kind");
}

/// Push an entry of the given size onto this protected-scope stack.
char *EHScopeStack::allocate(size_t Size) {
  Size = llvm::alignTo(Size, ScopeStackAlignment);
  if (!StartOfBuffer) {
    unsigned Capacity = 1024;
    while (Capacity < Size) Capacity *= 2;
    StartOfBuffer = new char[Capacity];
    StartOfData = EndOfBuffer = StartOfBuffer + Capacity;
  } else if (static_cast<size_t>(StartOfData - StartOfBuffer) < Size) {
    unsigned CurrentCapacity = EndOfBuffer - StartOfBuffer;
    unsigned UsedCapacity = CurrentCapacity - (StartOfData - StartOfBuffer);

    unsigned NewCapacity = CurrentCapacity;
    do {
      NewCapacity *= 2;
    } while (NewCapacity < UsedCapacity + Size);

    char *NewStartOfBuffer = new char[NewCapacity];
    char *NewEndOfBuffer = NewStartOfBuffer + NewCapacity;
    char *NewStartOfData = NewEndOfBuffer - UsedCapacity;
    memcpy(NewStartOfData, StartOfData, UsedCapacity);
    delete [] StartOfBuffer;
    StartOfBuffer = NewStartOfBuffer;
    EndOfBuffer = NewEndOfBuffer;
    StartOfData = NewStartOfData;
  }

  assert(StartOfBuffer + Size <= StartOfData);
  StartOfData -= Size;
  return StartOfData;
}

void EHScopeStack::deallocate(size_t Size) {
  StartOfData += llvm::alignTo(Size, ScopeStackAlignment);
}
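
// The stack's single buffer is filled from high addresses towards low
// ones, so the innermost scope always sits at StartOfData:
//
//   StartOfBuffer ..free.. StartOfData [innermost ... outermost] EndOfBuffer
//
// allocate() bumps StartOfData downwards, doubling the capacity when a
// new entry would not fit, and deallocate() simply bumps it back up.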

bool EHScopeStack::containsOnlyLifetimeMarkers(
    EHScopeStack::stable_iterator Old) const {
  for (EHScopeStack::iterator it = begin(); stabilize(it) != Old; it++) {
    EHCleanupScope *cleanup = dyn_cast<EHCleanupScope>(&*it);
    if (!cleanup || !cleanup->isLifetimeMarker())
      return false;
  }

  return true;
}

bool EHScopeStack::requiresLandingPad() const {
  for (stable_iterator si = getInnermostEHScope(); si != stable_end(); ) {
    // Skip lifetime markers.
    if (auto *cleanup = dyn_cast<EHCleanupScope>(&*find(si)))
      if (cleanup->isLifetimeMarker()) {
        si = cleanup->getEnclosingEHScope();
        continue;
      }
    return true;
  }

  return false;
}

EHScopeStack::stable_iterator
EHScopeStack::getInnermostActiveNormalCleanup() const {
  for (stable_iterator si = getInnermostNormalCleanup(), se = stable_end();
         si != se; ) {
    EHCleanupScope &cleanup = cast<EHCleanupScope>(*find(si));
    if (cleanup.isActive()) return si;
    si = cleanup.getEnclosingNormalCleanup();
  }
  return stable_end();
}


void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) {
  char *Buffer = allocate(EHCleanupScope::getSizeForCleanupSize(Size));
  bool IsNormalCleanup = Kind & NormalCleanup;
  bool IsEHCleanup = Kind & EHCleanup;
  bool IsActive = !(Kind & InactiveCleanup);
  EHCleanupScope *Scope =
    new (Buffer) EHCleanupScope(IsNormalCleanup,
                                IsEHCleanup,
                                IsActive,
                                Size,
                                BranchFixups.size(),
                                InnermostNormalCleanup,
                                InnermostEHScope);
  if (IsNormalCleanup)
    InnermostNormalCleanup = stable_begin();
  if (IsEHCleanup)
    InnermostEHScope = stable_begin();

  return Scope->getCleanupBuffer();
}

void EHScopeStack::popCleanup() {
  assert(!empty() && "popping exception stack when not empty");

  assert(isa<EHCleanupScope>(*begin()));
  EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin());
  InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup();
  InnermostEHScope = Cleanup.getEnclosingEHScope();
  deallocate(Cleanup.getAllocatedSize());

  // Destroy the cleanup.
  Cleanup.Destroy();

  // Check whether we can shrink the branch-fixups stack.
  if (!BranchFixups.empty()) {
    // If we no longer have any normal cleanups, all the fixups are
    // complete.
    if (!hasNormalCleanups())
      BranchFixups.clear();

    // Otherwise we can still trim out unnecessary nulls.
    else
      popNullFixups();
  }
}

EHFilterScope *EHScopeStack::pushFilter(unsigned numFilters) {
  assert(getInnermostEHScope() == stable_end());
  char *buffer = allocate(EHFilterScope::getSizeForNumFilters(numFilters));
  EHFilterScope *filter = new (buffer) EHFilterScope(numFilters);
  InnermostEHScope = stable_begin();
  return filter;
}

void EHScopeStack::popFilter() {
  assert(!empty() && "popping exception stack when not empty");

  EHFilterScope &filter = cast<EHFilterScope>(*begin());
  deallocate(EHFilterScope::getSizeForNumFilters(filter.getNumFilters()));

  InnermostEHScope = filter.getEnclosingEHScope();
}

EHCatchScope *EHScopeStack::pushCatch(unsigned numHandlers) {
  char *buffer = allocate(EHCatchScope::getSizeForNumHandlers(numHandlers));
  EHCatchScope *scope =
    new (buffer) EHCatchScope(numHandlers, InnermostEHScope);
  InnermostEHScope = stable_begin();
  return scope;
}

void EHScopeStack::pushTerminate() {
  char *Buffer = allocate(EHTerminateScope::getSize());
  new (Buffer) EHTerminateScope(InnermostEHScope);
  InnermostEHScope = stable_begin();
}
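
// Note the invariant maintained by the push*/pop* routines above: each
// new scope records the old Innermost{NormalCleanup,EHScope} value and
// the innermost pointer is then reset to stable_begin(), so the
// Enclosing* fields thread singly-linked lists through the buffer and
// the pop* routines can restore the innermost pointers in O(1) without
// walking the stack.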

/// Remove any 'null' fixups on the stack.  However, we can't pop more
/// fixups than the fixup depth on the innermost normal cleanup, or
/// else fixups that we try to add to that cleanup will end up in the
/// wrong place.  We *could* try to shrink fixup depths, but that's
/// actually a lot of work for little benefit.
void EHScopeStack::popNullFixups() {
  // We expect this to only be called when there's still an innermost
  // normal cleanup; otherwise there really shouldn't be any fixups.
  assert(hasNormalCleanups());

  EHScopeStack::iterator it = find(InnermostNormalCleanup);
  unsigned MinSize = cast<EHCleanupScope>(*it).getFixupDepth();
  assert(BranchFixups.size() >= MinSize && "fixup stack out of order");

  while (BranchFixups.size() > MinSize &&
         BranchFixups.back().Destination == nullptr)
    BranchFixups.pop_back();
}

void CodeGenFunction::initFullExprCleanup() {
  // Create a variable to decide whether the cleanup needs to be run.
  Address active = CreateTempAlloca(Builder.getInt1Ty(), CharUnits::One(),
                                    "cleanup.cond");

  // Initialize it to false at a site that's guaranteed to be run
  // before each evaluation.
  setBeforeOutermostConditional(Builder.getFalse(), active);

  // Initialize it to true at the current location.
  Builder.CreateStore(Builder.getTrue(), active);

  // Set that as the active flag in the cleanup.
  EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin());
  assert(!cleanup.hasActiveFlag() && "cleanup already has active flag?");
  cleanup.setActiveFlag(active);

  if (cleanup.isNormalCleanup()) cleanup.setTestFlagInNormalCleanup();
  if (cleanup.isEHCleanup()) cleanup.setTestFlagInEHCleanup();
}

void EHScopeStack::Cleanup::anchor() {}

static void createStoreInstBefore(llvm::Value *value, Address addr,
                                  llvm::Instruction *beforeInst) {
  auto store = new llvm::StoreInst(value, addr.getPointer(), beforeInst);
  store->setAlignment(addr.getAlignment().getQuantity());
}

static llvm::LoadInst *createLoadInstBefore(Address addr, const Twine &name,
                                            llvm::Instruction *beforeInst) {
  auto load = new llvm::LoadInst(addr.getPointer(), name, beforeInst);
  load->setAlignment(addr.getAlignment().getQuantity());
  return load;
}
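
// These helpers implement the cleanup-destination protocol: a branch
// that has to leave through one or more cleanups is compiled roughly as
//   store i32 <destination-index>, i32* %cleanup.dest.slot
//   br label %innermost.cleanup.entry
// and each cleanup's exit block then either branches straight to its
// unique destination or switches on %cleanup.dest.slot to route control
// onwards, as the functions below arrange.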

/// All the branch fixups on the EH stack have propagated out past the
/// outermost normal cleanup; resolve them all by adding cases to the
/// given switch instruction.
static void ResolveAllBranchFixups(CodeGenFunction &CGF,
                                   llvm::SwitchInst *Switch,
                                   llvm::BasicBlock *CleanupEntry) {
  llvm::SmallPtrSet<llvm::BasicBlock*, 4> CasesAdded;

  for (unsigned I = 0, E = CGF.EHStack.getNumBranchFixups(); I != E; ++I) {
    // Skip this fixup if its destination isn't set.
    BranchFixup &Fixup = CGF.EHStack.getBranchFixup(I);
    if (Fixup.Destination == nullptr) continue;

    // If there isn't an OptimisticBranchBlock, then InitialBranch is
    // still pointing directly to its destination; forward it to the
    // appropriate cleanup entry.  This is required in the specific
    // case of
    //   { std::string s; goto lbl; }
    //   lbl:
    // i.e. where there's an unresolved fixup inside a single cleanup
    // entry which we're currently popping.
    if (Fixup.OptimisticBranchBlock == nullptr) {
      createStoreInstBefore(CGF.Builder.getInt32(Fixup.DestinationIndex),
                            CGF.getNormalCleanupDestSlot(),
                            Fixup.InitialBranch);
      Fixup.InitialBranch->setSuccessor(0, CleanupEntry);
    }

    // Don't add this case to the switch statement twice.
    if (!CasesAdded.insert(Fixup.Destination).second)
      continue;

    Switch->addCase(CGF.Builder.getInt32(Fixup.DestinationIndex),
                    Fixup.Destination);
  }

  CGF.EHStack.clearFixups();
}

/// Transitions the terminator of the given exit-block of a cleanup to
/// be a cleanup switch.
static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF,
                                                   llvm::BasicBlock *Block) {
  // If it's a branch, turn it into a switch whose default
  // destination is its original target.
  llvm::TerminatorInst *Term = Block->getTerminator();
  assert(Term && "can't transition block without terminator");

  if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
    assert(Br->isUnconditional());
    auto Load = createLoadInstBefore(CGF.getNormalCleanupDestSlot(),
                                     "cleanup.dest", Term);
    llvm::SwitchInst *Switch =
      llvm::SwitchInst::Create(Load, Br->getSuccessor(0), 4, Block);
    Br->eraseFromParent();
    return Switch;
  } else {
    return cast<llvm::SwitchInst>(Term);
  }
}

void CodeGenFunction::ResolveBranchFixups(llvm::BasicBlock *Block) {
  assert(Block && "resolving a null target block");
  if (!EHStack.getNumBranchFixups()) return;

  assert(EHStack.hasNormalCleanups() &&
         "branch fixups exist with no normal cleanups on stack");

  llvm::SmallPtrSet<llvm::BasicBlock*, 4> ModifiedOptimisticBlocks;
  bool ResolvedAny = false;

  for (unsigned I = 0, E = EHStack.getNumBranchFixups(); I != E; ++I) {
    // Skip this fixup if its destination doesn't match.
    BranchFixup &Fixup = EHStack.getBranchFixup(I);
    if (Fixup.Destination != Block) continue;

    Fixup.Destination = nullptr;
    ResolvedAny = true;

    // If it doesn't have an optimistic branch block, InitialBranch is
    // already pointing to the right place.
    llvm::BasicBlock *BranchBB = Fixup.OptimisticBranchBlock;
    if (!BranchBB)
      continue;

    // Don't process the same optimistic branch block twice.
    if (!ModifiedOptimisticBlocks.insert(BranchBB).second)
      continue;

    llvm::SwitchInst *Switch = TransitionToCleanupSwitch(*this, BranchBB);

    // Add a case to the switch.
    Switch->addCase(Builder.getInt32(Fixup.DestinationIndex), Block);
  }

  if (ResolvedAny)
    EHStack.popNullFixups();
}

/// Pops cleanup blocks until the given savepoint is reached.
void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
  assert(Old.isValid());

  while (EHStack.stable_begin() != Old) {
    EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());

    // As long as Old strictly encloses the scope's enclosing normal
    // cleanup, we're going to emit another normal cleanup which
    // fallthrough can propagate through.
    bool FallThroughIsBranchThrough =
      Old.strictlyEncloses(Scope.getEnclosingNormalCleanup());

    PopCleanupBlock(FallThroughIsBranchThrough);
  }
}
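
// Lifetime-extended cleanups arise from bindings like
//   const T &r = make_temporary();
// where the temporary's destructor must run at the end of r's scope
// rather than at the end of the full-expression.  Such cleanups are
// parked in the raw byte buffer of LifetimeExtendedCleanupStack until
// the overload below transfers them onto the EH stack.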

/// Pops cleanup blocks until the given savepoint is reached, then moves
/// the cleanups deferred on the lifetime-extended cleanups stack beyond
/// the given savepoint onto the EH stack.
void
CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old,
                                  size_t OldLifetimeExtendedSize) {
  PopCleanupBlocks(Old);

  // Move our deferred cleanups onto the EH stack.
  for (size_t I = OldLifetimeExtendedSize,
              E = LifetimeExtendedCleanupStack.size(); I != E; /**/) {
    // Alignment should be guaranteed by the vptrs in the individual cleanups.
    assert((I % llvm::alignOf<LifetimeExtendedCleanupHeader>() == 0) &&
           "misaligned cleanup stack entry");

    LifetimeExtendedCleanupHeader &Header =
        reinterpret_cast<LifetimeExtendedCleanupHeader&>(
            LifetimeExtendedCleanupStack[I]);
    I += sizeof(Header);

    EHStack.pushCopyOfCleanup(Header.getKind(),
                              &LifetimeExtendedCleanupStack[I],
                              Header.getSize());
    I += Header.getSize();
  }
  LifetimeExtendedCleanupStack.resize(OldLifetimeExtendedSize);
}

static llvm::BasicBlock *CreateNormalEntry(CodeGenFunction &CGF,
                                           EHCleanupScope &Scope) {
  assert(Scope.isNormalCleanup());
  llvm::BasicBlock *Entry = Scope.getNormalBlock();
  if (!Entry) {
    Entry = CGF.createBasicBlock("cleanup");
    Scope.setNormalBlock(Entry);
  }
  return Entry;
}

/// Attempts to reduce a cleanup's entry block to a fallthrough.  This
/// is basically llvm::MergeBlockIntoPredecessor, except
/// simplified/optimized for the tighter constraints on cleanup blocks.
///
/// Returns the new block, whatever it is.
static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF,
                                              llvm::BasicBlock *Entry) {
  llvm::BasicBlock *Pred = Entry->getSinglePredecessor();
  if (!Pred) return Entry;

  llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator());
  if (!Br || Br->isConditional()) return Entry;
  assert(Br->getSuccessor(0) == Entry);

  // If we were previously inserting at the end of the cleanup entry
  // block, we'll need to continue inserting at the end of the
  // predecessor.
  bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry;
  assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end());

  // Kill the branch.
  Br->eraseFromParent();

  // Replace all uses of the entry with the predecessor, in case there
  // are phis in the cleanup.
  Entry->replaceAllUsesWith(Pred);

  // Merge the blocks.
  Pred->getInstList().splice(Pred->end(), Entry->getInstList());

  // Kill the entry block.
  Entry->eraseFromParent();

  if (WasInsertBlock)
    CGF.Builder.SetInsertPoint(Pred);

  return Pred;
}

static void EmitCleanup(CodeGenFunction &CGF,
                        EHScopeStack::Cleanup *Fn,
                        EHScopeStack::Cleanup::Flags flags,
                        Address ActiveFlag) {
  // If there's an active flag, load it and skip the cleanup if it's
  // false.
  llvm::BasicBlock *ContBB = nullptr;
  if (ActiveFlag.isValid()) {
    ContBB = CGF.createBasicBlock("cleanup.done");
    llvm::BasicBlock *CleanupBB = CGF.createBasicBlock("cleanup.action");
    llvm::Value *IsActive
      = CGF.Builder.CreateLoad(ActiveFlag, "cleanup.is_active");
    CGF.Builder.CreateCondBr(IsActive, CleanupBB, ContBB);
    CGF.EmitBlock(CleanupBB);
  }

  // Ask the cleanup to emit itself.
  Fn->Emit(CGF, flags);
  assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");

  // Emit the continuation block if there was an active flag.
  if (ActiveFlag.isValid())
    CGF.EmitBlock(ContBB);
}
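
// When a cleanup has an active flag, the guard emitted above has
// roughly the shape
//   %cleanup.is_active = load i1, i1* %flag
//   br i1 %cleanup.is_active, label %cleanup.action, label %cleanup.done
// which lets a single emitted copy of the cleanup serve paths on which
// the protected object may or may not actually have been constructed.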

static void ForwardPrebranchedFallthrough(llvm::BasicBlock *Exit,
                                          llvm::BasicBlock *From,
                                          llvm::BasicBlock *To) {
  // Exit is the exit block of a cleanup, so it always terminates in
  // an unconditional branch or a switch.
  llvm::TerminatorInst *Term = Exit->getTerminator();

  if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
    assert(Br->isUnconditional() && Br->getSuccessor(0) == From);
    Br->setSuccessor(0, To);
  } else {
    llvm::SwitchInst *Switch = cast<llvm::SwitchInst>(Term);
    for (unsigned I = 0, E = Switch->getNumSuccessors(); I != E; ++I)
      if (Switch->getSuccessor(I) == From)
        Switch->setSuccessor(I, To);
  }
}

/// We don't need a normal entry block for the given cleanup.
/// Optimistic fixup branches can cause these blocks to come into
/// existence anyway; if so, destroy it.
///
/// The validity of this transformation is very much specific to the
/// exact ways in which we form branches to cleanup entries.
static void destroyOptimisticNormalEntry(CodeGenFunction &CGF,
                                         EHCleanupScope &scope) {
  llvm::BasicBlock *entry = scope.getNormalBlock();
  if (!entry) return;

  // Replace all the uses with unreachable.
  llvm::BasicBlock *unreachableBB = CGF.getUnreachableBlock();
  for (llvm::BasicBlock::use_iterator
         i = entry->use_begin(), e = entry->use_end(); i != e; ) {
    llvm::Use &use = *i;
    ++i;

    use.set(unreachableBB);

    // The only uses should be fixup switches.
    llvm::SwitchInst *si = cast<llvm::SwitchInst>(use.getUser());
    if (si->getNumCases() == 1 && si->getDefaultDest() == unreachableBB) {
      // Replace the switch with a branch.
      llvm::BranchInst::Create(si->case_begin().getCaseSuccessor(), si);

      // The switch operand is a load from the cleanup-dest alloca.
      llvm::LoadInst *condition = cast<llvm::LoadInst>(si->getCondition());

      // Destroy the switch.
      si->eraseFromParent();

      // Destroy the load.
      assert(condition->getOperand(0) == CGF.NormalCleanupDest);
      assert(condition->use_empty());
      condition->eraseFromParent();
    }
  }

  assert(entry->use_empty());
  delete entry;
}
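
// Terminology for the popping logic below: a "branch-after" is a branch
// to a destination that can be resolved as soon as this cleanup has
// been emitted, while a "branch-through" must continue on through at
// least one enclosing normal cleanup before reaching its destination.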

/// Pops a cleanup block.  If the block includes a normal cleanup, the
/// current insertion point is threaded through the cleanup, as are
/// any branch fixups on the cleanup.
void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
  assert(!EHStack.empty() && "cleanup stack is empty!");
  assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
  assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());

  // Remember activation information.
  bool IsActive = Scope.isActive();
  Address NormalActiveFlag =
    Scope.shouldTestFlagInNormalCleanup() ? Scope.getActiveFlag()
                                          : Address::invalid();
  Address EHActiveFlag =
    Scope.shouldTestFlagInEHCleanup() ? Scope.getActiveFlag()
                                      : Address::invalid();

  // Check whether we need an EH cleanup.  This is only true if we've
  // generated a lazy EH cleanup block.
  llvm::BasicBlock *EHEntry = Scope.getCachedEHDispatchBlock();
  assert(Scope.hasEHBranches() == (EHEntry != nullptr));
  bool RequiresEHCleanup = (EHEntry != nullptr);
  EHScopeStack::stable_iterator EHParent = Scope.getEnclosingEHScope();

  // Check the three conditions which might require a normal cleanup:

  // - whether there are branch fix-ups through this cleanup
  unsigned FixupDepth = Scope.getFixupDepth();
  bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth;

  // - whether there are branch-throughs or branch-afters
  bool HasExistingBranches = Scope.hasBranches();

  // - whether there's a fallthrough
  llvm::BasicBlock *FallthroughSource = Builder.GetInsertBlock();
  bool HasFallthrough = (FallthroughSource != nullptr && IsActive);

  // Branch-through fall-throughs leave the insertion point set to the
  // end of the last cleanup, which points to the current scope.  The
  // rest of IR gen doesn't need to worry about this; it only happens
  // during the execution of PopCleanupBlocks().
  bool HasPrebranchedFallthrough =
    (FallthroughSource && FallthroughSource->getTerminator());

  // If this is a normal cleanup, then having a prebranched
  // fallthrough implies that the fallthrough source unconditionally
  // jumps here.
  assert(!Scope.isNormalCleanup() || !HasPrebranchedFallthrough ||
         (Scope.getNormalBlock() &&
          FallthroughSource->getTerminator()->getSuccessor(0)
            == Scope.getNormalBlock()));

  bool RequiresNormalCleanup = false;
  if (Scope.isNormalCleanup() &&
      (HasFixups || HasExistingBranches || HasFallthrough)) {
    RequiresNormalCleanup = true;
  }

  // If we have a prebranched fallthrough into an inactive normal
  // cleanup, rewrite it so that it leads to the appropriate place.
  if (Scope.isNormalCleanup() && HasPrebranchedFallthrough && !IsActive) {
    llvm::BasicBlock *prebranchDest;

    // If the prebranch is semantically branching through the next
    // cleanup, just forward it to the next block, leaving the
    // insertion point in the prebranched block.
    if (FallthroughIsBranchThrough) {
      EHScope &enclosing = *EHStack.find(Scope.getEnclosingNormalCleanup());
      prebranchDest = CreateNormalEntry(*this, cast<EHCleanupScope>(enclosing));

    // Otherwise, we need to make a new block.  If the normal cleanup
    // isn't being used at all, we could actually reuse the normal
    // entry block, but this is simpler, and it avoids conflicts with
    // dead optimistic fixup branches.
    } else {
      prebranchDest = createBasicBlock("forwarded-prebranch");
      EmitBlock(prebranchDest);
    }

    llvm::BasicBlock *normalEntry = Scope.getNormalBlock();
    assert(normalEntry && !normalEntry->use_empty());

    ForwardPrebranchedFallthrough(FallthroughSource,
                                  normalEntry, prebranchDest);
  }

  // If we don't need the cleanup at all, we're done.
  if (!RequiresNormalCleanup && !RequiresEHCleanup) {
    destroyOptimisticNormalEntry(*this, Scope);
    EHStack.popCleanup(); // safe because there are no fixups
    assert(EHStack.getNumBranchFixups() == 0 ||
           EHStack.hasNormalCleanups());
    return;
  }
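
  // The copy below matters: popping the scope reclaims its slice of the
  // EH stack buffer, and emitting the cleanup may itself push new scopes
  // or grow the buffer, so Fn must not point into the stack.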

  // Copy the cleanup emission data out.  This uses either a stack
  // array or malloc'd memory, depending on the size, which is
  // behavior that SmallVector would provide, if we could use it
  // here. Unfortunately, if you ask for a SmallVector<char>, the
  // alignment isn't sufficient.
  auto *CleanupSource = reinterpret_cast<char *>(Scope.getCleanupBuffer());
  llvm::AlignedCharArray<EHScopeStack::ScopeStackAlignment, 8 * sizeof(void *)>
      CleanupBufferStack;
  std::unique_ptr<char[]> CleanupBufferHeap;
  size_t CleanupSize = Scope.getCleanupSize();
  EHScopeStack::Cleanup *Fn;

  if (CleanupSize <= sizeof(CleanupBufferStack)) {
    memcpy(CleanupBufferStack.buffer, CleanupSource, CleanupSize);
    Fn = reinterpret_cast<EHScopeStack::Cleanup *>(CleanupBufferStack.buffer);
  } else {
    CleanupBufferHeap.reset(new char[CleanupSize]);
    memcpy(CleanupBufferHeap.get(), CleanupSource, CleanupSize);
    Fn = reinterpret_cast<EHScopeStack::Cleanup *>(CleanupBufferHeap.get());
  }

  EHScopeStack::Cleanup::Flags cleanupFlags;
  if (Scope.isNormalCleanup())
    cleanupFlags.setIsNormalCleanupKind();
  if (Scope.isEHCleanup())
    cleanupFlags.setIsEHCleanupKind();

  if (!RequiresNormalCleanup) {
    destroyOptimisticNormalEntry(*this, Scope);
    EHStack.popCleanup();
  } else {
    // If we have a fallthrough and no other need for the cleanup,
    // emit it directly.
    if (HasFallthrough && !HasPrebranchedFallthrough &&
        !HasFixups && !HasExistingBranches) {

      destroyOptimisticNormalEntry(*this, Scope);
      EHStack.popCleanup();

      EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);

    // Otherwise, the best approach is to thread everything through
    // the cleanup block and then try to clean up after ourselves.
    } else {
      // Force the entry block to exist.
      llvm::BasicBlock *NormalEntry = CreateNormalEntry(*this, Scope);

      // I.  Set up the fallthrough edge in.

      CGBuilderTy::InsertPoint savedInactiveFallthroughIP;

      // If there's a fallthrough, we need to store the cleanup
      // destination index.  For fall-throughs this is always zero.
      if (HasFallthrough) {
        if (!HasPrebranchedFallthrough)
          Builder.CreateStore(Builder.getInt32(0), getNormalCleanupDestSlot());

      // Otherwise, save and clear the IP if we don't have fallthrough
      // because the cleanup is inactive.
      } else if (FallthroughSource) {
        assert(!IsActive && "source without fallthrough for active cleanup");
        savedInactiveFallthroughIP = Builder.saveAndClearIP();
      }

      // II.  Emit the entry block.  This implicitly branches to it if
      // we have fallthrough.  All the fixups and existing branches
      // should already be branched to it.
      EmitBlock(NormalEntry);

      // III.  Figure out where we're going and build the cleanup
      // epilogue.

      bool HasEnclosingCleanups =
        (Scope.getEnclosingNormalCleanup() != EHStack.stable_end());

      // Compute the branch-through dest if we need it:
      //   - if there are branch-throughs threaded through the scope
      //   - if fall-through is a branch-through
      //   - if there are fixups that will be optimistically forwarded
      //     to the enclosing cleanup
      llvm::BasicBlock *BranchThroughDest = nullptr;
      if (Scope.hasBranchThroughs() ||
          (FallthroughSource && FallthroughIsBranchThrough) ||
          (HasFixups && HasEnclosingCleanups)) {
        assert(HasEnclosingCleanups);
        EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup());
        BranchThroughDest = CreateNormalEntry(*this, cast<EHCleanupScope>(S));
      }

      llvm::BasicBlock *FallthroughDest = nullptr;
      SmallVector<llvm::Instruction*, 2> InstsToAppend;
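
      // The exit instructions are built here, before the cleanup body is
      // emitted, because the destinations threaded through this scope are
      // only known while the scope is still on the stack; the instructions
      // are appended to the cleanup's exit block afterwards.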

      // If there's exactly one branch-after and no other threads,
      // we can route it without a switch.
      if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough &&
          Scope.getNumBranchAfters() == 1) {
        assert(!BranchThroughDest || !IsActive);

        // Clean up the possibly dead store to the cleanup dest slot.
        llvm::Instruction *NormalCleanupDestSlot =
            cast<llvm::Instruction>(getNormalCleanupDestSlot().getPointer());
        if (NormalCleanupDestSlot->hasOneUse()) {
          NormalCleanupDestSlot->user_back()->eraseFromParent();
          NormalCleanupDestSlot->eraseFromParent();
          NormalCleanupDest = nullptr;
        }

        llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0);
        InstsToAppend.push_back(llvm::BranchInst::Create(BranchAfter));

      // Build a switch-out if we need it:
      //   - if there are branch-afters threaded through the scope
      //   - if fall-through is a branch-after
      //   - if there are fixups that have nowhere left to go and
      //     so must be immediately resolved
      } else if (Scope.getNumBranchAfters() ||
                 (HasFallthrough && !FallthroughIsBranchThrough) ||
                 (HasFixups && !HasEnclosingCleanups)) {

        llvm::BasicBlock *Default =
          (BranchThroughDest ? BranchThroughDest : getUnreachableBlock());

        // TODO: base this on the number of branch-afters and fixups
        const unsigned SwitchCapacity = 10;

        llvm::LoadInst *Load =
          createLoadInstBefore(getNormalCleanupDestSlot(), "cleanup.dest",
                               nullptr);
        llvm::SwitchInst *Switch =
          llvm::SwitchInst::Create(Load, Default, SwitchCapacity);

        InstsToAppend.push_back(Load);
        InstsToAppend.push_back(Switch);

        // Branch-after fallthrough.
        if (FallthroughSource && !FallthroughIsBranchThrough) {
          FallthroughDest = createBasicBlock("cleanup.cont");
          if (HasFallthrough)
            Switch->addCase(Builder.getInt32(0), FallthroughDest);
        }

        for (unsigned I = 0, E = Scope.getNumBranchAfters(); I != E; ++I) {
          Switch->addCase(Scope.getBranchAfterIndex(I),
                          Scope.getBranchAfterBlock(I));
        }

        // If there aren't any enclosing cleanups, we can resolve all
        // the fixups now.
        if (HasFixups && !HasEnclosingCleanups)
          ResolveAllBranchFixups(*this, Switch, NormalEntry);
      } else {
        // We should always have a branch-through destination in this case.
        assert(BranchThroughDest);
        InstsToAppend.push_back(llvm::BranchInst::Create(BranchThroughDest));
      }

      // IV.  Pop the cleanup and emit it.
      EHStack.popCleanup();
      assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);

      EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);

      // Append the prepared cleanup epilogue from above.
      llvm::BasicBlock *NormalExit = Builder.GetInsertBlock();
      for (unsigned I = 0, E = InstsToAppend.size(); I != E; ++I)
        NormalExit->getInstList().push_back(InstsToAppend[I]);

      // Optimistically hope that any fixups will continue falling through.
      for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
           I < E; ++I) {
        BranchFixup &Fixup = EHStack.getBranchFixup(I);
        if (!Fixup.Destination) continue;
        if (!Fixup.OptimisticBranchBlock) {
          createStoreInstBefore(Builder.getInt32(Fixup.DestinationIndex),
                                getNormalCleanupDestSlot(),
                                Fixup.InitialBranch);
          Fixup.InitialBranch->setSuccessor(0, NormalEntry);
        }
        Fixup.OptimisticBranchBlock = NormalExit;
      }
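
      // Setting OptimisticBranchBlock to NormalExit records the guess
      // that these fixups can keep falling through this cleanup's exit;
      // if a fixup's destination later turns out to lie elsewhere,
      // ResolveBranchFixups rewires that exit's terminator into a switch
      // via TransitionToCleanupSwitch.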

      // V.  Set up the fallthrough edge out.

      // Case 1: a fallthrough source exists but doesn't branch to the
      // cleanup because the cleanup is inactive.
      if (!HasFallthrough && FallthroughSource) {
        // Prebranched fallthrough was forwarded earlier.
        // Non-prebranched fallthrough doesn't need to be forwarded.
        // Either way, all we need to do is restore the IP we cleared before.
        assert(!IsActive);
        Builder.restoreIP(savedInactiveFallthroughIP);

      // Case 2: a fallthrough source exists and should branch to the
      // cleanup, but we're not supposed to branch through to the next
      // cleanup.
      } else if (HasFallthrough && FallthroughDest) {
        assert(!FallthroughIsBranchThrough);
        EmitBlock(FallthroughDest);

      // Case 3: a fallthrough source exists and should branch to the
      // cleanup and then through to the next.
      } else if (HasFallthrough) {
        // Everything is already set up for this.

      // Case 4: no fallthrough source exists.
      } else {
        Builder.ClearInsertionPoint();
      }

      // VI.  Assorted cleaning.

      // Check whether we can merge NormalEntry into a single predecessor.
      // This might invalidate (non-IR) pointers to NormalEntry.
      llvm::BasicBlock *NewNormalEntry =
        SimplifyCleanupEntry(*this, NormalEntry);

      // If it did invalidate those pointers, and NormalEntry was the same
      // as NormalExit, go back and patch up the fixups.
      if (NewNormalEntry != NormalEntry && NormalEntry == NormalExit)
        for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
             I < E; ++I)
          EHStack.getBranchFixup(I).OptimisticBranchBlock = NewNormalEntry;
    }
  }

  assert(EHStack.hasNormalCleanups() || EHStack.getNumBranchFixups() == 0);

  // Emit the EH cleanup if required.
  if (RequiresEHCleanup) {
    CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();

    EmitBlock(EHEntry);

    llvm::BasicBlock *NextAction = getEHDispatchBlock(EHParent);

    // Push a terminate scope or cleanupendpad scope around the potentially
    // throwing cleanups. For funclet EH personalities, the cleanupendpad
    // models program termination when cleanups throw.
    bool PushedTerminate = false;
    SaveAndRestore<llvm::Instruction *> RestoreCurrentFuncletPad(
        CurrentFuncletPad);
    llvm::CleanupPadInst *CPI = nullptr;
    if (!EHPersonality::get(*this).usesFuncletPads()) {
      EHStack.pushTerminate();
      PushedTerminate = true;
    } else {
      llvm::Value *ParentPad = CurrentFuncletPad;
      if (!ParentPad)
        ParentPad = llvm::ConstantTokenNone::get(CGM.getLLVMContext());
      CurrentFuncletPad = CPI = Builder.CreateCleanupPad(ParentPad);
    }

    // We only actually emit the cleanup code if the cleanup is either
    // active or was used before it was deactivated.
    if (EHActiveFlag.isValid() || IsActive) {
      cleanupFlags.setIsForEHCleanup();
      EmitCleanup(*this, Fn, cleanupFlags, EHActiveFlag);
    }

    if (CPI)
      Builder.CreateCleanupRet(CPI, NextAction);
    else
      Builder.CreateBr(NextAction);

    // Leave the terminate scope.
    if (PushedTerminate)
      EHStack.popTerminate();

    Builder.restoreIP(SavedIP);

    SimplifyCleanupEntry(*this, EHEntry);
  }
}
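
// For funclet-based personalities (e.g. the MSVC C++ personality), the
// EH path emitted above takes roughly the form
//   %pad = cleanuppad within <parent> []
//   ; ...cleanup body...
//   cleanupret from %pad unwind label %next.action
// while Itanium-style personalities emit a plain branch to the enclosing
// EH dispatch block, bracketed by a terminate scope so that an exception
// thrown from the cleanup itself terminates the program.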

/// isObviouslyBranchWithoutCleanups - Return true if a branch to the
/// specified destination obviously has no cleanups to run.  'false' is always
/// a conservatively correct answer for this method.
bool CodeGenFunction::isObviouslyBranchWithoutCleanups(JumpDest Dest) const {
  assert(Dest.getScopeDepth().encloses(EHStack.stable_begin())
         && "stale jump destination");

  // Calculate the innermost active normal cleanup.
  EHScopeStack::stable_iterator TopCleanup =
    EHStack.getInnermostActiveNormalCleanup();

  // If we're not in an active normal cleanup scope, or if the
  // destination scope is within the innermost active normal cleanup
  // scope, we don't need to worry about fixups.
  if (TopCleanup == EHStack.stable_end() ||
      TopCleanup.encloses(Dest.getScopeDepth())) // works for invalid
    return true;

  // Otherwise, we might need some cleanups.
  return false;
}


/// Terminate the current block by emitting a branch which might leave
/// the current cleanup-protected scope.  The target scope may not yet
/// be known, in which case this will require a fixup.
///
/// As a side-effect, this method clears the insertion point.
void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
  assert(Dest.getScopeDepth().encloses(EHStack.stable_begin())
         && "stale jump destination");

  if (!HaveInsertPoint())
    return;

  // Create the branch.
  llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());

  // Calculate the innermost active normal cleanup.
  EHScopeStack::stable_iterator
    TopCleanup = EHStack.getInnermostActiveNormalCleanup();

  // If we're not in an active normal cleanup scope, or if the
  // destination scope is within the innermost active normal cleanup
  // scope, we don't need to worry about fixups.
  if (TopCleanup == EHStack.stable_end() ||
      TopCleanup.encloses(Dest.getScopeDepth())) { // works for invalid
    Builder.ClearInsertionPoint();
    return;
  }

  // If we can't resolve the destination cleanup scope, just add this
  // to the current cleanup scope as a branch fixup.
  if (!Dest.getScopeDepth().isValid()) {
    BranchFixup &Fixup = EHStack.addBranchFixup();
    Fixup.Destination = Dest.getBlock();
    Fixup.DestinationIndex = Dest.getDestIndex();
    Fixup.InitialBranch = BI;
    Fixup.OptimisticBranchBlock = nullptr;

    Builder.ClearInsertionPoint();
    return;
  }

  // Otherwise, thread through all the normal cleanups in scope.

  // Store the index at the start.
  llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
  createStoreInstBefore(Index, getNormalCleanupDestSlot(), BI);

  // Adjust BI to point to the first cleanup block.
  {
    EHCleanupScope &Scope =
      cast<EHCleanupScope>(*EHStack.find(TopCleanup));
    BI->setSuccessor(0, CreateNormalEntry(*this, Scope));
  }

  // Add this destination to all the scopes involved.
  EHScopeStack::stable_iterator I = TopCleanup;
  EHScopeStack::stable_iterator E = Dest.getScopeDepth();
  if (E.strictlyEncloses(I)) {
    while (true) {
      EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
      assert(Scope.isNormalCleanup());
      I = Scope.getEnclosingNormalCleanup();

      // If this is the last cleanup we're propagating through, tell it
      // that there's a resolved jump moving through it.
      if (!E.strictlyEncloses(I)) {
        Scope.addBranchAfter(Index, Dest.getBlock());
        break;
      }

      // Otherwise, tell the scope that there's a jump propagating
      // through it.  If this isn't new information, all the rest of
      // the work has been done before.
      if (!Scope.addBranchThrough(Dest.getBlock()))
        break;
    }
  }

  Builder.ClearInsertionPoint();
}

static bool IsUsedAsNormalCleanup(EHScopeStack &EHStack,
                                  EHScopeStack::stable_iterator C) {
  // If we needed a normal block for any reason, that counts.
  if (cast<EHCleanupScope>(*EHStack.find(C)).getNormalBlock())
    return true;

  // Check whether any enclosed cleanups were needed.
  for (EHScopeStack::stable_iterator
         I = EHStack.getInnermostNormalCleanup();
       I != C; ) {
    assert(C.strictlyEncloses(I));
    EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
    if (S.getNormalBlock()) return true;
    I = S.getEnclosingNormalCleanup();
  }

  return false;
}

static bool IsUsedAsEHCleanup(EHScopeStack &EHStack,
                              EHScopeStack::stable_iterator cleanup) {
  // If we needed an EH block for any reason, that counts.
  if (EHStack.find(cleanup)->hasEHBranches())
    return true;

  // Check whether any enclosed cleanups were needed.
  for (EHScopeStack::stable_iterator
         i = EHStack.getInnermostEHScope(); i != cleanup; ) {
    assert(cleanup.strictlyEncloses(i));

    EHScope &scope = *EHStack.find(i);
    if (scope.hasEHBranches())
      return true;

    i = scope.getEnclosingEHScope();
  }

  return false;
}

enum ForActivation_t {
  ForActivation,
  ForDeactivation
};

/// The given cleanup block is changing activation state.  Configure a
/// cleanup variable if necessary.
///
/// It would be good if we had some way of determining if there were
/// extra uses *after* the change-over point.
static void SetupCleanupBlockActivation(CodeGenFunction &CGF,
                                        EHScopeStack::stable_iterator C,
                                        ForActivation_t kind,
                                        llvm::Instruction *dominatingIP) {
  EHCleanupScope &Scope = cast<EHCleanupScope>(*CGF.EHStack.find(C));

  // We always need the flag if we're activating the cleanup in a
  // conditional context, because we have to assume that the current
  // location doesn't necessarily dominate the cleanup's code.
  bool isActivatedInConditional =
    (kind == ForActivation && CGF.isInConditionalBranch());

  bool needFlag = false;

  // Calculate whether the cleanup was used:

  //  - as a normal cleanup
  if (Scope.isNormalCleanup() &&
      (isActivatedInConditional || IsUsedAsNormalCleanup(CGF.EHStack, C))) {
    Scope.setTestFlagInNormalCleanup();
    needFlag = true;
  }

  //  - as an EH cleanup
  if (Scope.isEHCleanup() &&
      (isActivatedInConditional || IsUsedAsEHCleanup(CGF.EHStack, C))) {
    Scope.setTestFlagInEHCleanup();
    needFlag = true;
  }

  // If it hasn't yet been used as either, we're done.
  if (!needFlag) return;

  Address var = Scope.getActiveFlag();
  if (!var.isValid()) {
    var = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), CharUnits::One(),
                               "cleanup.isactive");
    Scope.setActiveFlag(var);

    assert(dominatingIP && "no existing variable and no dominating IP!");

    // Initialize to true or false depending on whether it was
    // active up to this point.
    llvm::Constant *value = CGF.Builder.getInt1(kind == ForDeactivation);
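
    // Note the inversion: the flag is seeded with the cleanup's state
    // *before* this change (true when deactivating a formerly active
    // cleanup, false when activating a formerly inactive one); the store
    // emitted at the end of this function then flips it at the
    // change-over point.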

    // If we're in a conditional block, ignore the dominating IP and
    // use the outermost conditional branch.
    if (CGF.isInConditionalBranch()) {
      CGF.setBeforeOutermostConditional(value, var);
    } else {
      createStoreInstBefore(value, var, dominatingIP);
    }
  }

  CGF.Builder.CreateStore(CGF.Builder.getInt1(kind == ForActivation), var);
}

/// Activate a cleanup that was created in an inactivated state.
void CodeGenFunction::ActivateCleanupBlock(EHScopeStack::stable_iterator C,
                                           llvm::Instruction *dominatingIP) {
  assert(C != EHStack.stable_end() && "activating bottom of stack?");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
  assert(!Scope.isActive() && "double activation");

  SetupCleanupBlockActivation(*this, C, ForActivation, dominatingIP);

  Scope.setActive(true);
}

/// Deactivate a cleanup that was created in an active state.
void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C,
                                             llvm::Instruction *dominatingIP) {
  assert(C != EHStack.stable_end() && "deactivating bottom of stack?");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
  assert(Scope.isActive() && "double deactivation");

  // If it's the top of the stack, just pop it.
  if (C == EHStack.stable_begin()) {
    // If it's a normal cleanup, we need to pretend that the
    // fallthrough is unreachable.
    CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
    PopCleanupBlock();
    Builder.restoreIP(SavedIP);
    return;
  }

  // Otherwise, follow the general case.
  SetupCleanupBlockActivation(*this, C, ForDeactivation, dominatingIP);

  Scope.setActive(false);
}

Address CodeGenFunction::getNormalCleanupDestSlot() {
  if (!NormalCleanupDest)
    NormalCleanupDest =
      CreateTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot");
  return Address(NormalCleanupDest, CharUnits::fromQuantity(4));
}

/// Emits all the code to cause the given temporary to be cleaned up.
void CodeGenFunction::EmitCXXTemporary(const CXXTemporary *Temporary,
                                       QualType TempType,
                                       Address Ptr) {
  pushDestroy(NormalAndEHCleanup, Ptr, TempType, destroyCXXObject,
              /*useEHCleanup*/ true);
}