//===- CodeExtractor.cpp - Pull code region into a new function ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the interface to tear out a code region, such as an
// individual loop or a parallel section, into a new function, replacing it
// with a call to the new function.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/CodeExtractor.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BlockFrequencyInfoImpl.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>
#include <set>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "code-extractor"

// Provide a command-line option to aggregate function arguments into a struct
// for functions produced by the code extractor. This is useful when converting
// extracted functions to pthread-based code, as only one argument (void*) can
// be passed in to pthread_create().
static cl::opt<bool>
AggregateArgsOpt("aggregate-extracted-args", cl::Hidden,
                 cl::desc("Aggregate arguments to code-extracted functions"));

/// \brief Test whether a block is valid for extraction.
bool CodeExtractor::isBlockValidForExtraction(const BasicBlock &BB,
                                              bool AllowVarArgs) {
  // Landing pads must be in the function where they were inserted for cleanup.
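  // Extracting an EH pad would detach it from the invoke or catchswitch in the
  // original function that unwinds to it, which would leave invalid IR behind.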
  if (BB.isEHPad())
    return false;

  // Taking the address of a basic block moved to another function is illegal.
  if (BB.hasAddressTaken())
    return false;

  // Don't hoist code that uses the address of another basic block, as it's
  // likely to lead to unexpected behavior, like cross-function jumps.
  SmallPtrSet<User const *, 16> Visited;
  SmallVector<User const *, 16> ToVisit;

  for (Instruction const &Inst : BB)
    ToVisit.push_back(&Inst);

  while (!ToVisit.empty()) {
    User const *Curr = ToVisit.pop_back_val();
    if (!Visited.insert(Curr).second)
      continue;
    if (isa<BlockAddress const>(Curr))
      return false; // Even a self-reference is unlikely to be compatible.

    if (isa<Instruction>(Curr) && cast<Instruction>(Curr)->getParent() != &BB)
      continue;

    for (auto const &U : Curr->operands()) {
      if (auto *UU = dyn_cast<User>(U))
        ToVisit.push_back(UU);
    }
  }

  // Don't hoist code containing allocas or invokes. If explicitly requested,
  // allow vastart.
  for (BasicBlock::const_iterator I = BB.begin(), E = BB.end(); I != E; ++I) {
    if (isa<AllocaInst>(I) || isa<InvokeInst>(I))
      return false;
    if (const CallInst *CI = dyn_cast<CallInst>(I))
      if (const Function *F = CI->getCalledFunction())
        if (F->getIntrinsicID() == Intrinsic::vastart) {
          if (AllowVarArgs)
            continue;
          else
            return false;
        }
  }

  return true;
}

/// \brief Build a set of blocks to extract if the input blocks are viable.
static SetVector<BasicBlock *>
buildExtractionBlockSet(ArrayRef<BasicBlock *> BBs, DominatorTree *DT,
                        bool AllowVarArgs) {
  assert(!BBs.empty() && "The set of blocks to extract must be non-empty");
  SetVector<BasicBlock *> Result;

  // Loop over the blocks, adding them to our set-vector, and aborting with an
  // empty set if we encounter invalid blocks.
  for (BasicBlock *BB : BBs) {
    // If this block is dead, don't process it.
    if (DT && !DT->isReachableFromEntry(BB))
      continue;

    if (!Result.insert(BB))
      llvm_unreachable("Repeated basic blocks in extraction input");
    if (!CodeExtractor::isBlockValidForExtraction(*BB, AllowVarArgs)) {
      Result.clear();
      return Result;
    }
  }

#ifndef NDEBUG
  for (SetVector<BasicBlock *>::iterator I = std::next(Result.begin()),
                                         E = Result.end();
       I != E; ++I)
    for (pred_iterator PI = pred_begin(*I), PE = pred_end(*I);
         PI != PE; ++PI)
      assert(Result.count(*PI) &&
             "No blocks in this region may have entries from outside the region"
             " except for the first block!");
#endif

  return Result;
}

CodeExtractor::CodeExtractor(ArrayRef<BasicBlock *> BBs, DominatorTree *DT,
                             bool AggregateArgs, BlockFrequencyInfo *BFI,
                             BranchProbabilityInfo *BPI, bool AllowVarArgs)
    : DT(DT), AggregateArgs(AggregateArgs || AggregateArgsOpt), BFI(BFI),
      BPI(BPI), AllowVarArgs(AllowVarArgs),
      Blocks(buildExtractionBlockSet(BBs, DT, AllowVarArgs)) {}

CodeExtractor::CodeExtractor(DominatorTree &DT, Loop &L, bool AggregateArgs,
                             BlockFrequencyInfo *BFI,
                             BranchProbabilityInfo *BPI)
    : DT(&DT), AggregateArgs(AggregateArgs || AggregateArgsOpt), BFI(BFI),
      BPI(BPI), AllowVarArgs(false),
      Blocks(buildExtractionBlockSet(L.getBlocks(), &DT,
                                     /* AllowVarArgs */ false)) {}

/// definedInRegion - Return true if the specified value is defined in the
/// extracted region.
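/// Values that are defined in the region but used outside of it become
/// outputs of the extracted function.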
static bool definedInRegion(const SetVector<BasicBlock *> &Blocks, Value *V) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    if (Blocks.count(I->getParent()))
      return true;
  return false;
}

/// definedInCaller - Return true if the specified value is defined in the
/// function being code extracted, but not in the region being extracted.
/// These values must be passed in as live-ins to the function.
static bool definedInCaller(const SetVector<BasicBlock *> &Blocks, Value *V) {
  if (isa<Argument>(V)) return true;
  if (Instruction *I = dyn_cast<Instruction>(V))
    if (!Blocks.count(I->getParent()))
      return true;
  return false;
}

static BasicBlock *getCommonExitBlock(const SetVector<BasicBlock *> &Blocks) {
  BasicBlock *CommonExitBlock = nullptr;
  auto hasNonCommonExitSucc = [&](BasicBlock *Block) {
    for (auto *Succ : successors(Block)) {
      // Internal edges, ok.
      if (Blocks.count(Succ))
        continue;
      if (!CommonExitBlock) {
        CommonExitBlock = Succ;
        continue;
      }
      if (CommonExitBlock == Succ)
        continue;

      return true;
    }
    return false;
  };

  if (any_of(Blocks, hasNonCommonExitSucc))
    return nullptr;

  return CommonExitBlock;
}

bool CodeExtractor::isLegalToShrinkwrapLifetimeMarkers(
    Instruction *Addr) const {
  AllocaInst *AI = cast<AllocaInst>(Addr->stripInBoundsConstantOffsets());
  Function *Func = (*Blocks.begin())->getParent();
  for (BasicBlock &BB : *Func) {
    if (Blocks.count(&BB))
      continue;
    for (Instruction &II : BB) {
      if (isa<DbgInfoIntrinsic>(II))
        continue;

      unsigned Opcode = II.getOpcode();
      Value *MemAddr = nullptr;
      switch (Opcode) {
      case Instruction::Store:
      case Instruction::Load: {
        if (Opcode == Instruction::Store) {
          StoreInst *SI = cast<StoreInst>(&II);
          MemAddr = SI->getPointerOperand();
        } else {
          LoadInst *LI = cast<LoadInst>(&II);
          MemAddr = LI->getPointerOperand();
        }
        // A global variable cannot alias locals.
        if (dyn_cast<Constant>(MemAddr))
          break;
        Value *Base = MemAddr->stripInBoundsConstantOffsets();
        if (!dyn_cast<AllocaInst>(Base) || Base == AI)
          return false;
        break;
      }
      default: {
        IntrinsicInst *IntrInst = dyn_cast<IntrinsicInst>(&II);
        if (IntrInst) {
          if (IntrInst->getIntrinsicID() == Intrinsic::lifetime_start ||
              IntrInst->getIntrinsicID() == Intrinsic::lifetime_end)
            break;
          return false;
        }
        // Treat all other cases conservatively if they may have side effects.
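        // (A call that may write memory, for example, could access the alloca
        // whose lifetime markers we are trying to shrinkwrap.)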
        if (II.mayHaveSideEffects())
          return false;
      }
      }
    }
  }

  return true;
}

BasicBlock *
CodeExtractor::findOrCreateBlockForHoisting(BasicBlock *CommonExitBlock) {
  BasicBlock *SinglePredFromOutlineRegion = nullptr;
  assert(!Blocks.count(CommonExitBlock) &&
         "Expect a block outside the region!");
  for (auto *Pred : predecessors(CommonExitBlock)) {
    if (!Blocks.count(Pred))
      continue;
    if (!SinglePredFromOutlineRegion) {
      SinglePredFromOutlineRegion = Pred;
    } else if (SinglePredFromOutlineRegion != Pred) {
      SinglePredFromOutlineRegion = nullptr;
      break;
    }
  }

  if (SinglePredFromOutlineRegion)
    return SinglePredFromOutlineRegion;

#ifndef NDEBUG
  auto getFirstPHI = [](BasicBlock *BB) {
    BasicBlock::iterator I = BB->begin();
    PHINode *FirstPhi = nullptr;
    while (I != BB->end()) {
      PHINode *Phi = dyn_cast<PHINode>(I);
      if (!Phi)
        break;
      if (!FirstPhi) {
        FirstPhi = Phi;
        break;
      }
    }
    return FirstPhi;
  };
  // If there are any phi nodes, the single pred either exists or has already
  // been created before code extraction.
  assert(!getFirstPHI(CommonExitBlock) && "Phi not expected");
#endif

  BasicBlock *NewExitBlock = CommonExitBlock->splitBasicBlock(
      CommonExitBlock->getFirstNonPHI()->getIterator());

  for (auto PI = pred_begin(CommonExitBlock), PE = pred_end(CommonExitBlock);
       PI != PE;) {
    BasicBlock *Pred = *PI++;
    if (Blocks.count(Pred))
      continue;
    Pred->getTerminator()->replaceUsesOfWith(CommonExitBlock, NewExitBlock);
  }
  // Now add the old exit block to the outline region.
  Blocks.insert(CommonExitBlock);
  return CommonExitBlock;
}

void CodeExtractor::findAllocas(ValueSet &SinkCands, ValueSet &HoistCands,
                                BasicBlock *&ExitBlock) const {
  Function *Func = (*Blocks.begin())->getParent();
  ExitBlock = getCommonExitBlock(Blocks);

  for (BasicBlock &BB : *Func) {
    if (Blocks.count(&BB))
      continue;
    for (Instruction &II : BB) {
      auto *AI = dyn_cast<AllocaInst>(&II);
      if (!AI)
        continue;

      // Find the pair of lifetime markers for address 'Addr' that are either
      // defined inside the outline region or can legally be shrinkwrapped into
      // the outline region. If there are no other untracked uses of the
      // address, return the pair of markers if found; otherwise return a pair
      // of nullptr.
      auto GetLifeTimeMarkers =
          [&](Instruction *Addr, bool &SinkLifeStart,
              bool &HoistLifeEnd) -> std::pair<Instruction *, Instruction *> {
        Instruction *LifeStart = nullptr, *LifeEnd = nullptr;

        for (User *U : Addr->users()) {
          IntrinsicInst *IntrInst = dyn_cast<IntrinsicInst>(U);
          if (IntrInst) {
            if (IntrInst->getIntrinsicID() == Intrinsic::lifetime_start) {
              // Do not handle the case where AI has multiple start markers.
              if (LifeStart)
                return std::make_pair<Instruction *>(nullptr, nullptr);
              LifeStart = IntrInst;
            }
            if (IntrInst->getIntrinsicID() == Intrinsic::lifetime_end) {
              if (LifeEnd)
                return std::make_pair<Instruction *>(nullptr, nullptr);
              LifeEnd = IntrInst;
            }
            continue;
          }
          // If we find an untracked use of the address, bail.
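          // A use that is neither a lifetime marker nor inside the region
          // means the address escapes the region, so moving the markers would
          // not be safe.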
          if (!definedInRegion(Blocks, U))
            return std::make_pair<Instruction *>(nullptr, nullptr);
        }

        if (!LifeStart || !LifeEnd)
          return std::make_pair<Instruction *>(nullptr, nullptr);

        SinkLifeStart = !definedInRegion(Blocks, LifeStart);
        HoistLifeEnd = !definedInRegion(Blocks, LifeEnd);
        // Do the legality check.
        if ((SinkLifeStart || HoistLifeEnd) &&
            !isLegalToShrinkwrapLifetimeMarkers(Addr))
          return std::make_pair<Instruction *>(nullptr, nullptr);

        // Check to see if we have a place to do hoisting; if not, bail.
        if (HoistLifeEnd && !ExitBlock)
          return std::make_pair<Instruction *>(nullptr, nullptr);

        return std::make_pair(LifeStart, LifeEnd);
      };

      bool SinkLifeStart = false, HoistLifeEnd = false;
      auto Markers = GetLifeTimeMarkers(AI, SinkLifeStart, HoistLifeEnd);

      if (Markers.first) {
        if (SinkLifeStart)
          SinkCands.insert(Markers.first);
        SinkCands.insert(AI);
        if (HoistLifeEnd)
          HoistCands.insert(Markers.second);
        continue;
      }

      // Follow the bitcast.
      Instruction *MarkerAddr = nullptr;
      for (User *U : AI->users()) {
        if (U->stripInBoundsConstantOffsets() == AI) {
          SinkLifeStart = false;
          HoistLifeEnd = false;
          Instruction *Bitcast = cast<Instruction>(U);
          Markers = GetLifeTimeMarkers(Bitcast, SinkLifeStart, HoistLifeEnd);
          if (Markers.first) {
            MarkerAddr = Bitcast;
            continue;
          }
        }

        // Found an unknown use of AI.
        if (!definedInRegion(Blocks, U)) {
          MarkerAddr = nullptr;
          break;
        }
      }

      if (MarkerAddr) {
        if (SinkLifeStart)
          SinkCands.insert(Markers.first);
        if (!definedInRegion(Blocks, MarkerAddr))
          SinkCands.insert(MarkerAddr);
        SinkCands.insert(AI);
        if (HoistLifeEnd)
          HoistCands.insert(Markers.second);
      }
    }
  }
}

void CodeExtractor::findInputsOutputs(ValueSet &Inputs, ValueSet &Outputs,
                                      const ValueSet &SinkCands) const {
  for (BasicBlock *BB : Blocks) {
    // If a used value is defined outside the region, it's an input. If an
    // instruction is used outside the region, it's an output.
    for (Instruction &II : *BB) {
      for (User::op_iterator OI = II.op_begin(), OE = II.op_end(); OI != OE;
           ++OI) {
        Value *V = *OI;
        if (!SinkCands.count(V) && definedInCaller(Blocks, V))
          Inputs.insert(V);
      }

      for (User *U : II.users())
        if (!definedInRegion(Blocks, U)) {
          Outputs.insert(&II);
          break;
        }
    }
  }
}

/// severSplitPHINodes - If a PHI node has multiple inputs from outside of the
/// region, we need to split the entry block of the region so that the PHI node
/// is easier to deal with.
void CodeExtractor::severSplitPHINodes(BasicBlock *&Header) {
  unsigned NumPredsFromRegion = 0;
  unsigned NumPredsOutsideRegion = 0;

  if (Header != &Header->getParent()->getEntryBlock()) {
    PHINode *PN = dyn_cast<PHINode>(Header->begin());
    if (!PN) return; // No PHI nodes.

    // If the header node contains any PHI nodes, check to see if there is more
    // than one entry from outside the region. If so, we need to sever the
    // header block into two.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (Blocks.count(PN->getIncomingBlock(i)))
        ++NumPredsFromRegion;
      else
        ++NumPredsOutsideRegion;

    // If there is one (or fewer) predecessor from outside the region, we don't
    // need to do anything special.
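    // (With at most one predecessor outside the region, each PHI has a single
    // incoming value from the caller side, which can be passed into the
    // extracted function as an ordinary input.)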
    if (NumPredsOutsideRegion <= 1) return;
  }

  // Otherwise, we need to split the header block into two pieces: one
  // containing PHI nodes merging values from outside of the region, and a
  // second that contains all of the code for the block and merges back any
  // incoming values from inside of the region.
  BasicBlock *NewBB = SplitBlock(Header, Header->getFirstNonPHI(), DT);

  // We only want to code extract the second block now, and it becomes the new
  // header of the region.
  BasicBlock *OldPred = Header;
  Blocks.remove(OldPred);
  Blocks.insert(NewBB);
  Header = NewBB;

  // Okay, now we need to adjust the PHI nodes and any branches from within the
  // region to go to the new header block instead of the old header block.
  if (NumPredsFromRegion) {
    PHINode *PN = cast<PHINode>(OldPred->begin());
    // Loop over all of the predecessors of OldPred that are in the region,
    // changing them to branch to NewBB instead.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (Blocks.count(PN->getIncomingBlock(i))) {
        TerminatorInst *TI = PN->getIncomingBlock(i)->getTerminator();
        TI->replaceUsesOfWith(OldPred, NewBB);
      }

    // Okay, everything within the region is now branching to the right block;
    // we just have to update the PHI nodes now, inserting PHI nodes into NewBB.
    BasicBlock::iterator AfterPHIs;
    for (AfterPHIs = OldPred->begin(); isa<PHINode>(AfterPHIs); ++AfterPHIs) {
      PHINode *PN = cast<PHINode>(AfterPHIs);
      // Create a new PHI node in the new region, which has an incoming value
      // from OldPred of PN.
      PHINode *NewPN = PHINode::Create(PN->getType(), 1 + NumPredsFromRegion,
                                       PN->getName() + ".ce", &NewBB->front());
      PN->replaceAllUsesWith(NewPN);
      NewPN->addIncoming(PN, OldPred);

      // Loop over all of the incoming values in PN, moving them to NewPN if
      // they are from the extracted region.
      for (unsigned i = 0; i != PN->getNumIncomingValues(); ++i) {
        if (Blocks.count(PN->getIncomingBlock(i))) {
          NewPN->addIncoming(PN->getIncomingValue(i), PN->getIncomingBlock(i));
          PN->removeIncomingValue(i);
          --i;
        }
      }
    }
  }
}

void CodeExtractor::splitReturnBlocks() {
  for (BasicBlock *Block : Blocks)
    if (ReturnInst *RI = dyn_cast<ReturnInst>(Block->getTerminator())) {
      BasicBlock *New =
          Block->splitBasicBlock(RI->getIterator(), Block->getName() + ".ret");
      if (DT) {
        // Old dominates New. New node dominates all other nodes dominated
        // by Old.
        DomTreeNode *OldNode = DT->getNode(Block);
        SmallVector<DomTreeNode *, 8> Children(OldNode->begin(),
                                               OldNode->end());

        DomTreeNode *NewNode = DT->addNewBlock(New, Block);

        for (DomTreeNode *I : Children)
          DT->changeImmediateDominator(I, NewNode);
      }
    }
}

/// constructFunction - make a function based on inputs and outputs, as follows:
/// f(in0, ..., inN, out0, ..., outN)
Function *CodeExtractor::constructFunction(const ValueSet &inputs,
                                           const ValueSet &outputs,
                                           BasicBlock *header,
                                           BasicBlock *newRootNode,
                                           BasicBlock *newHeader,
                                           Function *oldFunction,
                                           Module *M) {
  DEBUG(dbgs() << "inputs: " << inputs.size() << "\n");
  DEBUG(dbgs() << "outputs: " << outputs.size() << "\n");

  // This function returns unsigned; outputs will go back by reference.
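  // Zero or one exit blocks need no return value, two exits are encoded as a
  // boolean, and more than two are encoded as a 16-bit exit index.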
  switch (NumExitBlocks) {
  case 0:
  case 1: RetTy = Type::getVoidTy(header->getContext()); break;
  case 2: RetTy = Type::getInt1Ty(header->getContext()); break;
  default: RetTy = Type::getInt16Ty(header->getContext()); break;
  }

  std::vector<Type *> paramTy;

  // Add the types of the input values to the function's argument list.
  for (Value *value : inputs) {
    DEBUG(dbgs() << "value used in func: " << *value << "\n");
    paramTy.push_back(value->getType());
  }

  // Add the types of the output values to the function's argument list.
  for (Value *output : outputs) {
    DEBUG(dbgs() << "instr used in func: " << *output << "\n");
    if (AggregateArgs)
      paramTy.push_back(output->getType());
    else
      paramTy.push_back(PointerType::getUnqual(output->getType()));
  }

  DEBUG({
    dbgs() << "Function type: " << *RetTy << " f(";
    for (Type *i : paramTy)
      dbgs() << *i << ", ";
    dbgs() << ")\n";
  });

  StructType *StructTy;
  if (AggregateArgs && (inputs.size() + outputs.size() > 0)) {
    StructTy = StructType::get(M->getContext(), paramTy);
    paramTy.clear();
    paramTy.push_back(PointerType::getUnqual(StructTy));
  }
  FunctionType *funcType =
      FunctionType::get(RetTy, paramTy,
                        AllowVarArgs && oldFunction->isVarArg());

  // Create the new function.
  Function *newFunction = Function::Create(funcType,
                                           GlobalValue::InternalLinkage,
                                           oldFunction->getName() + "_" +
                                           header->getName(), M);
  // If the old function is no-throw, so is the new one.
  if (oldFunction->doesNotThrow())
    newFunction->setDoesNotThrow();

  // Inherit the uwtable attribute if we need to.
  if (oldFunction->hasUWTable())
    newFunction->setHasUWTable();

  // Inherit all of the target-dependent attributes and white-listed
  // target-independent attributes (e.g. if the extracted region contains a
  // call to an x86.sse instruction, we need to make sure that the extracted
  // region has the "target-features" attribute allowing it to be lowered).
  // FIXME: This should be changed to check to see if a specific
  // attribute cannot be inherited.
  for (const auto &Attr : oldFunction->getAttributes().getFnAttributes()) {
    if (Attr.isStringAttribute()) {
      if (Attr.getKindAsString() == "thunk")
        continue;
    } else
      switch (Attr.getKindAsEnum()) {
      // Those attributes cannot be propagated safely. Explicitly list them
      // here so we get a warning if new attributes are added. This list also
      // includes non-function attributes.
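      // (Parameter and return-value attributes such as ByVal or SExt cannot
      // occur as function attributes at all; they appear here only so the
      // switch stays exhaustive over all attribute kinds.)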
      case Attribute::Alignment:
      case Attribute::AllocSize:
      case Attribute::ArgMemOnly:
      case Attribute::Builtin:
      case Attribute::ByVal:
      case Attribute::Convergent:
      case Attribute::Dereferenceable:
      case Attribute::DereferenceableOrNull:
      case Attribute::InAlloca:
      case Attribute::InReg:
      case Attribute::InaccessibleMemOnly:
      case Attribute::InaccessibleMemOrArgMemOnly:
      case Attribute::JumpTable:
      case Attribute::Naked:
      case Attribute::Nest:
      case Attribute::NoAlias:
      case Attribute::NoBuiltin:
      case Attribute::NoCapture:
      case Attribute::NoReturn:
      case Attribute::None:
      case Attribute::NonNull:
      case Attribute::ReadNone:
      case Attribute::ReadOnly:
      case Attribute::Returned:
      case Attribute::ReturnsTwice:
      case Attribute::SExt:
      case Attribute::Speculatable:
      case Attribute::StackAlignment:
      case Attribute::StructRet:
      case Attribute::SwiftError:
      case Attribute::SwiftSelf:
      case Attribute::WriteOnly:
      case Attribute::ZExt:
      case Attribute::EndAttrKinds:
        continue;
      // Those attributes should be safe to propagate to the extracted function.
      case Attribute::AlwaysInline:
      case Attribute::Cold:
      case Attribute::NoRecurse:
      case Attribute::InlineHint:
      case Attribute::MinSize:
      case Attribute::NoDuplicate:
      case Attribute::NoImplicitFloat:
      case Attribute::NoInline:
      case Attribute::NonLazyBind:
      case Attribute::NoRedZone:
      case Attribute::NoUnwind:
      case Attribute::OptimizeNone:
      case Attribute::OptimizeForSize:
      case Attribute::SafeStack:
      case Attribute::SanitizeAddress:
      case Attribute::SanitizeMemory:
      case Attribute::SanitizeThread:
      case Attribute::SanitizeHWAddress:
      case Attribute::StackProtect:
      case Attribute::StackProtectReq:
      case Attribute::StackProtectStrong:
      case Attribute::StrictFP:
      case Attribute::UWTable:
        break;
      }

    newFunction->addFnAttr(Attr);
  }
  newFunction->getBasicBlockList().push_back(newRootNode);

  // Create an iterator to name all of the arguments we inserted.
  Function::arg_iterator AI = newFunction->arg_begin();

  // Rewrite all users of the inputs in the extracted region to use the
  // arguments (or appropriate addressing into struct) instead.
  for (unsigned i = 0, e = inputs.size(); i != e; ++i) {
    Value *RewriteVal;
    if (AggregateArgs) {
      Value *Idx[2];
      Idx[0] = Constant::getNullValue(Type::getInt32Ty(header->getContext()));
      Idx[1] = ConstantInt::get(Type::getInt32Ty(header->getContext()), i);
      TerminatorInst *TI = newFunction->begin()->getTerminator();
      GetElementPtrInst *GEP = GetElementPtrInst::Create(
          StructTy, &*AI, Idx, "gep_" + inputs[i]->getName(), TI);
      RewriteVal = new LoadInst(GEP, "loadgep_" + inputs[i]->getName(), TI);
    } else
      RewriteVal = &*AI++;

    std::vector<User *> Users(inputs[i]->user_begin(), inputs[i]->user_end());
    for (User *use : Users)
      if (Instruction *inst = dyn_cast<Instruction>(use))
        if (Blocks.count(inst->getParent()))
          inst->replaceUsesOfWith(inputs[i], RewriteVal);
  }

  // Set names for input and output arguments.
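  // (With aggregated arguments there is only the single struct-pointer
  // argument, so there are no per-value argument names to set.)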
  if (!AggregateArgs) {
    AI = newFunction->arg_begin();
    for (unsigned i = 0, e = inputs.size(); i != e; ++i, ++AI)
      AI->setName(inputs[i]->getName());
    for (unsigned i = 0, e = outputs.size(); i != e; ++i, ++AI)
      AI->setName(outputs[i]->getName()+".out");
  }

  // Rewrite branches to basic blocks outside of the region to new dummy blocks
  // within the new function. This must be done before we lose track of which
  // blocks were originally in the code region.
  std::vector<User *> Users(header->user_begin(), header->user_end());
  for (unsigned i = 0, e = Users.size(); i != e; ++i)
    // The BasicBlock which contains the branch is not in the region;
    // modify the branch target to the new block.
    if (TerminatorInst *TI = dyn_cast<TerminatorInst>(Users[i]))
      if (!Blocks.count(TI->getParent()) &&
          TI->getParent()->getParent() == oldFunction)
        TI->replaceUsesOfWith(header, newHeader);

  return newFunction;
}

/// emitCallAndSwitchStatement - This method sets up the caller side by adding
/// the call instruction, splitting any PHI nodes in the header block as
/// necessary.
void CodeExtractor::
emitCallAndSwitchStatement(Function *newFunction, BasicBlock *codeReplacer,
                           ValueSet &inputs, ValueSet &outputs) {
  // Emit a call to the new function, passing in: a pointer to the struct (if
  // aggregating parameters), or plain inputs and allocated memory for outputs.
  std::vector<Value *> params, StructValues, ReloadOutputs, Reloads;

  Module *M = newFunction->getParent();
  LLVMContext &Context = M->getContext();
  const DataLayout &DL = M->getDataLayout();

  // Add inputs as params, or to be filled into the struct.
  for (Value *input : inputs)
    if (AggregateArgs)
      StructValues.push_back(input);
    else
      params.push_back(input);

  // Create allocas for the outputs.
  for (Value *output : outputs) {
    if (AggregateArgs) {
      StructValues.push_back(output);
    } else {
      AllocaInst *alloca =
        new AllocaInst(output->getType(), DL.getAllocaAddrSpace(),
                       nullptr, output->getName() + ".loc",
                       &codeReplacer->getParent()->front().front());
      ReloadOutputs.push_back(alloca);
      params.push_back(alloca);
    }
  }

  StructType *StructArgTy = nullptr;
  AllocaInst *Struct = nullptr;
  if (AggregateArgs && (inputs.size() + outputs.size() > 0)) {
    std::vector<Type *> ArgTypes;
    for (ValueSet::iterator v = StructValues.begin(),
           ve = StructValues.end(); v != ve; ++v)
      ArgTypes.push_back((*v)->getType());

    // Allocate a struct at the beginning of this function.
    StructArgTy = StructType::get(newFunction->getContext(), ArgTypes);
    Struct = new AllocaInst(StructArgTy, DL.getAllocaAddrSpace(), nullptr,
                            "structArg",
                            &codeReplacer->getParent()->front().front());
    params.push_back(Struct);

    for (unsigned i = 0, e = inputs.size(); i != e; ++i) {
      Value *Idx[2];
      Idx[0] = Constant::getNullValue(Type::getInt32Ty(Context));
      Idx[1] = ConstantInt::get(Type::getInt32Ty(Context), i);
      GetElementPtrInst *GEP = GetElementPtrInst::Create(
          StructArgTy, Struct, Idx, "gep_" + StructValues[i]->getName());
      codeReplacer->getInstList().push_back(GEP);
      StoreInst *SI = new StoreInst(StructValues[i], GEP);
      codeReplacer->getInstList().push_back(SI);
    }
  }

  // Emit the call to the function.
  CallInst *call = CallInst::Create(newFunction, params,
                                    NumExitBlocks > 1 ? "targetBlock" : "");
  // Add a debug location to the new call, if the original function has debug
  // info. In that case, the terminator of the entry block of the extracted
  // function contains the first debug location of the extracted function,
  // set in extractCodeRegion.
  if (codeReplacer->getParent()->getSubprogram()) {
    if (auto DL = newFunction->getEntryBlock().getTerminator()->getDebugLoc())
      call->setDebugLoc(DL);
  }
  codeReplacer->getInstList().push_back(call);

  Function::arg_iterator OutputArgBegin = newFunction->arg_begin();
  unsigned FirstOut = inputs.size();
  if (!AggregateArgs)
    std::advance(OutputArgBegin, inputs.size());

  // Reload the outputs passed in by reference.
  Function::arg_iterator OAI = OutputArgBegin;
  for (unsigned i = 0, e = outputs.size(); i != e; ++i) {
    Value *Output = nullptr;
    if (AggregateArgs) {
      Value *Idx[2];
      Idx[0] = Constant::getNullValue(Type::getInt32Ty(Context));
      Idx[1] = ConstantInt::get(Type::getInt32Ty(Context), FirstOut + i);
      GetElementPtrInst *GEP = GetElementPtrInst::Create(
          StructArgTy, Struct, Idx, "gep_reload_" + outputs[i]->getName());
      codeReplacer->getInstList().push_back(GEP);
      Output = GEP;
    } else {
      Output = ReloadOutputs[i];
    }
    LoadInst *load = new LoadInst(Output, outputs[i]->getName()+".reload");
    Reloads.push_back(load);
    codeReplacer->getInstList().push_back(load);
    std::vector<User *> Users(outputs[i]->user_begin(), outputs[i]->user_end());
    for (unsigned u = 0, e = Users.size(); u != e; ++u) {
      Instruction *inst = cast<Instruction>(Users[u]);
      if (!Blocks.count(inst->getParent()))
        inst->replaceUsesOfWith(outputs[i], load);
    }

    // Store to the argument right after the definition of the output value.
    auto *OutI = dyn_cast<Instruction>(outputs[i]);
    if (!OutI)
      continue;
    // Find the proper insertion point.
    Instruction *InsertPt = OutI->getNextNode();
    // Assume that nothing has interleaved non-PHI instructions with the PHI
    // nodes.
    if (isa<PHINode>(InsertPt))
      InsertPt = InsertPt->getParent()->getFirstNonPHI();

    assert(OAI != newFunction->arg_end() &&
           "Number of output arguments should match "
           "the amount of defined values");
    if (AggregateArgs) {
      Value *Idx[2];
      Idx[0] = Constant::getNullValue(Type::getInt32Ty(Context));
      Idx[1] = ConstantInt::get(Type::getInt32Ty(Context), FirstOut + i);
      GetElementPtrInst *GEP = GetElementPtrInst::Create(
          StructArgTy, &*OAI, Idx, "gep_" + outputs[i]->getName(), InsertPt);
      new StoreInst(outputs[i], GEP, InsertPt);
      // Since there should be only one struct argument aggregating
      // all the output values, we shouldn't increment OAI, which always
      // points to the struct argument, in this case.
    } else {
      new StoreInst(outputs[i], &*OAI, InsertPt);
      ++OAI;
    }
  }

  // Now we can emit a switch statement using the call as a value.
  SwitchInst *TheSwitch =
      SwitchInst::Create(Constant::getNullValue(Type::getInt16Ty(Context)),
                         codeReplacer, 0, codeReplacer);

  // Since there may be multiple exits from the original region, make the new
  // function return an unsigned value and switch on that number. This loop
  // iterates over all of the blocks in the extracted region, updating any
  // terminator instructions in the to-be-extracted region that branch to
  // blocks that are not in the region to be extracted.
  std::map<BasicBlock *, BasicBlock *> ExitBlockMap;

  unsigned switchVal = 0;
  for (BasicBlock *Block : Blocks) {
    TerminatorInst *TI = Block->getTerminator();
    for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
      if (!Blocks.count(TI->getSuccessor(i))) {
        BasicBlock *OldTarget = TI->getSuccessor(i);
        // Add a new basic block which returns the appropriate value.
        BasicBlock *&NewTarget = ExitBlockMap[OldTarget];
        if (!NewTarget) {
          // If we don't already have an exit stub for this non-extracted
          // destination, create one now!
          NewTarget = BasicBlock::Create(Context,
                                         OldTarget->getName() + ".exitStub",
                                         newFunction);
          unsigned SuccNum = switchVal++;

          Value *brVal = nullptr;
          switch (NumExitBlocks) {
          case 0:
          case 1: break; // No value needed.
          case 2: // Conditional branch, return a bool.
            brVal = ConstantInt::get(Type::getInt1Ty(Context), !SuccNum);
            break;
          default:
            brVal = ConstantInt::get(Type::getInt16Ty(Context), SuccNum);
            break;
          }

          ReturnInst::Create(Context, brVal, NewTarget);

          // Update the switch instruction.
          TheSwitch->addCase(ConstantInt::get(Type::getInt16Ty(Context),
                                              SuccNum),
                             OldTarget);
        }

        // Rewrite the original branch instruction with this new target.
        TI->setSuccessor(i, NewTarget);
      }
  }

  // Now that we've done the deed, simplify the switch instruction.
  Type *OldFnRetTy = TheSwitch->getParent()->getParent()->getReturnType();
  switch (NumExitBlocks) {
  case 0:
    // There are no successors (of the block containing the switch itself),
    // which means that previously this was the last part of the function, and
    // hence this should be rewritten as a `ret'.

    // Check if the function should return a value.
    if (OldFnRetTy->isVoidTy()) {
      ReturnInst::Create(Context, nullptr, TheSwitch); // Return void
    } else if (OldFnRetTy == TheSwitch->getCondition()->getType()) {
      // Return what we have.
      ReturnInst::Create(Context, TheSwitch->getCondition(), TheSwitch);
    } else {
      // Otherwise we must have code-extracted an unwind or something; just
      // return whatever we want.
      ReturnInst::Create(Context,
                         Constant::getNullValue(OldFnRetTy), TheSwitch);
    }

    TheSwitch->eraseFromParent();
    break;
  case 1:
    // Only a single destination, change the switch into an unconditional
    // branch.
    BranchInst::Create(TheSwitch->getSuccessor(1), TheSwitch);
    TheSwitch->eraseFromParent();
    break;
  case 2:
    BranchInst::Create(TheSwitch->getSuccessor(1), TheSwitch->getSuccessor(2),
                       call, TheSwitch);
    TheSwitch->eraseFromParent();
    break;
  default:
    // Otherwise, make the default destination of the switch instruction be one
    // of the other successors.
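    // Reuse the target of the last case as the default destination, then drop
    // that now-redundant case.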
    TheSwitch->setCondition(call);
    TheSwitch->setDefaultDest(TheSwitch->getSuccessor(NumExitBlocks));
    // Remove the redundant case.
    TheSwitch->removeCase(SwitchInst::CaseIt(TheSwitch, NumExitBlocks-1));
    break;
  }
}

void CodeExtractor::moveCodeToFunction(Function *newFunction) {
  Function *oldFunc = (*Blocks.begin())->getParent();
  Function::BasicBlockListType &oldBlocks = oldFunc->getBasicBlockList();
  Function::BasicBlockListType &newBlocks = newFunction->getBasicBlockList();

  for (BasicBlock *Block : Blocks) {
    // Delete the basic block from the old function, and the list of blocks.
    oldBlocks.remove(Block);

    // Insert this basic block into the new function.
    newBlocks.push_back(Block);
  }
}

void CodeExtractor::calculateNewCallTerminatorWeights(
    BasicBlock *CodeReplacer,
    DenseMap<BasicBlock *, BlockFrequency> &ExitWeights,
    BranchProbabilityInfo *BPI) {
  using Distribution = BlockFrequencyInfoImplBase::Distribution;
  using BlockNode = BlockFrequencyInfoImplBase::BlockNode;

  // Update the branch weights for the exit block.
  TerminatorInst *TI = CodeReplacer->getTerminator();
  SmallVector<unsigned, 8> BranchWeights(TI->getNumSuccessors(), 0);

  // Block frequency distribution with dummy node.
  Distribution BranchDist;

  // Add each of the frequencies of the successors.
  for (unsigned i = 0, e = TI->getNumSuccessors(); i < e; ++i) {
    BlockNode ExitNode(i);
    uint64_t ExitFreq = ExitWeights[TI->getSuccessor(i)].getFrequency();
    if (ExitFreq != 0)
      BranchDist.addExit(ExitNode, ExitFreq);
    else
      BPI->setEdgeProbability(CodeReplacer, i, BranchProbability::getZero());
  }

  // Check for no total weight.
  if (BranchDist.Total == 0)
    return;

  // Normalize the distribution so that the weights fit in unsigned.
  BranchDist.normalize();

  // Create normalized branch weights and set the metadata.
  for (unsigned I = 0, E = BranchDist.Weights.size(); I < E; ++I) {
    const auto &Weight = BranchDist.Weights[I];

    // Get the weight and update the current BFI.
    BranchWeights[Weight.TargetNode.Index] = Weight.Amount;
    BranchProbability BP(Weight.Amount, BranchDist.Total);
    BPI->setEdgeProbability(CodeReplacer, Weight.TargetNode.Index, BP);
  }
  TI->setMetadata(
      LLVMContext::MD_prof,
      MDBuilder(TI->getContext()).createBranchWeights(BranchWeights));
}

Function *CodeExtractor::extractCodeRegion() {
  if (!isEligible())
    return nullptr;

  // Assumption: this is a single-entry code region, and the header is the
  // first block in the region.
  BasicBlock *header = *Blocks.begin();
  Function *oldFunction = header->getParent();

  // For functions with varargs, check that varargs handling is only done in
  // the outlined function, i.e. vastart and vaend are only used in outlined
  // blocks.
  if (AllowVarArgs && oldFunction->getFunctionType()->isVarArg()) {
    auto containsVarArgIntrinsic = [](Instruction &I) {
      if (const CallInst *CI = dyn_cast<CallInst>(&I))
        if (const Function *F = CI->getCalledFunction())
          return F->getIntrinsicID() == Intrinsic::vastart ||
                 F->getIntrinsicID() == Intrinsic::vaend;
      return false;
    };

    for (auto &BB : *oldFunction) {
      if (Blocks.count(&BB))
        continue;
      if (llvm::any_of(BB, containsVarArgIntrinsic))
        return nullptr;
    }
  }
  ValueSet inputs, outputs, SinkingCands, HoistingCands;
  BasicBlock *CommonExit = nullptr;

  // Calculate the entry frequency of the new function before we change the
  // root block.
  BlockFrequency EntryFreq;
  if (BFI) {
    assert(BPI && "Both BPI and BFI are required to preserve profile info");
    for (BasicBlock *Pred : predecessors(header)) {
      if (Blocks.count(Pred))
        continue;
      EntryFreq +=
          BFI->getBlockFreq(Pred) * BPI->getEdgeProbability(Pred, header);
    }
  }

  // If we have to split PHI nodes or the entry block, do so now.
  severSplitPHINodes(header);

  // If we have any return instructions in the region, split those blocks so
  // that the return is not in the region.
  splitReturnBlocks();

  // This takes the place of the original region.
  BasicBlock *codeReplacer = BasicBlock::Create(header->getContext(),
                                                "codeRepl", oldFunction,
                                                header);

  // The new function needs a root node because other nodes can branch to the
  // head of the region, but the entry node of a function cannot have preds.
  BasicBlock *newFuncRoot = BasicBlock::Create(header->getContext(),
                                               "newFuncRoot");
  auto *BranchI = BranchInst::Create(header);
  // If the original function has debug info, we have to add a debug location
  // to the new branch instruction from the artificial entry block.
  // We use the debug location of the first instruction in the extracted
  // blocks, as there is no other equivalent line in the source code.
  if (oldFunction->getSubprogram()) {
    any_of(Blocks, [&BranchI](const BasicBlock *BB) {
      return any_of(*BB, [&BranchI](const Instruction &I) {
        if (!I.getDebugLoc())
          return false;
        BranchI->setDebugLoc(I.getDebugLoc());
        return true;
      });
    });
  }
  newFuncRoot->getInstList().push_back(BranchI);

  findAllocas(SinkingCands, HoistingCands, CommonExit);
  assert(HoistingCands.empty() || CommonExit);

  // Find inputs to, outputs from the code region.
  findInputsOutputs(inputs, outputs, SinkingCands);

  // Now sink all instructions which only have non-phi uses inside the region.
  for (auto *II : SinkingCands)
    cast<Instruction>(II)->moveBefore(*newFuncRoot,
                                      newFuncRoot->getFirstInsertionPt());

  if (!HoistingCands.empty()) {
    auto *HoistToBlock = findOrCreateBlockForHoisting(CommonExit);
    Instruction *TI = HoistToBlock->getTerminator();
    for (auto *II : HoistingCands)
      cast<Instruction>(II)->moveBefore(TI);
  }

  // Calculate the exit blocks for the extracted region and the total exit
  // weights for each of those blocks.
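  // NumExitBlocks, computed below, determines both the return type chosen in
  // constructFunction and the switch emitted in emitCallAndSwitchStatement.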
  DenseMap<BasicBlock *, BlockFrequency> ExitWeights;
  SmallPtrSet<BasicBlock *, 1> ExitBlocks;
  for (BasicBlock *Block : Blocks) {
    for (succ_iterator SI = succ_begin(Block), SE = succ_end(Block); SI != SE;
         ++SI) {
      if (!Blocks.count(*SI)) {
        // Update the branch weight for this successor.
        if (BFI) {
          BlockFrequency &BF = ExitWeights[*SI];
          BF += BFI->getBlockFreq(Block) * BPI->getEdgeProbability(Block, *SI);
        }
        ExitBlocks.insert(*SI);
      }
    }
  }
  NumExitBlocks = ExitBlocks.size();

  // Construct new function based on inputs/outputs & add allocas for all defs.
  Function *newFunction = constructFunction(inputs, outputs, header,
                                            newFuncRoot,
                                            codeReplacer, oldFunction,
                                            oldFunction->getParent());

  // Update the entry count of the function.
  if (BFI) {
    Optional<uint64_t> EntryCount =
        BFI->getProfileCountFromFreq(EntryFreq.getFrequency());
    if (EntryCount.hasValue())
      newFunction->setEntryCount(EntryCount.getValue());
    BFI->setBlockFreq(codeReplacer, EntryFreq.getFrequency());
  }

  emitCallAndSwitchStatement(newFunction, codeReplacer, inputs, outputs);

  moveCodeToFunction(newFunction);

  // Update the branch weights for the exit block.
  if (BFI && NumExitBlocks > 1)
    calculateNewCallTerminatorWeights(codeReplacer, ExitWeights, BPI);

  // Loop over all of the PHI nodes in the header block, and change any
  // references to the old incoming edge to be the new incoming edge.
  for (BasicBlock::iterator I = header->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (!Blocks.count(PN->getIncomingBlock(i)))
        PN->setIncomingBlock(i, newFuncRoot);
  }

  // Look at all successors of the codeReplacer block. If any of these blocks
  // had PHI nodes in them, we need to update the "from" block to be the code
  // replacer, not the original block in the extracted region.
  std::vector<BasicBlock *> Succs(succ_begin(codeReplacer),
                                  succ_end(codeReplacer));
  for (unsigned i = 0, e = Succs.size(); i != e; ++i)
    for (BasicBlock::iterator I = Succs[i]->begin(); isa<PHINode>(I); ++I) {
      PHINode *PN = cast<PHINode>(I);
      std::set<BasicBlock*> ProcessedPreds;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        if (Blocks.count(PN->getIncomingBlock(i))) {
          if (ProcessedPreds.insert(PN->getIncomingBlock(i)).second)
            PN->setIncomingBlock(i, codeReplacer);
          else {
            // There were multiple entries in the PHI for this block; now there
            // is only one, so remove the duplicated entries.
            PN->removeIncomingValue(i, false);
            --i; --e;
          }
        }
    }

  DEBUG(if (verifyFunction(*newFunction))
            report_fatal_error("verifyFunction failed!"));
  return newFunction;
}
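
// A minimal usage sketch (illustrative only, not part of this file's logic):
// a transform pass that has collected the blocks of a single-entry region and
// has a DominatorTree, BlockFrequencyInfo, and BranchProbabilityInfo available
// could outline the region roughly as follows. The variable names below are
// hypothetical.
//
//   CodeExtractor CE(RegionBlocks, &DT, /*AggregateArgs=*/false, &BFI, &BPI);
//   if (CE.isEligible())
//     if (Function *Outlined = CE.extractCodeRegion())
//       ; // use Outlined, e.g. mark it cold or adjust its attributes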