//===- Local.cpp - Functions to perform local transformations ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This family of functions performs various local transformations to the
// program.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalObject.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/PseudoProbe.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdint>
#include <iterator>
#include <map>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "local"

STATISTIC(NumRemoved, "Number of unreachable basic blocks removed");
STATISTIC(NumPHICSEs, "Number of PHIs that got CSE'd");

static cl::opt<bool> PHICSEDebugHash(
    "phicse-debug-hash",
#ifdef EXPENSIVE_CHECKS
    cl::init(true),
#else
    cl::init(false),
#endif
    cl::Hidden,
    cl::desc("Perform extra assertion checking to verify that PHINode's hash "
             "function is well-behaved w.r.t. its isEqual predicate"));

static cl::opt<unsigned> PHICSENumPHISmallSize(
    "phicse-num-phi-smallsize", cl::init(32), cl::Hidden,
    cl::desc(
        "When the basic block contains not more than this number of PHI nodes, "
        "perform a (faster!) exhaustive search instead of a set-driven one."));

// Max recursion depth for collectBitParts used when detecting bswap and
// bitreverse idioms.
static const unsigned BitPartRecursionMaxDepth = 64;

//===----------------------------------------------------------------------===//
//  Local constant propagation.
//

/// ConstantFoldTerminator - If a terminator instruction is predicated on a
/// constant value, convert it into an unconditional branch to the constant
/// destination. This is a nontrivial operation because the successors of this
/// basic block must have their PHI nodes updated.
/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
/// conditions and indirectbr addresses this might make dead if
/// DeleteDeadConditions is true.
bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
                                  const TargetLibraryInfo *TLI,
                                  DomTreeUpdater *DTU) {
  Instruction *T = BB->getTerminator();
  IRBuilder<> Builder(T);

  // Branch - See if we are conditionally branching on a constant.
  if (auto *BI = dyn_cast<BranchInst>(T)) {
    if (BI->isUnconditional()) return false;  // Can't optimize uncond branch.

    BasicBlock *Dest1 = BI->getSuccessor(0);
    BasicBlock *Dest2 = BI->getSuccessor(1);

    if (Dest2 == Dest1) {       // Conditional branch to same location?
      // This branch matches something like this:
      //     br bool %cond, label %Dest, label %Dest
      // and changes it into:  br label %Dest

      // Let the basic block know that we are letting go of one copy of it.
      assert(BI->getParent() && "Terminator not inserted in block!");
      Dest1->removePredecessor(BI->getParent());

      // Replace the conditional branch with an unconditional one.
      Builder.CreateBr(Dest1);
      Value *Cond = BI->getCondition();
      BI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
      return true;
    }

    if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
      // Are we branching on a constant?
      // YES.  Change to an unconditional branch...
      BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
      BasicBlock *OldDest     = Cond->getZExtValue() ? Dest2 : Dest1;

      // Let the basic block know that we are letting go of it.  Based on this,
      // it will adjust its PHI nodes.
      OldDest->removePredecessor(BB);

      // Replace the conditional branch with an unconditional one.
      Builder.CreateBr(Destination);
      BI->eraseFromParent();
      if (DTU)
        DTU->applyUpdates({{DominatorTree::Delete, BB, OldDest}});
      return true;
    }

    return false;
  }
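
  // For example (illustrative IR, not from this file): a branch on a known
  // condition such as
  //     br i1 true, label %live, label %dead
  // is rewritten by the code above into
  //     br label %live
  // after %dead has had its PHI entries for this block removed.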

  if (auto *SI = dyn_cast<SwitchInst>(T)) {
    // If we are switching on a constant, we can convert the switch to an
    // unconditional branch.
    auto *CI = dyn_cast<ConstantInt>(SI->getCondition());
    BasicBlock *DefaultDest = SI->getDefaultDest();
    BasicBlock *TheOnlyDest = DefaultDest;

    // If the default is unreachable, ignore it when searching for TheOnlyDest.
    if (isa<UnreachableInst>(DefaultDest->getFirstNonPHIOrDbg()) &&
        SI->getNumCases() > 0) {
      TheOnlyDest = SI->case_begin()->getCaseSuccessor();
    }

    bool Changed = false;

    // Figure out which case it goes to.
    for (auto i = SI->case_begin(), e = SI->case_end(); i != e;) {
      // Found a case matching the constant operand?
      if (i->getCaseValue() == CI) {
        TheOnlyDest = i->getCaseSuccessor();
        break;
      }

      // Check to see if this branch is going to the same place as the default
      // dest.  If so, eliminate it as an explicit compare.
      if (i->getCaseSuccessor() == DefaultDest) {
        MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
        unsigned NCases = SI->getNumCases();
        // Fold the case metadata into the default if there will be any branches
        // left, unless the metadata doesn't match the switch.
        if (NCases > 1 && MD && MD->getNumOperands() == 2 + NCases) {
          // Collect branch weights into a vector.
          SmallVector<uint32_t, 8> Weights;
          for (unsigned MD_i = 1, MD_e = MD->getNumOperands(); MD_i < MD_e;
               ++MD_i) {
            auto *CI = mdconst::extract<ConstantInt>(MD->getOperand(MD_i));
            Weights.push_back(CI->getValue().getZExtValue());
          }
          // Merge the weight of this case into the default weight.
          unsigned idx = i->getCaseIndex();
          Weights[0] += Weights[idx+1];
          // Remove the weight for this case.
          std::swap(Weights[idx+1], Weights.back());
          Weights.pop_back();
          SI->setMetadata(LLVMContext::MD_prof,
                          MDBuilder(BB->getContext()).
                          createBranchWeights(Weights));
        }
        // Remove this entry.
        BasicBlock *ParentBB = SI->getParent();
        DefaultDest->removePredecessor(ParentBB);
        i = SI->removeCase(i);
        e = SI->case_end();
        Changed = true;
        continue;
      }

      // Otherwise, check to see if the switch only branches to one destination.
      // We do this by resetting "TheOnlyDest" to null when we find two
      // non-equal destinations.
      if (i->getCaseSuccessor() != TheOnlyDest)
        TheOnlyDest = nullptr;

      // Increment this iterator as we haven't removed the case.
      ++i;
    }

    if (CI && !TheOnlyDest) {
      // We are branching on a constant that matches none of the cases, so the
      // branch goes to the default successor.
      TheOnlyDest = SI->getDefaultDest();
    }
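
    // For instance (illustrative IR), a switch on a constant such as
    //     switch i32 7, label %default [ i32 7, label %seven ]
    // leaves TheOnlyDest == %seven, and the code below rewrites the
    // terminator to
    //     br label %seven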

    // If we found a single destination that we can fold the switch into, do so
    // now.
    if (TheOnlyDest) {
      // Insert the new branch.
      Builder.CreateBr(TheOnlyDest);
      BasicBlock *BB = SI->getParent();

      SmallSetVector<BasicBlock *, 8> RemovedSuccessors;

      // Remove entries from PHI nodes which we no longer branch to...
      BasicBlock *SuccToKeep = TheOnlyDest;
      for (BasicBlock *Succ : successors(SI)) {
        if (DTU && Succ != TheOnlyDest)
          RemovedSuccessors.insert(Succ);
        // Is this the destination we are keeping?
        if (Succ == SuccToKeep) {
          SuccToKeep = nullptr; // Don't modify the first branch to TheOnlyDest.
        } else {
          Succ->removePredecessor(BB);
        }
      }

      // Delete the old switch.
      Value *Cond = SI->getCondition();
      SI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
      if (DTU) {
        std::vector<DominatorTree::UpdateType> Updates;
        Updates.reserve(RemovedSuccessors.size());
        for (auto *RemovedSuccessor : RemovedSuccessors)
          Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
        DTU->applyUpdates(Updates);
      }
      return true;
    }

    if (SI->getNumCases() == 1) {
      // Otherwise, we can fold this switch into a conditional branch
      // instruction if it has only one non-default destination.
      auto FirstCase = *SI->case_begin();
      Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
                                         FirstCase.getCaseValue(), "cond");

      // Insert the new branch.
      BranchInst *NewBr = Builder.CreateCondBr(Cond,
                                               FirstCase.getCaseSuccessor(),
                                               SI->getDefaultDest());
      MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
      if (MD && MD->getNumOperands() == 3) {
        ConstantInt *SICase =
            mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
        ConstantInt *SIDef =
            mdconst::dyn_extract<ConstantInt>(MD->getOperand(1));
        assert(SICase && SIDef);
        // The TrueWeight should be the weight for the single case of SI.
        NewBr->setMetadata(LLVMContext::MD_prof,
                           MDBuilder(BB->getContext()).
                           createBranchWeights(SICase->getValue().getZExtValue(),
                                               SIDef->getValue().getZExtValue()));
      }

      // Update make.implicit metadata to the newly-created conditional branch.
      MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit);
      if (MakeImplicitMD)
        NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD);

      // Delete the old switch.
      SI->eraseFromParent();
      return true;
    }
    return Changed;
  }
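
  // For example (illustrative IR), a one-case switch such as
  //     switch i32 %x, label %default [ i32 0, label %zero ]
  // is rewritten by the block above into
  //     %cond = icmp eq i32 %x, 0
  //     br i1 %cond, label %zero, label %default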

  if (auto *IBI = dyn_cast<IndirectBrInst>(T)) {
    // indirectbr blockaddress(@F, @BB) -> br label @BB
    if (auto *BA =
            dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
      BasicBlock *TheOnlyDest = BA->getBasicBlock();
      SmallSetVector<BasicBlock *, 8> RemovedSuccessors;

      // Insert the new branch.
      Builder.CreateBr(TheOnlyDest);

      BasicBlock *SuccToKeep = TheOnlyDest;
      for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
        BasicBlock *DestBB = IBI->getDestination(i);
        if (DTU && DestBB != TheOnlyDest)
          RemovedSuccessors.insert(DestBB);
        if (IBI->getDestination(i) == SuccToKeep) {
          SuccToKeep = nullptr;
        } else {
          DestBB->removePredecessor(BB);
        }
      }
      Value *Address = IBI->getAddress();
      IBI->eraseFromParent();
      if (DeleteDeadConditions)
        // Delete pointer cast instructions.
        RecursivelyDeleteTriviallyDeadInstructions(Address, TLI);

      // Also zap the blockaddress constant if there are no users remaining,
      // otherwise the destination is still marked as having its address taken.
      if (BA->use_empty())
        BA->destroyConstant();

      // If we didn't find our destination in the IBI successor list, then we
      // have undefined behavior.  Replace the unconditional branch with an
      // 'unreachable' instruction.
      if (SuccToKeep) {
        BB->getTerminator()->eraseFromParent();
        new UnreachableInst(BB->getContext(), BB);
      }

      if (DTU) {
        std::vector<DominatorTree::UpdateType> Updates;
        Updates.reserve(RemovedSuccessors.size());
        for (auto *RemovedSuccessor : RemovedSuccessors)
          Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
        DTU->applyUpdates(Updates);
      }
      return true;
    }
  }

  return false;
}

//===----------------------------------------------------------------------===//
//  Local dead code elimination.
//

/// isInstructionTriviallyDead - Return true if the result produced by the
/// instruction is not used, and the instruction has no side effects.
///
bool llvm::isInstructionTriviallyDead(Instruction *I,
                                      const TargetLibraryInfo *TLI) {
  if (!I->use_empty())
    return false;
  return wouldInstructionBeTriviallyDead(I, TLI);
}

bool llvm::wouldInstructionBeTriviallyDead(Instruction *I,
                                           const TargetLibraryInfo *TLI) {
  if (I->isTerminator())
    return false;

  // We don't want the landingpad-like instructions removed by anything this
  // general.
  if (I->isEHPad())
    return false;

  // We don't want debug info removed by anything this general, unless
  // the debug info is empty.
  if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(I)) {
    if (DDI->getAddress())
      return false;
    return true;
  }
  if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(I)) {
    if (DVI->getValue())
      return false;
    return true;
  }
  if (DbgLabelInst *DLI = dyn_cast<DbgLabelInst>(I)) {
    if (DLI->getLabel())
      return false;
    return true;
  }

  if (!I->willReturn())
    return false;

  if (!I->mayHaveSideEffects())
    return true;

  // Special case intrinsics that "may have side effects" but can be deleted
  // when dead.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    // Safe to delete llvm.stacksave and launder.invariant.group if dead.
    if (II->getIntrinsicID() == Intrinsic::stacksave ||
        II->getIntrinsicID() == Intrinsic::launder_invariant_group)
      return true;

    if (II->isLifetimeStartOrEnd()) {
      auto *Arg = II->getArgOperand(1);
      // Lifetime intrinsics are dead when their right-hand operand is undef.
      if (isa<UndefValue>(Arg))
        return true;
      // If the right-hand operand is an alloca, global, or argument and the
      // only uses are lifetime intrinsics, then the intrinsics are dead.
      if (isa<AllocaInst>(Arg) || isa<GlobalValue>(Arg) || isa<Argument>(Arg))
        return llvm::all_of(Arg->uses(), [](Use &Use) {
          if (IntrinsicInst *IntrinsicUse =
                  dyn_cast<IntrinsicInst>(Use.getUser()))
            return IntrinsicUse->isLifetimeStartOrEnd();
          return false;
        });
      return false;
    }
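
    // For example (illustrative IR), given an alloca whose only uses are
    //     call void @llvm.lifetime.start.p0i8(i64 1, i8* %p)
    //     call void @llvm.lifetime.end.p0i8(i64 1, i8* %p)
    // both markers are reported trivially dead by the check above, since
    // removing them cannot affect any real load or store.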

    // Assumptions are dead if their condition is trivially true.  Guards on
    // true are operationally no-ops.  In the future we can consider more
    // sophisticated tradeoffs for guards considering potential for check
    // widening, but for now we keep things simple.
    if ((II->getIntrinsicID() == Intrinsic::assume &&
         isAssumeWithEmptyBundle(*II)) ||
        II->getIntrinsicID() == Intrinsic::experimental_guard) {
      if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0)))
        return !Cond->isZero();

      return false;
    }
  }

  if (isAllocLikeFn(I, TLI))
    return true;

  if (CallInst *CI = isFreeCall(I, TLI))
    if (Constant *C = dyn_cast<Constant>(CI->getArgOperand(0)))
      return C->isNullValue() || isa<UndefValue>(C);

  if (auto *Call = dyn_cast<CallBase>(I))
    if (isMathLibCallNoop(Call, TLI))
      return true;

  return false;
}
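
// For instance (illustrative IR), each of the following would be reported as
// trivially dead by the function above when its result is unused, assuming
// TLI recognizes the library calls:
//     %p = call i8* @malloc(i64 16)        ; alloc-like call
//     call void @llvm.assume(i1 true)      ; assumption of a trivial truth
//     %v = call double @cos(double 1.0)    ; math libcall with no side effects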

/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
/// trivially dead instruction, delete it.  If that makes any of its operands
/// trivially dead, delete them too, recursively.  Return true if any
/// instructions were deleted.
bool llvm::RecursivelyDeleteTriviallyDeadInstructions(
    Value *V, const TargetLibraryInfo *TLI, MemorySSAUpdater *MSSAU,
    std::function<void(Value *)> AboutToDeleteCallback) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !isInstructionTriviallyDead(I, TLI))
    return false;

  SmallVector<WeakTrackingVH, 16> DeadInsts;
  DeadInsts.push_back(I);
  RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
                                             AboutToDeleteCallback);

  return true;
}

bool llvm::RecursivelyDeleteTriviallyDeadInstructionsPermissive(
    SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
    MemorySSAUpdater *MSSAU,
    std::function<void(Value *)> AboutToDeleteCallback) {
  unsigned S = 0, E = DeadInsts.size(), Alive = 0;
  for (; S != E; ++S) {
    auto *I = cast<Instruction>(DeadInsts[S]);
    if (!isInstructionTriviallyDead(I)) {
      DeadInsts[S] = nullptr;
      ++Alive;
    }
  }
  if (Alive == E)
    return false;
  RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
                                             AboutToDeleteCallback);
  return true;
}

void llvm::RecursivelyDeleteTriviallyDeadInstructions(
    SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
    MemorySSAUpdater *MSSAU,
    std::function<void(Value *)> AboutToDeleteCallback) {
  // Process the dead instruction list until empty.
  while (!DeadInsts.empty()) {
    Value *V = DeadInsts.pop_back_val();
    Instruction *I = cast_or_null<Instruction>(V);
    if (!I)
      continue;
    assert(isInstructionTriviallyDead(I, TLI) &&
           "Live instruction found in dead worklist!");
    assert(I->use_empty() && "Instructions with uses are not dead.");

    // Don't lose the debug info while deleting the instructions.
    salvageDebugInfo(*I);

    if (AboutToDeleteCallback)
      AboutToDeleteCallback(I);

    // Null out all of the instruction's operands to see if any operand becomes
    // dead as we go.
    for (Use &OpU : I->operands()) {
      Value *OpV = OpU.get();
      OpU.set(nullptr);

      if (!OpV->use_empty())
        continue;

      // If the operand is an instruction that became dead as we nulled out the
      // operand, and if it is 'trivially' dead, delete it in a future loop
      // iteration.
      if (Instruction *OpI = dyn_cast<Instruction>(OpV))
        if (isInstructionTriviallyDead(OpI, TLI))
          DeadInsts.push_back(OpI);
    }
    if (MSSAU)
      MSSAU->removeMemoryAccess(I);

    I->eraseFromParent();
  }
}

bool llvm::replaceDbgUsesWithUndef(Instruction *I) {
  SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
  findDbgUsers(DbgUsers, I);
  for (auto *DII : DbgUsers) {
    Value *Undef = UndefValue::get(I->getType());
    DII->replaceVariableLocationOp(I, Undef);
  }
  return !DbgUsers.empty();
}

/// areAllUsesEqual - Check whether the uses of a value are all the same.
/// This is similar to Instruction::hasOneUse() except this will also return
/// true when there are no uses or multiple uses that all refer to the same
/// value.
static bool areAllUsesEqual(Instruction *I) {
  Value::user_iterator UI = I->user_begin();
  Value::user_iterator UE = I->user_end();
  if (UI == UE)
    return true;

  User *TheUse = *UI;
  for (++UI; UI != UE; ++UI) {
    if (*UI != TheUse)
      return false;
  }
  return true;
}

/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
/// dead PHI node, due to being a def-use chain of single-use nodes that
/// either forms a cycle or is terminated by a trivially dead instruction,
/// delete it.  If that makes any of its operands trivially dead, delete them
/// too, recursively.  Return true if a change was made.
bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
                                        const TargetLibraryInfo *TLI,
                                        llvm::MemorySSAUpdater *MSSAU) {
  SmallPtrSet<Instruction*, 4> Visited;
  for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
       I = cast<Instruction>(*I->user_begin())) {
    if (I->use_empty())
      return RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);

    // If we find an instruction more than once, we're on a cycle that
    // won't prove fruitful.
    if (!Visited.insert(I).second) {
      // Break the cycle and delete the instruction and its operands.
      I->replaceAllUsesWith(UndefValue::get(I->getType()));
      (void)RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);
      return true;
    }
  }
  return false;
}
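
// For example (illustrative IR), two phis that only feed each other,
//     %a = phi i32 [ 0, %entry ], [ %b, %loop ]
//     %b = phi i32 [ 1, %entry ], [ %a, %loop ]
// form an effectively dead cycle: the walk above revisits a node, breaks the
// cycle by replacing it with undef, and then deletes both phis.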

static bool
simplifyAndDCEInstruction(Instruction *I,
                          SmallSetVector<Instruction *, 16> &WorkList,
                          const DataLayout &DL,
                          const TargetLibraryInfo *TLI) {
  if (isInstructionTriviallyDead(I, TLI)) {
    salvageDebugInfo(*I);

    // Null out all of the instruction's operands to see if any operand becomes
    // dead as we go.
    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
      Value *OpV = I->getOperand(i);
      I->setOperand(i, nullptr);

      if (!OpV->use_empty() || I == OpV)
        continue;

      // If the operand is an instruction that became dead as we nulled out the
      // operand, and if it is 'trivially' dead, delete it in a future loop
      // iteration.
      if (Instruction *OpI = dyn_cast<Instruction>(OpV))
        if (isInstructionTriviallyDead(OpI, TLI))
          WorkList.insert(OpI);
    }

    I->eraseFromParent();

    return true;
  }

  if (Value *SimpleV = SimplifyInstruction(I, DL)) {
    // Add the users to the worklist. CAREFUL: an instruction can use itself,
    // in the case of a phi node.
    for (User *U : I->users()) {
      if (U != I) {
        WorkList.insert(cast<Instruction>(U));
      }
    }

    // Replace the instruction with its simplified value.
    bool Changed = false;
    if (!I->use_empty()) {
      I->replaceAllUsesWith(SimpleV);
      Changed = true;
    }
    if (isInstructionTriviallyDead(I, TLI)) {
      I->eraseFromParent();
      Changed = true;
    }
    return Changed;
  }
  return false;
}

/// SimplifyInstructionsInBlock - Scan the specified basic block and try to
/// simplify any instructions in it and recursively delete dead instructions.
///
/// This returns true if it changed the code; note that it can delete
/// instructions in other blocks as well as in this block.
bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB,
                                       const TargetLibraryInfo *TLI) {
  bool MadeChange = false;
  const DataLayout &DL = BB->getModule()->getDataLayout();

#ifndef NDEBUG
  // In debug builds, ensure that the terminator of the block is never replaced
  // or deleted by these simplifications. The idea of simplification is that it
  // cannot introduce new instructions, and there is no way to replace the
  // terminator of a block without introducing a new instruction.
  AssertingVH<Instruction> TerminatorVH(&BB->back());
#endif

  SmallSetVector<Instruction *, 16> WorkList;
  // Iterate over the original function, only adding insts to the worklist
  // if they actually need to be revisited. This avoids having to pre-init
  // the worklist with the entire function's worth of instructions.
  for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end());
       BI != E;) {
    assert(!BI->isTerminator());
    Instruction *I = &*BI;
    ++BI;

    // We're visiting this instruction now, so make sure it's not in the
    // worklist from an earlier visit.
    if (!WorkList.count(I))
      MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
  }

  while (!WorkList.empty()) {
    Instruction *I = WorkList.pop_back_val();
    MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
  }
  return MadeChange;
}
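
// A typical caller (sketch, not from this file) runs this over every block of
// a function and accumulates the result:
//     bool Changed = false;
//     for (BasicBlock &BB : F)
//       Changed |= SimplifyInstructionsInBlock(&BB, TLI);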

//===----------------------------------------------------------------------===//
//  Control Flow Graph Restructuring.
//

void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB,
                                       DomTreeUpdater *DTU) {

  // If BB has single-entry PHI nodes, fold them.
  while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
    Value *NewVal = PN->getIncomingValue(0);
    // Replace a self-referencing PHI with undef; it must be dead.
    if (NewVal == PN) NewVal = UndefValue::get(PN->getType());
    PN->replaceAllUsesWith(NewVal);
    PN->eraseFromParent();
  }

  BasicBlock *PredBB = DestBB->getSinglePredecessor();
  assert(PredBB && "Block doesn't have a single predecessor!");

  bool ReplaceEntryBB = false;
  if (PredBB == &DestBB->getParent()->getEntryBlock())
    ReplaceEntryBB = true;

  // DTU updates: Collect all the edges that enter
  // PredBB. These dominator edges will be redirected to DestBB.
  SmallVector<DominatorTree::UpdateType, 32> Updates;

  if (DTU) {
    for (BasicBlock *PredPredBB : predecessors(PredBB)) {
      // This predecessor of PredBB may already have DestBB as a successor.
      if (!llvm::is_contained(successors(PredPredBB), DestBB))
        Updates.push_back({DominatorTree::Insert, PredPredBB, DestBB});
      Updates.push_back({DominatorTree::Delete, PredPredBB, PredBB});
    }
    Updates.push_back({DominatorTree::Delete, PredBB, DestBB});
  }

  // Zap anything that took the address of DestBB. Not doing this will give the
  // address an invalid value.
  if (DestBB->hasAddressTaken()) {
    BlockAddress *BA = BlockAddress::get(DestBB);
    Constant *Replacement =
        ConstantInt::get(Type::getInt32Ty(BA->getContext()), 1);
    BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
                                                     BA->getType()));
    BA->destroyConstant();
  }

  // Anything that branched to PredBB now branches to DestBB.
  PredBB->replaceAllUsesWith(DestBB);

  // Splice all the instructions from PredBB to DestBB.
  PredBB->getTerminator()->eraseFromParent();
  DestBB->getInstList().splice(DestBB->begin(), PredBB->getInstList());
  new UnreachableInst(PredBB->getContext(), PredBB);

  // If the PredBB is the entry block of the function, move DestBB up to
  // become the entry block after we erase PredBB.
  if (ReplaceEntryBB)
    DestBB->moveAfter(PredBB);

  if (DTU) {
    assert(PredBB->getInstList().size() == 1 &&
           isa<UnreachableInst>(PredBB->getTerminator()) &&
           "The successor list of PredBB isn't empty before "
           "applying corresponding DTU updates.");
    DTU->applyUpdatesPermissive(Updates);
    DTU->deleteBB(PredBB);
    // Recalculation of DomTree is needed when updating a forward DomTree and
    // the Entry BB is replaced.
    if (ReplaceEntryBB && DTU->hasDomTree()) {
      // The entry block was removed and there is no external interface for
      // the dominator tree to be notified of this change. In this corner-case
      // we recalculate the entire tree.
      DTU->recalculate(*(DestBB->getParent()));
    }
  } else {
    PredBB->eraseFromParent(); // Nuke BB if DTU is nullptr.
  }
}

/// Return true if we can choose one of these values to use in place of the
/// other. Note that we will always choose the non-undef value to keep.
static bool CanMergeValues(Value *First, Value *Second) {
  return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second);
}
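
// For example, CanMergeValues(%x, undef) and CanMergeValues(undef, %x) are
// both true, and the callers below keep %x; CanMergeValues(%x, %y) for two
// distinct non-undef values is false.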

/// Return true if we can fold BB, an almost-empty BB ending in an unconditional
/// branch to Succ, into Succ.
///
/// Assumption: Succ is the single successor for BB.
static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
  assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");

  LLVM_DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
                    << Succ->getName() << "\n");
  // Shortcut: if there is only a single predecessor it must be BB and merging
  // is always safe.
  if (Succ->getSinglePredecessor()) return true;

  // Make a list of the predecessors of BB.
  SmallPtrSet<BasicBlock*, 16> BBPreds(pred_begin(BB), pred_end(BB));

  // Look at all the phi nodes in Succ, to see if they present a conflict when
  // merging these blocks.
  for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);

    // If the incoming value from BB is again a PHINode in
    // BB which has the same incoming value for *PI as PN does, we can
    // merge the phi nodes and then the blocks can still be merged.
    PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
    if (BBPN && BBPN->getParent() == BB) {
      for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
        BasicBlock *IBB = PN->getIncomingBlock(PI);
        if (BBPreds.count(IBB) &&
            !CanMergeValues(BBPN->getIncomingValueForBlock(IBB),
                            PN->getIncomingValue(PI))) {
          LLVM_DEBUG(dbgs()
                     << "Can't fold, phi node " << PN->getName() << " in "
                     << Succ->getName() << " is conflicting with "
                     << BBPN->getName() << " with regard to common predecessor "
                     << IBB->getName() << "\n");
          return false;
        }
      }
    } else {
      Value* Val = PN->getIncomingValueForBlock(BB);
      for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
        // See if the incoming value for the common predecessor is equal to the
        // one for BB, in which case this phi node will not prevent the merging
        // of the block.
        BasicBlock *IBB = PN->getIncomingBlock(PI);
        if (BBPreds.count(IBB) &&
            !CanMergeValues(Val, PN->getIncomingValue(PI))) {
          LLVM_DEBUG(dbgs() << "Can't fold, phi node " << PN->getName()
                            << " in " << Succ->getName()
                            << " is conflicting with regard to common "
                            << "predecessor " << IBB->getName() << "\n");
          return false;
        }
      }
    }
  }

  return true;
}
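
// For example (illustrative IR), suppose %pred branches to both %bb and
// %succ, %bb ends in "br label %succ", and %succ contains
//     %p = phi i32 [ 0, %bb ], [ 1, %pred ]
// Folding %bb into %succ would require %p to merge the incoming values 0 and
// 1 for the common predecessor %pred, so the function above rejects the fold.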

using PredBlockVector = SmallVector<BasicBlock *, 16>;
using IncomingValueMap = DenseMap<BasicBlock *, Value *>;

/// Determines the value to use as the phi node input for a block.
///
/// Select between \p OldVal and any value that we know flows from \p BB
/// to a particular phi on the basis of which one (if either) is not
/// undef. Update IncomingValues based on the selected value.
///
/// \param OldVal The value we are considering selecting.
/// \param BB The block that the value flows in from.
/// \param IncomingValues A map from block-to-value for other phi inputs
/// that we have examined.
///
/// \returns the selected value.
static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
                                          IncomingValueMap &IncomingValues) {
  if (!isa<UndefValue>(OldVal)) {
    assert((!IncomingValues.count(BB) ||
            IncomingValues.find(BB)->second == OldVal) &&
           "Expected OldVal to match incoming value from BB!");

    IncomingValues.insert(std::make_pair(BB, OldVal));
    return OldVal;
  }

  IncomingValueMap::const_iterator It = IncomingValues.find(BB);
  if (It != IncomingValues.end()) return It->second;

  return OldVal;
}

/// Create a map from block to value for the operands of a
/// given phi.
///
/// Create a map from block to value for each non-undef value flowing
/// into \p PN.
///
/// \param PN The phi we are collecting the map for.
/// \param IncomingValues [out] The map from block to value for this phi.
static void gatherIncomingValuesToPhi(PHINode *PN,
                                      IncomingValueMap &IncomingValues) {
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *BB = PN->getIncomingBlock(i);
    Value *V = PN->getIncomingValue(i);

    if (!isa<UndefValue>(V))
      IncomingValues.insert(std::make_pair(BB, V));
  }
}

/// Replace the incoming undef values to a phi with the values
/// from a block-to-value map.
///
/// \param PN The phi we are replacing the undefs in.
/// \param IncomingValues A map from block to value.
static void replaceUndefValuesInPhi(PHINode *PN,
                                    const IncomingValueMap &IncomingValues) {
  SmallVector<unsigned> TrueUndefOps;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);

    if (!isa<UndefValue>(V)) continue;

    BasicBlock *BB = PN->getIncomingBlock(i);
    IncomingValueMap::const_iterator It = IncomingValues.find(BB);

    // Keep track of undef/poison incoming values. Those must match, so we fix
    // them up below if needed.
    // Note: this is conservatively correct, but we could try harder and group
    // the undef values per incoming basic block.
    if (It == IncomingValues.end()) {
      TrueUndefOps.push_back(i);
      continue;
    }

    // There is a defined value for this incoming block, so map this undef
    // incoming value to the defined value.
    PN->setIncomingValue(i, It->second);
  }

  // If there are both undef and poison values incoming, then convert those
  // values to undef. It is invalid to have different values for the same
  // incoming block.
  unsigned PoisonCount = count_if(TrueUndefOps, [&](unsigned i) {
    return isa<PoisonValue>(PN->getIncomingValue(i));
  });
  if (PoisonCount != 0 && PoisonCount != TrueUndefOps.size()) {
    for (unsigned i : TrueUndefOps)
      PN->setIncomingValue(i, UndefValue::get(PN->getType()));
  }
}
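
// For example (illustrative IR), a phi that ends up with duplicate entries
// for a common predecessor while blocks are being merged,
//     %p = phi i32 [ %v, %pred ], [ undef, %pred ]
// has its map record %pred -> %v, so the undef entry is rewritten to %v,
// keeping both entries for %pred consistent.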

/// Replace a value flowing from a block to a phi with
/// potentially multiple instances of that value flowing from the
/// block's predecessors to the phi.
///
/// \param BB The block with the value flowing into the phi.
/// \param BBPreds The predecessors of BB.
/// \param PN The phi that we are updating.
static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB,
                                                const PredBlockVector &BBPreds,
                                                PHINode *PN) {
  Value *OldVal = PN->removeIncomingValue(BB, false);
  assert(OldVal && "No entry in PHI for Pred BB!");

  IncomingValueMap IncomingValues;

  // We are merging two blocks - BB, and the block containing PN - and
  // as a result we need to redirect edges from the predecessors of BB
  // to go to the block containing PN, and update PN
  // accordingly. Since we allow merging blocks in the case where the
  // predecessor and successor blocks both share some predecessors,
  // and where some of those common predecessors might have undef
  // values flowing into PN, we want to rewrite those values to be
  // consistent with the non-undef values.

  gatherIncomingValuesToPhi(PN, IncomingValues);

  // If this incoming value is one of the PHI nodes in BB, the new entries
  // in the PHI node are the entries from the old PHI.
  if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
    PHINode *OldValPN = cast<PHINode>(OldVal);
    for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
      // Note that, since we are merging phi nodes and BB and Succ might
      // have common predecessors, we could end up with a phi node with
      // identical incoming branches. This will be cleaned up later (and
      // will trigger asserts if we try to clean it up now, without also
      // simplifying the corresponding conditional branch).
      BasicBlock *PredBB = OldValPN->getIncomingBlock(i);
      Value *PredVal = OldValPN->getIncomingValue(i);
      Value *Selected = selectIncomingValueForBlock(PredVal, PredBB,
                                                    IncomingValues);

      // And add a new incoming value for this predecessor for the
      // newly retargeted branch.
      PN->addIncoming(Selected, PredBB);
    }
  } else {
    for (unsigned i = 0, e = BBPreds.size(); i != e; ++i) {
      // Update existing incoming values in PN for this
      // predecessor of BB.
      BasicBlock *PredBB = BBPreds[i];
      Value *Selected = selectIncomingValueForBlock(OldVal, PredBB,
                                                    IncomingValues);

      // And add a new incoming value for this predecessor for the
      // newly retargeted branch.
      PN->addIncoming(Selected, PredBB);
    }
  }

  replaceUndefValuesInPhi(PN, IncomingValues);
}

bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
                                                   DomTreeUpdater *DTU) {
  assert(BB != &BB->getParent()->getEntryBlock() &&
         "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");

  // We can't eliminate infinite loops.
  BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
  if (BB == Succ) return false;

  // Check to see if merging these blocks would cause conflicts for any of the
  // phi nodes in BB or Succ. If not, we can safely merge.
  if (!CanPropagatePredecessorsForPHIs(BB, Succ)) return false;

  // Check for cases where Succ has multiple predecessors and a PHI node in BB
  // has uses which will not disappear when the PHI nodes are merged.  It is
  // possible to handle such cases, but difficult: it requires checking whether
  // BB dominates Succ, which is non-trivial to calculate in the case where
  // Succ has multiple predecessors.  Also, it requires checking whether
  // constructing the necessary self-referential PHI node doesn't introduce any
  // conflicts; this isn't too difficult, but the previous code for doing this
  // was incorrect.
  //
  // Note that if this check finds a live use, BB dominates Succ, so BB is
  // something like a loop pre-header (or rarely, a part of an irreducible CFG);
  // folding the branch isn't profitable in that case anyway.
  if (!Succ->getSinglePredecessor()) {
    BasicBlock::iterator BBI = BB->begin();
    while (isa<PHINode>(*BBI)) {
      for (Use &U : BBI->uses()) {
        if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
          if (PN->getIncomingBlock(U) != BB)
            return false;
        } else {
          return false;
        }
      }
      ++BBI;
    }
  }

  // We cannot fold the block if it's a branch to an already present callbr
  // successor because that creates duplicate successors.
  for (BasicBlock *PredBB : predecessors(BB)) {
    if (auto *CBI = dyn_cast<CallBrInst>(PredBB->getTerminator())) {
      if (Succ == CBI->getDefaultDest())
        return false;
      for (unsigned i = 0, e = CBI->getNumIndirectDests(); i != e; ++i)
        if (Succ == CBI->getIndirectDest(i))
          return false;
    }
  }

  LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);

  SmallVector<DominatorTree::UpdateType, 32> Updates;
  if (DTU) {
    // All predecessors of BB will be moved to Succ.
    SmallSetVector<BasicBlock *, 8> Predecessors(pred_begin(BB), pred_end(BB));
    Updates.reserve(Updates.size() + 2 * Predecessors.size());
    for (auto *Predecessor : Predecessors) {
      // This predecessor of BB may already have Succ as a successor.
      if (!llvm::is_contained(successors(Predecessor), Succ))
        Updates.push_back({DominatorTree::Insert, Predecessor, Succ});
      Updates.push_back({DominatorTree::Delete, Predecessor, BB});
    }
    Updates.push_back({DominatorTree::Delete, BB, Succ});
  }

  if (isa<PHINode>(Succ->begin())) {
    // If there is more than one pred of succ, and there are PHI nodes in
    // the successor, then we need to add incoming edges for the PHI nodes.
    //
    const PredBlockVector BBPreds(pred_begin(BB), pred_end(BB));

    // Loop over all of the PHI nodes in the successor of BB.
    for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
      PHINode *PN = cast<PHINode>(I);

      redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN);
    }
  }

  if (Succ->getSinglePredecessor()) {
    // BB is the only predecessor of Succ, so Succ will end up with exactly
    // the same predecessors BB had.

    // Copy over any phi, debug or lifetime instruction.
    BB->getTerminator()->eraseFromParent();
    Succ->getInstList().splice(Succ->getFirstNonPHI()->getIterator(),
                               BB->getInstList());
  } else {
    while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
      // We explicitly check for such uses in CanPropagatePredecessorsForPHIs.
      assert(PN->use_empty() && "There shouldn't be any uses here!");
      PN->eraseFromParent();
    }
  }

  // If the unconditional branch we replaced contains llvm.loop metadata, we
  // add the metadata to the branch instructions in the predecessors.
  unsigned LoopMDKind = BB->getContext().getMDKindID("llvm.loop");
  Instruction *TI = BB->getTerminator();
  if (TI)
    if (MDNode *LoopMD = TI->getMetadata(LoopMDKind))
      for (BasicBlock *Pred : predecessors(BB))
        Pred->getTerminator()->setMetadata(LoopMDKind, LoopMD);

  // For AutoFDO, since BB is going to be removed, we won't be able to sample
  // it. To avoid assigning a zero weight for BB, move all its pseudo probes
  // into Succ and mark them dangling. This should give the counts inference a
  // chance to compute a more reasonable weight for BB.
  moveAndDanglePseudoProbes(BB, &*Succ->getFirstInsertionPt());

  // Everything that jumped to BB now goes to Succ.
  BB->replaceAllUsesWith(Succ);
  if (!Succ->hasName()) Succ->takeName(BB);

  // Clear the successor list of BB to match the updates applied to the DTU
  // later.
  if (BB->getTerminator())
    BB->getInstList().pop_back();
  new UnreachableInst(BB->getContext(), BB);
  assert(succ_empty(BB) && "The successor list of BB isn't empty before "
                           "applying corresponding DTU updates.");

  if (DTU) {
    DTU->applyUpdates(Updates);
    DTU->deleteBB(BB);
  } else {
    BB->eraseFromParent(); // Delete the old basic block.
  }
  return true;
}

static bool EliminateDuplicatePHINodesNaiveImpl(BasicBlock *BB) {
  // This implementation doesn't currently consider undef operands
  // specially. Theoretically, two phis which are identical except for
  // one having an undef where the other doesn't could be collapsed.

  bool Changed = false;

  // Examine each PHI.
  // Note that increment of I must *NOT* be in the iteration_expression, since
  // we don't want to immediately advance when we restart from the beginning.
  for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I);) {
    ++I;
    // Is there an identical PHI node in this basic block?
    // Note that we only look in the upper square's triangle;
    // we already checked that the lower-triangle PHIs aren't identical.
    for (auto J = I; PHINode *DuplicatePN = dyn_cast<PHINode>(J); ++J) {
      if (!DuplicatePN->isIdenticalToWhenDefined(PN))
        continue;
      // A duplicate. Replace this PHI with the base PHI.
      ++NumPHICSEs;
      DuplicatePN->replaceAllUsesWith(PN);
      DuplicatePN->eraseFromParent();
      Changed = true;

      // The RAUW can change PHIs that we already visited.
      I = BB->begin();
      break; // Start over from the beginning.
    }
  }
  return Changed;
}
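
// For example (illustrative IR), two phis with identical incoming lists,
//     %a = phi i32 [ 0, %left ], [ 1, %right ]
//     %b = phi i32 [ 0, %left ], [ 1, %right ]
// are duplicates: both implementations replace all uses of %b with %a and
// erase %b.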

static bool EliminateDuplicatePHINodesSetBasedImpl(BasicBlock *BB) {
  // This implementation doesn't currently consider undef operands
  // specially. Theoretically, two phis which are identical except for
  // one having an undef where the other doesn't could be collapsed.

  struct PHIDenseMapInfo {
    static PHINode *getEmptyKey() {
      return DenseMapInfo<PHINode *>::getEmptyKey();
    }

    static PHINode *getTombstoneKey() {
      return DenseMapInfo<PHINode *>::getTombstoneKey();
    }

    static bool isSentinel(PHINode *PN) {
      return PN == getEmptyKey() || PN == getTombstoneKey();
    }

    // WARNING: this logic must be kept in sync with
    //          Instruction::isIdenticalToWhenDefined()!
    static unsigned getHashValueImpl(PHINode *PN) {
      // Compute a hash value on the operands. Instcombine will likely have
      // sorted them, which helps expose duplicates, but we have to check all
      // the operands to be safe in case instcombine hasn't run.
      return static_cast<unsigned>(hash_combine(
          hash_combine_range(PN->value_op_begin(), PN->value_op_end()),
          hash_combine_range(PN->block_begin(), PN->block_end())));
    }

    static unsigned getHashValue(PHINode *PN) {
#ifndef NDEBUG
      // If -phicse-debug-hash was specified, return a constant -- this
      // will force all hashing to collide, so we'll exhaustively search
      // the table for a match, and the assertion in isEqual will fire if
      // there's a bug causing equal keys to hash differently.
      if (PHICSEDebugHash)
        return 0;
#endif
      return getHashValueImpl(PN);
    }

    static bool isEqualImpl(PHINode *LHS, PHINode *RHS) {
      if (isSentinel(LHS) || isSentinel(RHS))
        return LHS == RHS;
      return LHS->isIdenticalTo(RHS);
    }

    static bool isEqual(PHINode *LHS, PHINode *RHS) {
      // These comparisons are nontrivial, so assert that equality implies
      // hash equality (DenseMap demands this as an invariant).
      bool Result = isEqualImpl(LHS, RHS);
      assert(!Result || (isSentinel(LHS) && LHS == RHS) ||
             getHashValueImpl(LHS) == getHashValueImpl(RHS));
      return Result;
    }
  };

  // Set of unique PHINodes.
  DenseSet<PHINode *, PHIDenseMapInfo> PHISet;
  PHISet.reserve(4 * PHICSENumPHISmallSize);

  // Examine each PHI.
  bool Changed = false;
  for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) {
    auto Inserted = PHISet.insert(PN);
    if (!Inserted.second) {
      // A duplicate. Replace this PHI with its duplicate.
      ++NumPHICSEs;
      PN->replaceAllUsesWith(*Inserted.first);
      PN->eraseFromParent();
      Changed = true;

      // The RAUW can change PHIs that we already visited. Start over from the
      // beginning.
      PHISet.clear();
      I = BB->begin();
    }
  }

  return Changed;
}

bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
  if (
#ifndef NDEBUG
      !PHICSEDebugHash &&
#endif
      hasNItemsOrLess(BB->phis(), PHICSENumPHISmallSize))
    return EliminateDuplicatePHINodesNaiveImpl(BB);
  return EliminateDuplicatePHINodesSetBasedImpl(BB);
}

/// If the specified pointer points to an object that we control, try to modify
/// the object's alignment to PrefAlign. Returns a minimum known alignment of
/// the value after the operation, which may be lower than PrefAlign.
///
/// Increasing value alignment isn't often possible though. If alignment is
/// important, a more reliable approach is to simply align all global variables
/// and allocation instructions to their preferred alignment from the beginning.
static Align tryEnforceAlignment(Value *V, Align PrefAlign,
                                 const DataLayout &DL) {
  V = V->stripPointerCasts();

  if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    // TODO: Ideally, this function would not be called if PrefAlign is smaller
    // than the current alignment, as the known bits calculation should have
    // already taken it into account. However, this is not always the case,
    // as computeKnownBits() has a depth limit, while stripPointerCasts()
    // doesn't.
    Align CurrentAlign = AI->getAlign();
    if (PrefAlign <= CurrentAlign)
      return CurrentAlign;

    // If the preferred alignment is greater than the natural stack alignment
    // then don't round up. This avoids dynamic stack realignment.
    if (DL.exceedsNaturalStackAlignment(PrefAlign))
      return CurrentAlign;
    AI->setAlignment(PrefAlign);
    return PrefAlign;
  }

  if (auto *GO = dyn_cast<GlobalObject>(V)) {
    // TODO: as above, this shouldn't be necessary.
    Align CurrentAlign = GO->getPointerAlignment(DL);
    if (PrefAlign <= CurrentAlign)
      return CurrentAlign;

    // If there is a large requested alignment and we can, bump up the alignment
    // of the global. If the memory we set aside for the global may not be the
    // memory used by the final program then it is impossible for us to reliably
    // enforce the preferred alignment.
    if (!GO->canIncreaseAlignment())
      return CurrentAlign;

    GO->setAlignment(PrefAlign);
    return PrefAlign;
  }

  return Align(1);
}

Align llvm::getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign,
                                       const DataLayout &DL,
                                       const Instruction *CxtI,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT) {
  assert(V->getType()->isPointerTy() &&
         "getOrEnforceKnownAlignment expects a pointer!");

  KnownBits Known = computeKnownBits(V, DL, 0, AC, CxtI, DT);
  unsigned TrailZ = Known.countMinTrailingZeros();

  // Avoid trouble with ridiculously large TrailZ values, such as
  // those computed from a null pointer.
  // LLVM doesn't support alignments larger than (1 << MaxAlignmentExponent).
  TrailZ = std::min(TrailZ, +Value::MaxAlignmentExponent);

  Align Alignment = Align(1ull << std::min(Known.getBitWidth() - 1, TrailZ));

  if (PrefAlign && *PrefAlign > Alignment)
    Alignment = std::max(Alignment, tryEnforceAlignment(V, *PrefAlign, DL));

  // Return the best known alignment, possibly raised by the enforcement above.
  return Alignment;
}
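
// Worked example (illustrative): if computeKnownBits proves the low 4 bits of
// the pointer are zero, TrailZ is 4 and the known alignment is
// Align(1 << 4) == 16 bytes. A PrefAlign of 32 would then make the function
// above attempt to raise the object's alignment via tryEnforceAlignment.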

///===---------------------------------------------------------------------===//
///  Dbg Intrinsic utilities
///

/// See if there is a dbg.value intrinsic for DIVar for the PHI node.
static bool PhiHasDebugValue(DILocalVariable *DIVar,
                             DIExpression *DIExpr,
                             PHINode *APN) {
  // Since we can't guarantee that the original dbg.declare intrinsic
  // is removed by LowerDbgDeclare(), we need to make sure that we are
  // not inserting the same dbg.value intrinsic over and over.
  SmallVector<DbgValueInst *, 1> DbgValues;
  findDbgValues(DbgValues, APN);
  for (auto *DVI : DbgValues) {
    assert(DVI->getValue() == APN);
    if ((DVI->getVariable() == DIVar) && (DVI->getExpression() == DIExpr))
      return true;
  }
  return false;
}

/// Check if the alloc size of \p ValTy is large enough to cover the variable
/// (or fragment of the variable) described by \p DII.
///
/// This is primarily intended as a helper for the different
/// ConvertDebugDeclareToDebugValue functions. The dbg.declare/dbg.addr that is
/// converted describes an alloca'd variable, so we need to use the
/// alloc size of the value when doing the comparison. E.g. an i1 value will be
/// identified as covering an n-bit fragment, if the store size of i1 is at
/// least n bits.
static bool valueCoversEntireFragment(Type *ValTy, DbgVariableIntrinsic *DII) {
  const DataLayout &DL = DII->getModule()->getDataLayout();
  TypeSize ValueSize = DL.getTypeAllocSizeInBits(ValTy);
  if (Optional<uint64_t> FragmentSize = DII->getFragmentSizeInBits()) {
    assert(!ValueSize.isScalable() &&
           "Fragments don't work on scalable types.");
    return ValueSize.getFixedSize() >= *FragmentSize;
  }
  // We can't always calculate the size of the DI variable (e.g. if it is a
  // VLA). Try to use the size of the alloca that the dbg intrinsic describes
  // instead.
  if (DII->isAddressOfVariable())
    if (auto *AI = dyn_cast_or_null<AllocaInst>(DII->getVariableLocationOp(0)))
      if (Optional<TypeSize> FragmentSize = AI->getAllocationSizeInBits(DL)) {
        assert(ValueSize.isScalable() == FragmentSize->isScalable() &&
               "Both sizes should agree on the scalable flag.");
        return TypeSize::isKnownGE(ValueSize, *FragmentSize);
      }
  // Could not determine size of variable. Conservatively return false.
  return false;
}
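
// Worked example (illustrative): for a variable fragment of 32 bits, an i32
// value (alloc size 32 bits) covers the fragment and the check above
// succeeds; an i8 value (8 bits) does not, so the conversion paths below
// either bail out or fall back to describing the variable as undef.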

/// Produce a DebugLoc to use for each dbg.declare/inst pair that is promoted
/// to a dbg.value. Because no machine insts can come from debug intrinsics,
/// only the scope and inlinedAt fields are significant. Zero line numbers are
/// used in case this DebugLoc leaks into any adjacent instructions.
static DebugLoc getDebugValueLoc(DbgVariableIntrinsic *DII, Instruction *Src) {
  // Original dbg.declare must have a location.
  DebugLoc DeclareLoc = DII->getDebugLoc();
  MDNode *Scope = DeclareLoc.getScope();
  DILocation *InlinedAt = DeclareLoc.getInlinedAt();
  // Produce an unknown location with the correct scope / inlinedAt fields.
  return DILocation::get(DII->getContext(), 0, 0, Scope, InlinedAt);
}

/// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
/// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
                                           StoreInst *SI, DIBuilder &Builder) {
  assert(DII->isAddressOfVariable());
  auto *DIVar = DII->getVariable();
  assert(DIVar && "Missing variable");
  auto *DIExpr = DII->getExpression();
  Value *DV = SI->getValueOperand();

  DebugLoc NewLoc = getDebugValueLoc(DII, SI);

  if (!valueCoversEntireFragment(DV->getType(), DII)) {
    // FIXME: If storing to a part of the variable described by the
    // dbg.declare, then we want to insert a dbg.value for the corresponding
    // fragment.
    LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
                      << *DII << '\n');
    // For now, when there is a store to parts of the variable (but we do not
    // know which part) we insert a dbg.value intrinsic to indicate that we
    // know nothing about the variable's content.
    DV = UndefValue::get(DV->getType());
    Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc, SI);
    return;
  }

  Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc, SI);
}

/// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
/// that has an associated llvm.dbg.declare or llvm.dbg.addr intrinsic.
void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
                                           LoadInst *LI, DIBuilder &Builder) {
  auto *DIVar = DII->getVariable();
  auto *DIExpr = DII->getExpression();
  assert(DIVar && "Missing variable");

  if (!valueCoversEntireFragment(LI->getType(), DII)) {
    // FIXME: If only referring to a part of the variable described by the
    // dbg.declare, then we want to insert a dbg.value for the corresponding
    // fragment.
    LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
                      << *DII << '\n');
    return;
  }

  DebugLoc NewLoc = getDebugValueLoc(DII, nullptr);

  // We are now tracking the loaded value instead of the address. In the
  // future if multi-location support is added to the IR, it might be
  // preferable to keep tracking both the loaded value and the original
  // address in case the alloca can not be elided.
  Instruction *DbgValue = Builder.insertDbgValueIntrinsic(
      LI, DIVar, DIExpr, NewLoc, (Instruction *)nullptr);
  DbgValue->insertAfter(LI);
}

/// Inserts a llvm.dbg.value intrinsic after a phi that has an associated
/// llvm.dbg.declare or llvm.dbg.addr intrinsic.
void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
                                           PHINode *APN, DIBuilder &Builder) {
  auto *DIVar = DII->getVariable();
  auto *DIExpr = DII->getExpression();
  assert(DIVar && "Missing variable");

  if (PhiHasDebugValue(DIVar, DIExpr, APN))
    return;

  if (!valueCoversEntireFragment(APN->getType(), DII)) {
    // FIXME: If only referring to a part of the variable described by the
    // dbg.declare, then we want to insert a dbg.value for the corresponding
    // fragment.
    LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
                      << *DII << '\n');
    return;
  }

  BasicBlock *BB = APN->getParent();
  auto InsertionPt = BB->getFirstInsertionPt();

  DebugLoc NewLoc = getDebugValueLoc(DII, nullptr);

  // The block may be a catchswitch block, which does not have a valid
  // insertion point.
  // FIXME: Insert dbg.value markers in the successors when appropriate.
  if (InsertionPt != BB->end())
    Builder.insertDbgValueIntrinsic(APN, DIVar, DIExpr, NewLoc, &*InsertionPt);
}
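
// For example (illustrative IR), given
//     %x.addr = alloca i32
//     call void @llvm.dbg.declare(metadata i32* %x.addr, metadata !var, ...)
//     store i32 %v, i32* %x.addr
// the StoreInst overload above inserts, before the store,
//     call void @llvm.dbg.value(metadata i32 %v, metadata !var, ...)
// so the variable is tracked by value once the alloca is elided.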
1532 // The dbg.values allow tracking a variable even if it is not 1533 // stored on the stack, while the dbg.declare can only describe 1534 // the stack slot (and at a lexical-scope granularity). Later 1535 // passes will attempt to elide the stack slot. 1536 if (!AI || isArray(AI) || isStructure(AI)) 1537 continue; 1538 1539 // A volatile load/store means that the alloca can't be elided anyway. 1540 if (llvm::any_of(AI->users(), [](User *U) -> bool { 1541 if (LoadInst *LI = dyn_cast<LoadInst>(U)) 1542 return LI->isVolatile(); 1543 if (StoreInst *SI = dyn_cast<StoreInst>(U)) 1544 return SI->isVolatile(); 1545 return false; 1546 })) 1547 continue; 1548 1549 SmallVector<const Value *, 8> WorkList; 1550 WorkList.push_back(AI); 1551 while (!WorkList.empty()) { 1552 const Value *V = WorkList.pop_back_val(); 1553 for (auto &AIUse : V->uses()) { 1554 User *U = AIUse.getUser(); 1555 if (StoreInst *SI = dyn_cast<StoreInst>(U)) { 1556 if (AIUse.getOperandNo() == 1) 1557 ConvertDebugDeclareToDebugValue(DDI, SI, DIB); 1558 } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) { 1559 ConvertDebugDeclareToDebugValue(DDI, LI, DIB); 1560 } else if (CallInst *CI = dyn_cast<CallInst>(U)) { 1561 // This is a call by-value or some other instruction that takes a 1562 // pointer to the variable. Insert a *value* intrinsic that describes 1563 // the variable by dereferencing the alloca. 1564 if (!CI->isLifetimeStartOrEnd()) { 1565 DebugLoc NewLoc = getDebugValueLoc(DDI, nullptr); 1566 auto *DerefExpr = 1567 DIExpression::append(DDI->getExpression(), dwarf::DW_OP_deref); 1568 DIB.insertDbgValueIntrinsic(AI, DDI->getVariable(), DerefExpr, 1569 NewLoc, CI); 1570 } 1571 } else if (BitCastInst *BI = dyn_cast<BitCastInst>(U)) { 1572 if (BI->getType()->isPointerTy()) 1573 WorkList.push_back(BI); 1574 } 1575 } 1576 } 1577 DDI->eraseFromParent(); 1578 Changed = true; 1579 } 1580 1581 if (Changed) 1582 for (BasicBlock &BB : F) 1583 RemoveRedundantDbgInstrs(&BB); 1584 1585 return Changed; 1586 } 1587 1588 /// Propagate dbg.value intrinsics through the newly inserted PHIs. 1589 void llvm::insertDebugValuesForPHIs(BasicBlock *BB, 1590 SmallVectorImpl<PHINode *> &InsertedPHIs) { 1591 assert(BB && "No BasicBlock to clone dbg.value(s) from."); 1592 if (InsertedPHIs.size() == 0) 1593 return; 1594 1595 // Map existing PHI nodes to their dbg.values. 1596 ValueToValueMapTy DbgValueMap; 1597 for (auto &I : *BB) { 1598 if (auto DbgII = dyn_cast<DbgVariableIntrinsic>(&I)) { 1599 if (auto *Loc = 1600 dyn_cast_or_null<PHINode>(DbgII->getVariableLocationOp(0))) 1601 DbgValueMap.insert({Loc, DbgII}); 1602 } 1603 } 1604 if (DbgValueMap.size() == 0) 1605 return; 1606 1607 // Then iterate through the new PHIs and look to see if they use one of the 1608 // previously mapped PHIs. If so, insert a new dbg.value intrinsic that will 1609 // propagate the info through the new PHI. 1610 for (auto PHI : InsertedPHIs) { 1611 BasicBlock *Parent = PHI->getParent(); 1612 // Avoid inserting an intrinsic into an EH block. 
1613 if (Parent->getFirstNonPHI()->isEHPad()) 1614 continue; 1615 for (auto VI : PHI->operand_values()) { 1616 auto V = DbgValueMap.find(VI); 1617 if (V != DbgValueMap.end()) { 1618 auto *DbgII = cast<DbgVariableIntrinsic>(V->second); 1619 DbgVariableIntrinsic *NewDbgII = 1620 cast<DbgVariableIntrinsic>(DbgII->clone()); 1621 NewDbgII->replaceVariableLocationOp(VI, PHI); 1622 auto InsertionPt = Parent->getFirstInsertionPt(); 1623 assert(InsertionPt != Parent->end() && "Ill-formed basic block"); 1624 NewDbgII->insertBefore(&*InsertionPt); 1625 } 1626 } 1627 } 1628 } 1629 1630 /// Finds all intrinsics declaring local variables as living in the memory that 1631 /// 'V' points to. This may include a mix of dbg.declare and 1632 /// dbg.addr intrinsics. 1633 TinyPtrVector<DbgVariableIntrinsic *> llvm::FindDbgAddrUses(Value *V) { 1634 // This function is hot. Check whether the value has any metadata to avoid a 1635 // DenseMap lookup. 1636 if (!V->isUsedByMetadata()) 1637 return {}; 1638 auto *L = LocalAsMetadata::getIfExists(V); 1639 if (!L) 1640 return {}; 1641 auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L); 1642 if (!MDV) 1643 return {}; 1644 1645 TinyPtrVector<DbgVariableIntrinsic *> Declares; 1646 for (User *U : MDV->users()) { 1647 if (auto *DII = dyn_cast<DbgVariableIntrinsic>(U)) 1648 if (DII->isAddressOfVariable()) 1649 Declares.push_back(DII); 1650 } 1651 1652 return Declares; 1653 } 1654 1655 TinyPtrVector<DbgDeclareInst *> llvm::FindDbgDeclareUses(Value *V) { 1656 TinyPtrVector<DbgDeclareInst *> DDIs; 1657 for (DbgVariableIntrinsic *DVI : FindDbgAddrUses(V)) 1658 if (auto *DDI = dyn_cast<DbgDeclareInst>(DVI)) 1659 DDIs.push_back(DDI); 1660 return DDIs; 1661 } 1662 1663 void llvm::findDbgValues(SmallVectorImpl<DbgValueInst *> &DbgValues, Value *V) { 1664 // This function is hot. Check whether the value has any metadata to avoid a 1665 // DenseMap lookup. 1666 if (!V->isUsedByMetadata()) 1667 return; 1668 if (auto *L = LocalAsMetadata::getIfExists(V)) 1669 if (auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L)) 1670 for (User *U : MDV->users()) 1671 if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(U)) 1672 DbgValues.push_back(DVI); 1673 } 1674 1675 void llvm::findDbgUsers(SmallVectorImpl<DbgVariableIntrinsic *> &DbgUsers, 1676 Value *V) { 1677 // This function is hot. Check whether the value has any metadata to avoid a 1678 // DenseMap lookup. 1679 if (!V->isUsedByMetadata()) 1680 return; 1681 if (auto *L = LocalAsMetadata::getIfExists(V)) 1682 if (auto *MDV = MetadataAsValue::getIfExists(V->getContext(), L)) 1683 for (User *U : MDV->users()) 1684 if (DbgVariableIntrinsic *DII = dyn_cast<DbgVariableIntrinsic>(U)) 1685 DbgUsers.push_back(DII); 1686 } 1687 1688 bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress, 1689 DIBuilder &Builder, uint8_t DIExprFlags, 1690 int Offset) { 1691 auto DbgAddrs = FindDbgAddrUses(Address); 1692 for (DbgVariableIntrinsic *DII : DbgAddrs) { 1693 DebugLoc Loc = DII->getDebugLoc(); 1694 auto *DIVar = DII->getVariable(); 1695 auto *DIExpr = DII->getExpression(); 1696 assert(DIVar && "Missing variable"); 1697 DIExpr = DIExpression::prepend(DIExpr, DIExprFlags, Offset); 1698 // Insert llvm.dbg.declare immediately before DII, and remove old 1699 // llvm.dbg.declare. 
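// For illustration only (hypothetical values): with DIExprFlags ==
// DIExpression::ApplyOffset and Offset == 8, a declare such as
//   call void @llvm.dbg.declare(metadata i8* %old, metadata !var, metadata !DIExpression())
// should end up pointing at the new address with the offset folded in:
//   call void @llvm.dbg.declare(metadata i8* %new, metadata !var, metadata !DIExpression(DW_OP_plus_uconst, 8))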
1700 Builder.insertDeclare(NewAddress, DIVar, DIExpr, Loc, DII); 1701 DII->eraseFromParent(); 1702 } 1703 return !DbgAddrs.empty(); 1704 } 1705 1706 static void replaceOneDbgValueForAlloca(DbgValueInst *DVI, Value *NewAddress, 1707 DIBuilder &Builder, int Offset) { 1708 DebugLoc Loc = DVI->getDebugLoc(); 1709 auto *DIVar = DVI->getVariable(); 1710 auto *DIExpr = DVI->getExpression(); 1711 assert(DIVar && "Missing variable"); 1712 1713 // This is an alloca-based llvm.dbg.value. The first thing it should do with 1714 // the alloca pointer is dereference it. Otherwise we don't know how to handle 1715 // it and give up. 1716 if (!DIExpr || DIExpr->getNumElements() < 1 || 1717 DIExpr->getElement(0) != dwarf::DW_OP_deref) 1718 return; 1719 1720 // Insert the offset before the first deref. 1721 // We could just change the offset argument of dbg.value, but it's unsigned... 1722 if (Offset) 1723 DIExpr = DIExpression::prepend(DIExpr, 0, Offset); 1724 1725 Builder.insertDbgValueIntrinsic(NewAddress, DIVar, DIExpr, Loc, DVI); 1726 DVI->eraseFromParent(); 1727 } 1728 1729 void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress, 1730 DIBuilder &Builder, int Offset) { 1731 if (auto *L = LocalAsMetadata::getIfExists(AI)) 1732 if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L)) 1733 for (Use &U : llvm::make_early_inc_range(MDV->uses())) 1734 if (auto *DVI = dyn_cast<DbgValueInst>(U.getUser())) 1735 replaceOneDbgValueForAlloca(DVI, NewAllocaAddress, Builder, Offset); 1736 } 1737 1738 /// Where possible to salvage debug information for \p I do so 1739 /// and return True. If not possible mark undef and return False. 1740 void llvm::salvageDebugInfo(Instruction &I) { 1741 SmallVector<DbgVariableIntrinsic *, 1> DbgUsers; 1742 findDbgUsers(DbgUsers, &I); 1743 salvageDebugInfoForDbgValues(I, DbgUsers); 1744 } 1745 1746 void llvm::salvageDebugInfoForDbgValues( 1747 Instruction &I, ArrayRef<DbgVariableIntrinsic *> DbgUsers) { 1748 bool Salvaged = false; 1749 1750 for (auto *DII : DbgUsers) { 1751 // Do not add DW_OP_stack_value for DbgDeclare and DbgAddr, because they 1752 // are implicitly pointing out the value as a DWARF memory location 1753 // description. 1754 bool StackValue = isa<DbgValueInst>(DII); 1755 1756 DIExpression *DIExpr = 1757 salvageDebugInfoImpl(I, DII->getExpression(), StackValue); 1758 1759 // salvageDebugInfoImpl should fail on examining the first element of 1760 // DbgUsers, or none of them. 1761 if (!DIExpr) 1762 break; 1763 1764 DII->replaceVariableLocationOp(&I, I.getOperand(0)); 1765 DII->setExpression(DIExpr); 1766 LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n'); 1767 Salvaged = true; 1768 } 1769 1770 if (Salvaged) 1771 return; 1772 1773 for (auto *DII : DbgUsers) { 1774 Value *Undef = UndefValue::get(I.getType()); 1775 DII->replaceVariableLocationOp(&I, Undef); 1776 } 1777 } 1778 1779 DIExpression *llvm::salvageDebugInfoImpl(Instruction &I, 1780 DIExpression *SrcDIExpr, 1781 bool WithStackValue) { 1782 auto &M = *I.getModule(); 1783 auto &DL = M.getDataLayout(); 1784 1785 // Apply a vector of opcodes to the source DIExpression. 1786 auto doSalvage = [&](SmallVectorImpl<uint64_t> &Ops) -> DIExpression * { 1787 DIExpression *DIExpr = SrcDIExpr; 1788 if (!Ops.empty()) { 1789 DIExpr = DIExpression::prependOpcodes(DIExpr, Ops, WithStackValue); 1790 } 1791 return DIExpr; 1792 }; 1793 1794 // Apply the given offset to the source DIExpression. 
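// For example (sketch): an offset of 4 becomes {DW_OP_plus_uconst, 4}, while a
// negative offset such as -4 becomes {DW_OP_constu, 4, DW_OP_minus}, as emitted
// by DIExpression::appendOffset below.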
1795 auto applyOffset = [&](uint64_t Offset) -> DIExpression * { 1796 SmallVector<uint64_t, 8> Ops; 1797 DIExpression::appendOffset(Ops, Offset); 1798 return doSalvage(Ops); 1799 }; 1800 1801 // initializer-list helper for applying operators to the source DIExpression. 1802 auto applyOps = [&](ArrayRef<uint64_t> Opcodes) -> DIExpression * { 1803 SmallVector<uint64_t, 8> Ops(Opcodes.begin(), Opcodes.end()); 1804 return doSalvage(Ops); 1805 }; 1806 1807 if (auto *CI = dyn_cast<CastInst>(&I)) { 1808 // No-op casts are irrelevant for debug info. 1809 if (CI->isNoopCast(DL)) 1810 return SrcDIExpr; 1811 1812 Type *Type = CI->getType(); 1813 // Casts other than Trunc, SExt, or ZExt to scalar types cannot be salvaged. 1814 if (Type->isVectorTy() || 1815 !(isa<TruncInst>(&I) || isa<SExtInst>(&I) || isa<ZExtInst>(&I))) 1816 return nullptr; 1817 1818 Value *FromValue = CI->getOperand(0); 1819 unsigned FromTypeBitSize = FromValue->getType()->getScalarSizeInBits(); 1820 unsigned ToTypeBitSize = Type->getScalarSizeInBits(); 1821 1822 return applyOps(DIExpression::getExtOps(FromTypeBitSize, ToTypeBitSize, 1823 isa<SExtInst>(&I))); 1824 } 1825 1826 if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) { 1827 unsigned BitWidth = 1828 M.getDataLayout().getIndexSizeInBits(GEP->getPointerAddressSpace()); 1829 // Rewrite a constant GEP into a DIExpression. 1830 APInt Offset(BitWidth, 0); 1831 if (GEP->accumulateConstantOffset(M.getDataLayout(), Offset)) { 1832 return applyOffset(Offset.getSExtValue()); 1833 } else { 1834 return nullptr; 1835 } 1836 } else if (auto *BI = dyn_cast<BinaryOperator>(&I)) { 1837 // Rewrite binary operations with constant integer operands. 1838 auto *ConstInt = dyn_cast<ConstantInt>(I.getOperand(1)); 1839 if (!ConstInt || ConstInt->getBitWidth() > 64) 1840 return nullptr; 1841 1842 uint64_t Val = ConstInt->getSExtValue(); 1843 switch (BI->getOpcode()) { 1844 case Instruction::Add: 1845 return applyOffset(Val); 1846 case Instruction::Sub: 1847 return applyOffset(-int64_t(Val)); 1848 case Instruction::Mul: 1849 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_mul}); 1850 case Instruction::SDiv: 1851 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_div}); 1852 case Instruction::SRem: 1853 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_mod}); 1854 case Instruction::Or: 1855 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_or}); 1856 case Instruction::And: 1857 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_and}); 1858 case Instruction::Xor: 1859 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_xor}); 1860 case Instruction::Shl: 1861 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_shl}); 1862 case Instruction::LShr: 1863 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_shr}); 1864 case Instruction::AShr: 1865 return applyOps({dwarf::DW_OP_constu, Val, dwarf::DW_OP_shra}); 1866 default: 1867 // TODO: Salvage constants from each kind of binop we know about. 1868 return nullptr; 1869 } 1870 // *Not* to do: we should not attempt to salvage load instructions, 1871 // because the validity and lifetime of a dbg.value containing 1872 // DW_OP_deref becomes difficult to analyze. See PR40628 for examples. 1873 } 1874 return nullptr; 1875 } 1876 1877 /// A replacement for a dbg.value expression. 1878 using DbgValReplacement = Optional<DIExpression *>; 1879 1880 /// Point debug users of \p From to \p To using exprs given by \p RewriteExpr, 1881 /// possibly moving/undefing users to prevent use-before-def. 
Returns true if 1882 /// changes are made. 1883 static bool rewriteDebugUsers( 1884 Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT, 1885 function_ref<DbgValReplacement(DbgVariableIntrinsic &DII)> RewriteExpr) { 1886 // Find debug users of From. 1887 SmallVector<DbgVariableIntrinsic *, 1> Users; 1888 findDbgUsers(Users, &From); 1889 if (Users.empty()) 1890 return false; 1891 1892 // Prevent use-before-def of To. 1893 bool Changed = false; 1894 SmallPtrSet<DbgVariableIntrinsic *, 1> UndefOrSalvage; 1895 if (isa<Instruction>(&To)) { 1896 bool DomPointAfterFrom = From.getNextNonDebugInstruction() == &DomPoint; 1897 1898 for (auto *DII : Users) { 1899 // It's common to see a debug user between From and DomPoint. Move it 1900 // after DomPoint to preserve the variable update without any reordering. 1901 if (DomPointAfterFrom && DII->getNextNonDebugInstruction() == &DomPoint) { 1902 LLVM_DEBUG(dbgs() << "MOVE: " << *DII << '\n'); 1903 DII->moveAfter(&DomPoint); 1904 Changed = true; 1905 1906 // Users which otherwise aren't dominated by the replacement value must 1907 // be salvaged or deleted. 1908 } else if (!DT.dominates(&DomPoint, DII)) { 1909 UndefOrSalvage.insert(DII); 1910 } 1911 } 1912 } 1913 1914 // Update debug users without use-before-def risk. 1915 for (auto *DII : Users) { 1916 if (UndefOrSalvage.count(DII)) 1917 continue; 1918 1919 DbgValReplacement DVR = RewriteExpr(*DII); 1920 if (!DVR) 1921 continue; 1922 1923 DII->replaceVariableLocationOp(&From, &To); 1924 DII->setExpression(*DVR); 1925 LLVM_DEBUG(dbgs() << "REWRITE: " << *DII << '\n'); 1926 Changed = true; 1927 } 1928 1929 if (!UndefOrSalvage.empty()) { 1930 // Try to salvage the remaining debug users. 1931 salvageDebugInfo(From); 1932 Changed = true; 1933 } 1934 1935 return Changed; 1936 } 1937 1938 /// Check if a bitcast between a value of type \p FromTy to type \p ToTy would 1939 /// losslessly preserve the bits and semantics of the value. This predicate is 1940 /// symmetric, i.e swapping \p FromTy and \p ToTy should give the same result. 1941 /// 1942 /// Note that Type::canLosslesslyBitCastTo is not suitable here because it 1943 /// allows semantically unequivalent bitcasts, such as <2 x i64> -> <4 x i32>, 1944 /// and also does not allow lossless pointer <-> integer conversions. 1945 static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy, 1946 Type *ToTy) { 1947 // Trivially compatible types. 1948 if (FromTy == ToTy) 1949 return true; 1950 1951 // Handle compatible pointer <-> integer conversions. 1952 if (FromTy->isIntOrPtrTy() && ToTy->isIntOrPtrTy()) { 1953 bool SameSize = DL.getTypeSizeInBits(FromTy) == DL.getTypeSizeInBits(ToTy); 1954 bool LosslessConversion = !DL.isNonIntegralPointerType(FromTy) && 1955 !DL.isNonIntegralPointerType(ToTy); 1956 return SameSize && LosslessConversion; 1957 } 1958 1959 // TODO: This is not exhaustive. 1960 return false; 1961 } 1962 1963 bool llvm::replaceAllDbgUsesWith(Instruction &From, Value &To, 1964 Instruction &DomPoint, DominatorTree &DT) { 1965 // Exit early if From has no debug users. 1966 if (!From.isUsedByMetadata()) 1967 return false; 1968 1969 assert(&From != &To && "Can't replace something with itself"); 1970 1971 Type *FromTy = From.getType(); 1972 Type *ToTy = To.getType(); 1973 1974 auto Identity = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement { 1975 return DII.getExpression(); 1976 }; 1977 1978 // Handle no-op conversions. 
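// For example (illustrative): on a target with 64-bit integral pointers, an
// i8* value being replaced by an i64 ptrtoint of it preserves every bit, so
// the debug users can simply be retargeted with their expressions unchanged.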
1979 Module &M = *From.getModule(); 1980 const DataLayout &DL = M.getDataLayout(); 1981 if (isBitCastSemanticsPreserving(DL, FromTy, ToTy)) 1982 return rewriteDebugUsers(From, To, DomPoint, DT, Identity); 1983 1984 // Handle integer-to-integer widening and narrowing. 1985 // FIXME: Use DW_OP_convert when it's available everywhere. 1986 if (FromTy->isIntegerTy() && ToTy->isIntegerTy()) { 1987 uint64_t FromBits = FromTy->getPrimitiveSizeInBits(); 1988 uint64_t ToBits = ToTy->getPrimitiveSizeInBits(); 1989 assert(FromBits != ToBits && "Unexpected no-op conversion"); 1990 1991 // When the width of the result grows, assume that a debugger will only 1992 // access the low `FromBits` bits when inspecting the source variable. 1993 if (FromBits < ToBits) 1994 return rewriteDebugUsers(From, To, DomPoint, DT, Identity); 1995 1996 // The width of the result has shrunk. Use sign/zero extension to describe 1997 // the source variable's high bits. 1998 auto SignOrZeroExt = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement { 1999 DILocalVariable *Var = DII.getVariable(); 2000 2001 // Without knowing signedness, sign/zero extension isn't possible. 2002 auto Signedness = Var->getSignedness(); 2003 if (!Signedness) 2004 return None; 2005 2006 bool Signed = *Signedness == DIBasicType::Signedness::Signed; 2007 return DIExpression::appendExt(DII.getExpression(), ToBits, FromBits, 2008 Signed); 2009 }; 2010 return rewriteDebugUsers(From, To, DomPoint, DT, SignOrZeroExt); 2011 } 2012 2013 // TODO: Floating-point conversions, vectors. 2014 return false; 2015 } 2016 2017 std::pair<unsigned, unsigned> 2018 llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) { 2019 unsigned NumDeadInst = 0; 2020 unsigned NumDeadDbgInst = 0; 2021 // Delete the instructions backwards, as it has a reduced likelihood of 2022 // having to update as many def-use and use-def chains. 2023 Instruction *EndInst = BB->getTerminator(); // Last not to be deleted. 2024 while (EndInst != &BB->front()) { 2025 // Delete the next to last instruction. 2026 Instruction *Inst = &*--EndInst->getIterator(); 2027 if (!Inst->use_empty() && !Inst->getType()->isTokenTy()) 2028 Inst->replaceAllUsesWith(UndefValue::get(Inst->getType())); 2029 if (Inst->isEHPad() || Inst->getType()->isTokenTy()) { 2030 EndInst = Inst; 2031 continue; 2032 } 2033 if (isa<DbgInfoIntrinsic>(Inst)) 2034 ++NumDeadDbgInst; 2035 else 2036 ++NumDeadInst; 2037 Inst->eraseFromParent(); 2038 } 2039 return {NumDeadInst, NumDeadDbgInst}; 2040 } 2041 2042 unsigned llvm::changeToUnreachable(Instruction *I, bool UseLLVMTrap, 2043 bool PreserveLCSSA, DomTreeUpdater *DTU, 2044 MemorySSAUpdater *MSSAU) { 2045 BasicBlock *BB = I->getParent(); 2046 2047 if (MSSAU) 2048 MSSAU->changeToUnreachable(I); 2049 2050 SmallSetVector<BasicBlock *, 8> UniqueSuccessors; 2051 2052 // Loop over all of the successors, removing BB's entry from any PHI 2053 // nodes. 2054 for (BasicBlock *Successor : successors(BB)) { 2055 Successor->removePredecessor(BB, PreserveLCSSA); 2056 if (DTU) 2057 UniqueSuccessors.insert(Successor); 2058 } 2059 // Insert a call to llvm.trap right before this. This turns the undefined 2060 // behavior into a hard fail instead of falling through into random code. 
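// Illustrative result when UseLLVMTrap is set (sketch):
//   call void @llvm.trap()
//   unreachable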
2061 if (UseLLVMTrap) { 2062 Function *TrapFn = 2063 Intrinsic::getDeclaration(BB->getParent()->getParent(), Intrinsic::trap); 2064 CallInst *CallTrap = CallInst::Create(TrapFn, "", I); 2065 CallTrap->setDebugLoc(I->getDebugLoc()); 2066 } 2067 auto *UI = new UnreachableInst(I->getContext(), I); 2068 UI->setDebugLoc(I->getDebugLoc()); 2069 2070 // All instructions after this are dead. 2071 unsigned NumInstrsRemoved = 0; 2072 BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end(); 2073 while (BBI != BBE) { 2074 if (!BBI->use_empty()) 2075 BBI->replaceAllUsesWith(UndefValue::get(BBI->getType())); 2076 BB->getInstList().erase(BBI++); 2077 ++NumInstrsRemoved; 2078 } 2079 if (DTU) { 2080 SmallVector<DominatorTree::UpdateType, 8> Updates; 2081 Updates.reserve(UniqueSuccessors.size()); 2082 for (BasicBlock *UniqueSuccessor : UniqueSuccessors) 2083 Updates.push_back({DominatorTree::Delete, BB, UniqueSuccessor}); 2084 DTU->applyUpdates(Updates); 2085 } 2086 return NumInstrsRemoved; 2087 } 2088 2089 CallInst *llvm::createCallMatchingInvoke(InvokeInst *II) { 2090 SmallVector<Value *, 8> Args(II->args()); 2091 SmallVector<OperandBundleDef, 1> OpBundles; 2092 II->getOperandBundlesAsDefs(OpBundles); 2093 CallInst *NewCall = CallInst::Create(II->getFunctionType(), 2094 II->getCalledOperand(), Args, OpBundles); 2095 NewCall->setCallingConv(II->getCallingConv()); 2096 NewCall->setAttributes(II->getAttributes()); 2097 NewCall->setDebugLoc(II->getDebugLoc()); 2098 NewCall->copyMetadata(*II); 2099 2100 // If the invoke had profile metadata, try converting them for CallInst. 2101 uint64_t TotalWeight; 2102 if (NewCall->extractProfTotalWeight(TotalWeight)) { 2103 // Set the total weight if it fits into i32, otherwise reset. 2104 MDBuilder MDB(NewCall->getContext()); 2105 auto NewWeights = uint32_t(TotalWeight) != TotalWeight 2106 ? nullptr 2107 : MDB.createBranchWeights({uint32_t(TotalWeight)}); 2108 NewCall->setMetadata(LLVMContext::MD_prof, NewWeights); 2109 } 2110 2111 return NewCall; 2112 } 2113 2114 /// changeToCall - Convert the specified invoke into a normal call. 2115 void llvm::changeToCall(InvokeInst *II, DomTreeUpdater *DTU) { 2116 CallInst *NewCall = createCallMatchingInvoke(II); 2117 NewCall->takeName(II); 2118 NewCall->insertBefore(II); 2119 II->replaceAllUsesWith(NewCall); 2120 2121 // Follow the call by a branch to the normal destination. 2122 BasicBlock *NormalDestBB = II->getNormalDest(); 2123 BranchInst::Create(NormalDestBB, II); 2124 2125 // Update PHI nodes in the unwind destination 2126 BasicBlock *BB = II->getParent(); 2127 BasicBlock *UnwindDestBB = II->getUnwindDest(); 2128 UnwindDestBB->removePredecessor(BB); 2129 II->eraseFromParent(); 2130 if (DTU) 2131 DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}}); 2132 } 2133 2134 BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI, 2135 BasicBlock *UnwindEdge, 2136 DomTreeUpdater *DTU) { 2137 BasicBlock *BB = CI->getParent(); 2138 2139 // Convert this function call into an invoke instruction. First, split the 2140 // basic block. 2141 BasicBlock *Split = SplitBlock(BB, CI, DTU, /*LI=*/nullptr, /*MSSAU*/ nullptr, 2142 CI->getName() + ".noexc"); 2143 2144 // Delete the unconditional branch inserted by SplitBlock 2145 BB->getInstList().pop_back(); 2146 2147 // Create the new invoke instruction. 
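// Sketch of the overall rewrite (illustrative names): the original
//   %r = call i32 @f(i32 %a)
// is replaced by an invoke of @f whose normal destination is the split block
// ("%r.noexc") and whose unwind destination is UnwindEdge; all uses of %r are
// rewired to the invoke's result.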
2148 SmallVector<Value *, 8> InvokeArgs(CI->args()); 2149 SmallVector<OperandBundleDef, 1> OpBundles; 2150 2151 CI->getOperandBundlesAsDefs(OpBundles); 2152 2153 // Note: we're round tripping operand bundles through memory here, and that 2154 // can potentially be avoided with a cleverer API design that we do not have 2155 // as of this time. 2156 2157 InvokeInst *II = 2158 InvokeInst::Create(CI->getFunctionType(), CI->getCalledOperand(), Split, 2159 UnwindEdge, InvokeArgs, OpBundles, CI->getName(), BB); 2160 II->setDebugLoc(CI->getDebugLoc()); 2161 II->setCallingConv(CI->getCallingConv()); 2162 II->setAttributes(CI->getAttributes()); 2163 2164 if (DTU) 2165 DTU->applyUpdates({{DominatorTree::Insert, BB, UnwindEdge}}); 2166 2167 // Make sure that anything using the call now uses the invoke! This also 2168 // updates the CallGraph if present, because it uses a WeakTrackingVH. 2169 CI->replaceAllUsesWith(II); 2170 2171 // Delete the original call 2172 Split->getInstList().pop_front(); 2173 return Split; 2174 } 2175 2176 static bool markAliveBlocks(Function &F, 2177 SmallPtrSetImpl<BasicBlock *> &Reachable, 2178 DomTreeUpdater *DTU = nullptr) { 2179 SmallVector<BasicBlock*, 128> Worklist; 2180 BasicBlock *BB = &F.front(); 2181 Worklist.push_back(BB); 2182 Reachable.insert(BB); 2183 bool Changed = false; 2184 do { 2185 BB = Worklist.pop_back_val(); 2186 2187 // Do a quick scan of the basic block, turning any obviously unreachable 2188 // instructions into LLVM unreachable insts. The instruction combining pass 2189 // canonicalizes unreachable insts into stores to null or undef. 2190 for (Instruction &I : *BB) { 2191 if (auto *CI = dyn_cast<CallInst>(&I)) { 2192 Value *Callee = CI->getCalledOperand(); 2193 // Handle intrinsic calls. 2194 if (Function *F = dyn_cast<Function>(Callee)) { 2195 auto IntrinsicID = F->getIntrinsicID(); 2196 // Assumptions that are known to be false are equivalent to 2197 // unreachable. Also, if the condition is undefined, then we make the 2198 // choice most beneficial to the optimizer, and choose that to also be 2199 // unreachable. 2200 if (IntrinsicID == Intrinsic::assume) { 2201 if (match(CI->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) { 2202 // Don't insert a call to llvm.trap right before the unreachable. 2203 changeToUnreachable(CI, false, false, DTU); 2204 Changed = true; 2205 break; 2206 } 2207 } else if (IntrinsicID == Intrinsic::experimental_guard) { 2208 // A call to the guard intrinsic bails out of the current 2209 // compilation unit if the predicate passed to it is false. If the 2210 // predicate is a constant false, then we know the guard will bail 2211 // out of the current compile unconditionally, so all code following 2212 // it is dead. 2213 // 2214 // Note: unlike in llvm.assume, it is not "obviously profitable" for 2215 // guards to treat `undef` as `false` since a guard on `undef` can 2216 // still be useful for widening. 
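// For example (sketch): after
//   call void (i1, ...) @llvm.experimental.guard(i1 false) [ "deopt"() ]
// control can never reach the next instruction, so everything that follows
// the guard is turned into unreachable below.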
2217 if (match(CI->getArgOperand(0), m_Zero())) 2218 if (!isa<UnreachableInst>(CI->getNextNode())) { 2219 changeToUnreachable(CI->getNextNode(), /*UseLLVMTrap=*/false, 2220 false, DTU); 2221 Changed = true; 2222 break; 2223 } 2224 } 2225 } else if ((isa<ConstantPointerNull>(Callee) && 2226 !NullPointerIsDefined(CI->getFunction())) || 2227 isa<UndefValue>(Callee)) { 2228 changeToUnreachable(CI, /*UseLLVMTrap=*/false, false, DTU); 2229 Changed = true; 2230 break; 2231 } 2232 if (CI->doesNotReturn() && !CI->isMustTailCall()) { 2233 // If we found a call to a no-return function, insert an unreachable 2234 // instruction after it. Make sure there isn't *already* one there 2235 // though. 2236 if (!isa<UnreachableInst>(CI->getNextNode())) { 2237 // Don't insert a call to llvm.trap right before the unreachable. 2238 changeToUnreachable(CI->getNextNode(), false, false, DTU); 2239 Changed = true; 2240 } 2241 break; 2242 } 2243 } else if (auto *SI = dyn_cast<StoreInst>(&I)) { 2244 // Store to undef and store to null are undefined and used to signal 2245 // that they should be changed to unreachable by passes that can't 2246 // modify the CFG. 2247 2248 // Don't touch volatile stores. 2249 if (SI->isVolatile()) continue; 2250 2251 Value *Ptr = SI->getOperand(1); 2252 2253 if (isa<UndefValue>(Ptr) || 2254 (isa<ConstantPointerNull>(Ptr) && 2255 !NullPointerIsDefined(SI->getFunction(), 2256 SI->getPointerAddressSpace()))) { 2257 changeToUnreachable(SI, true, false, DTU); 2258 Changed = true; 2259 break; 2260 } 2261 } 2262 } 2263 2264 Instruction *Terminator = BB->getTerminator(); 2265 if (auto *II = dyn_cast<InvokeInst>(Terminator)) { 2266 // Turn invokes that call 'nounwind' functions into ordinary calls. 2267 Value *Callee = II->getCalledOperand(); 2268 if ((isa<ConstantPointerNull>(Callee) && 2269 !NullPointerIsDefined(BB->getParent())) || 2270 isa<UndefValue>(Callee)) { 2271 changeToUnreachable(II, true, false, DTU); 2272 Changed = true; 2273 } else if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) { 2274 if (II->use_empty() && II->onlyReadsMemory()) { 2275 // jump to the normal destination branch. 2276 BasicBlock *NormalDestBB = II->getNormalDest(); 2277 BasicBlock *UnwindDestBB = II->getUnwindDest(); 2278 BranchInst::Create(NormalDestBB, II); 2279 UnwindDestBB->removePredecessor(II->getParent()); 2280 II->eraseFromParent(); 2281 if (DTU) 2282 DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}}); 2283 } else 2284 changeToCall(II, DTU); 2285 Changed = true; 2286 } 2287 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) { 2288 // Remove catchpads which cannot be reached. 2289 struct CatchPadDenseMapInfo { 2290 static CatchPadInst *getEmptyKey() { 2291 return DenseMapInfo<CatchPadInst *>::getEmptyKey(); 2292 } 2293 2294 static CatchPadInst *getTombstoneKey() { 2295 return DenseMapInfo<CatchPadInst *>::getTombstoneKey(); 2296 } 2297 2298 static unsigned getHashValue(CatchPadInst *CatchPad) { 2299 return static_cast<unsigned>(hash_combine_range( 2300 CatchPad->value_op_begin(), CatchPad->value_op_end())); 2301 } 2302 2303 static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) { 2304 if (LHS == getEmptyKey() || LHS == getTombstoneKey() || 2305 RHS == getEmptyKey() || RHS == getTombstoneKey()) 2306 return LHS == RHS; 2307 return LHS->isIdenticalTo(RHS); 2308 } 2309 }; 2310 2311 SmallMapVector<BasicBlock *, int, 8> NumPerSuccessorCases; 2312 // Set of unique CatchPads. 
2313 SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4, 2314 CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>> 2315 HandlerSet; 2316 detail::DenseSetEmpty Empty; 2317 for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(), 2318 E = CatchSwitch->handler_end(); 2319 I != E; ++I) { 2320 BasicBlock *HandlerBB = *I; 2321 ++NumPerSuccessorCases[HandlerBB]; 2322 auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHI()); 2323 if (!HandlerSet.insert({CatchPad, Empty}).second) { 2324 --NumPerSuccessorCases[HandlerBB]; 2325 CatchSwitch->removeHandler(I); 2326 --I; 2327 --E; 2328 Changed = true; 2329 } 2330 } 2331 std::vector<DominatorTree::UpdateType> Updates; 2332 for (const std::pair<BasicBlock *, int> &I : NumPerSuccessorCases) 2333 if (I.second == 0) 2334 Updates.push_back({DominatorTree::Delete, BB, I.first}); 2335 if (DTU) 2336 DTU->applyUpdates(Updates); 2337 } 2338 2339 Changed |= ConstantFoldTerminator(BB, true, nullptr, DTU); 2340 for (BasicBlock *Successor : successors(BB)) 2341 if (Reachable.insert(Successor).second) 2342 Worklist.push_back(Successor); 2343 } while (!Worklist.empty()); 2344 return Changed; 2345 } 2346 2347 void llvm::removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU) { 2348 Instruction *TI = BB->getTerminator(); 2349 2350 if (auto *II = dyn_cast<InvokeInst>(TI)) { 2351 changeToCall(II, DTU); 2352 return; 2353 } 2354 2355 Instruction *NewTI; 2356 BasicBlock *UnwindDest; 2357 2358 if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) { 2359 NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI); 2360 UnwindDest = CRI->getUnwindDest(); 2361 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) { 2362 auto *NewCatchSwitch = CatchSwitchInst::Create( 2363 CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(), 2364 CatchSwitch->getName(), CatchSwitch); 2365 for (BasicBlock *PadBB : CatchSwitch->handlers()) 2366 NewCatchSwitch->addHandler(PadBB); 2367 2368 NewTI = NewCatchSwitch; 2369 UnwindDest = CatchSwitch->getUnwindDest(); 2370 } else { 2371 llvm_unreachable("Could not find unwind successor"); 2372 } 2373 2374 NewTI->takeName(TI); 2375 NewTI->setDebugLoc(TI->getDebugLoc()); 2376 UnwindDest->removePredecessor(BB); 2377 TI->replaceAllUsesWith(NewTI); 2378 TI->eraseFromParent(); 2379 if (DTU) 2380 DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDest}}); 2381 } 2382 2383 /// removeUnreachableBlocks - Remove blocks that are not reachable, even 2384 /// if they are in a dead cycle. Return true if a change was made, false 2385 /// otherwise. 2386 bool llvm::removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU, 2387 MemorySSAUpdater *MSSAU) { 2388 SmallPtrSet<BasicBlock *, 16> Reachable; 2389 bool Changed = markAliveBlocks(F, Reachable, DTU); 2390 2391 // If there are unreachable blocks in the CFG... 2392 if (Reachable.size() == F.size()) 2393 return Changed; 2394 2395 assert(Reachable.size() < F.size()); 2396 2397 // Are there any blocks left to actually delete? 
2398 SmallSetVector<BasicBlock *, 8> BlocksToRemove; 2399 for (BasicBlock &BB : F) { 2400 // Skip reachable basic blocks 2401 if (Reachable.count(&BB)) 2402 continue; 2403 // Skip already-deleted blocks 2404 if (DTU && DTU->isBBPendingDeletion(&BB)) 2405 continue; 2406 BlocksToRemove.insert(&BB); 2407 } 2408 2409 if (BlocksToRemove.empty()) 2410 return Changed; 2411 2412 Changed = true; 2413 NumRemoved += BlocksToRemove.size(); 2414 2415 if (MSSAU) 2416 MSSAU->removeBlocks(BlocksToRemove); 2417 2418 // Loop over all of the basic blocks that are up for removal, dropping all of 2419 // their internal references. Update DTU if available. 2420 std::vector<DominatorTree::UpdateType> Updates; 2421 for (auto *BB : BlocksToRemove) { 2422 SmallSetVector<BasicBlock *, 8> UniqueSuccessors; 2423 for (BasicBlock *Successor : successors(BB)) { 2424 // Only remove references to BB in reachable successors of BB. 2425 if (Reachable.count(Successor)) 2426 Successor->removePredecessor(BB); 2427 if (DTU) 2428 UniqueSuccessors.insert(Successor); 2429 } 2430 BB->dropAllReferences(); 2431 if (DTU) { 2432 Instruction *TI = BB->getTerminator(); 2433 assert(TI && "Basic block should have a terminator"); 2434 // Terminators like invoke can have users. We have to replace their users, 2435 // before removing them. 2436 if (!TI->use_empty()) 2437 TI->replaceAllUsesWith(UndefValue::get(TI->getType())); 2438 TI->eraseFromParent(); 2439 new UnreachableInst(BB->getContext(), BB); 2440 assert(succ_empty(BB) && "The successor list of BB isn't empty before " 2441 "applying corresponding DTU updates."); 2442 Updates.reserve(Updates.size() + UniqueSuccessors.size()); 2443 for (auto *UniqueSuccessor : UniqueSuccessors) 2444 Updates.push_back({DominatorTree::Delete, BB, UniqueSuccessor}); 2445 } 2446 } 2447 2448 if (DTU) { 2449 DTU->applyUpdates(Updates); 2450 for (auto *BB : BlocksToRemove) 2451 DTU->deleteBB(BB); 2452 } else { 2453 for (auto *BB : BlocksToRemove) 2454 BB->eraseFromParent(); 2455 } 2456 2457 return Changed; 2458 } 2459 2460 void llvm::combineMetadata(Instruction *K, const Instruction *J, 2461 ArrayRef<unsigned> KnownIDs, bool DoesKMove) { 2462 SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata; 2463 K->dropUnknownNonDebugMetadata(KnownIDs); 2464 K->getAllMetadataOtherThanDebugLoc(Metadata); 2465 for (const auto &MD : Metadata) { 2466 unsigned Kind = MD.first; 2467 MDNode *JMD = J->getMetadata(Kind); 2468 MDNode *KMD = MD.second; 2469 2470 switch (Kind) { 2471 default: 2472 K->setMetadata(Kind, nullptr); // Remove unknown metadata 2473 break; 2474 case LLVMContext::MD_dbg: 2475 llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg"); 2476 case LLVMContext::MD_tbaa: 2477 K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD)); 2478 break; 2479 case LLVMContext::MD_alias_scope: 2480 K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD)); 2481 break; 2482 case LLVMContext::MD_noalias: 2483 case LLVMContext::MD_mem_parallel_loop_access: 2484 K->setMetadata(Kind, MDNode::intersect(JMD, KMD)); 2485 break; 2486 case LLVMContext::MD_access_group: 2487 K->setMetadata(LLVMContext::MD_access_group, 2488 intersectAccessGroups(K, J)); 2489 break; 2490 case LLVMContext::MD_range: 2491 2492 // If K does move, use most generic range. Otherwise keep the range of 2493 // K. 2494 if (DoesKMove) 2495 // FIXME: If K does move, we should drop the range info and nonnull. 
2496 // Currently this function is used with DoesKMove in passes
2497 // doing hoisting/sinking and the current behavior of using the
2498 // most generic range is correct in those cases.
2499 K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
2500 break;
2501 case LLVMContext::MD_fpmath:
2502 K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
2503 break;
2504 case LLVMContext::MD_invariant_load:
2505 // Only set the !invariant.load if it is present in both instructions.
2506 K->setMetadata(Kind, JMD);
2507 break;
2508 case LLVMContext::MD_nonnull:
2509 // If K does move, keep nonnull if it is present in both instructions.
2510 if (DoesKMove)
2511 K->setMetadata(Kind, JMD);
2512 break;
2513 case LLVMContext::MD_invariant_group:
2514 // Preserve !invariant.group in K.
2515 break;
2516 case LLVMContext::MD_align:
2517 K->setMetadata(Kind,
2518 MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
2519 break;
2520 case LLVMContext::MD_dereferenceable:
2521 case LLVMContext::MD_dereferenceable_or_null:
2522 K->setMetadata(Kind,
2523 MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
2524 break;
2525 case LLVMContext::MD_preserve_access_index:
2526 // Preserve !preserve.access.index in K.
2527 break;
2528 }
2529 }
2530 // Set !invariant.group from J if J has it. If both instructions have it
2531 // then we will just pick it from J - even when they are different.
2532 // Also make sure that K is a load or store - e.g. combining a bitcast with a
2533 // load could produce a bitcast with invariant.group metadata, which is invalid.
2534 // FIXME: we should try to preserve both invariant.group md if they are
2535 // different, but right now an instruction can only have one invariant.group.
2536 if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
2537 if (isa<LoadInst>(K) || isa<StoreInst>(K))
2538 K->setMetadata(LLVMContext::MD_invariant_group, JMD);
2539 }
2540
2541 void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J,
2542 bool KDominatesJ) {
2543 unsigned KnownIDs[] = {
2544 LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
2545 LLVMContext::MD_noalias, LLVMContext::MD_range,
2546 LLVMContext::MD_invariant_load, LLVMContext::MD_nonnull,
2547 LLVMContext::MD_invariant_group, LLVMContext::MD_align,
2548 LLVMContext::MD_dereferenceable,
2549 LLVMContext::MD_dereferenceable_or_null,
2550 LLVMContext::MD_access_group, LLVMContext::MD_preserve_access_index};
2551 combineMetadata(K, J, KnownIDs, KDominatesJ);
2552 }
2553
2554 void llvm::copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source) {
2555 SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
2556 Source.getAllMetadata(MD);
2557 MDBuilder MDB(Dest.getContext());
2558 Type *NewType = Dest.getType();
2559 const DataLayout &DL = Source.getModule()->getDataLayout();
2560 for (const auto &MDPair : MD) {
2561 unsigned ID = MDPair.first;
2562 MDNode *N = MDPair.second;
2563 // Note, essentially every kind of metadata should be preserved here! This
2564 // routine is supposed to clone a load instruction changing *only its type*.
2565 // The only metadata it makes sense to drop is metadata which is invalidated
2566 // when the pointer type changes. This should essentially never be the case
2567 // in LLVM, but we explicitly switch over only known metadata to be
2568 // conservatively correct. If you are adding metadata to LLVM which pertains
2569 // to loads, you almost certainly want to add it here.
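// For example (illustrative): rewriting a load of i8* as a load of i64 copies
// !tbaa and !noalias unchanged, turns !nonnull into an equivalent !range (see
// copyNonnullMetadata), and drops pointer-only metadata such as !align and
// !dereferenceable.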
2570 switch (ID) { 2571 case LLVMContext::MD_dbg: 2572 case LLVMContext::MD_tbaa: 2573 case LLVMContext::MD_prof: 2574 case LLVMContext::MD_fpmath: 2575 case LLVMContext::MD_tbaa_struct: 2576 case LLVMContext::MD_invariant_load: 2577 case LLVMContext::MD_alias_scope: 2578 case LLVMContext::MD_noalias: 2579 case LLVMContext::MD_nontemporal: 2580 case LLVMContext::MD_mem_parallel_loop_access: 2581 case LLVMContext::MD_access_group: 2582 // All of these directly apply. 2583 Dest.setMetadata(ID, N); 2584 break; 2585 2586 case LLVMContext::MD_nonnull: 2587 copyNonnullMetadata(Source, N, Dest); 2588 break; 2589 2590 case LLVMContext::MD_align: 2591 case LLVMContext::MD_dereferenceable: 2592 case LLVMContext::MD_dereferenceable_or_null: 2593 // These only directly apply if the new type is also a pointer. 2594 if (NewType->isPointerTy()) 2595 Dest.setMetadata(ID, N); 2596 break; 2597 2598 case LLVMContext::MD_range: 2599 copyRangeMetadata(DL, Source, N, Dest); 2600 break; 2601 } 2602 } 2603 } 2604 2605 void llvm::patchReplacementInstruction(Instruction *I, Value *Repl) { 2606 auto *ReplInst = dyn_cast<Instruction>(Repl); 2607 if (!ReplInst) 2608 return; 2609 2610 // Patch the replacement so that it is not more restrictive than the value 2611 // being replaced. 2612 // Note that if 'I' is a load being replaced by some operation, 2613 // for example, by an arithmetic operation, then andIRFlags() 2614 // would just erase all math flags from the original arithmetic 2615 // operation, which is clearly not wanted and not needed. 2616 if (!isa<LoadInst>(I)) 2617 ReplInst->andIRFlags(I); 2618 2619 // FIXME: If both the original and replacement value are part of the 2620 // same control-flow region (meaning that the execution of one 2621 // guarantees the execution of the other), then we can combine the 2622 // noalias scopes here and do better than the general conservative 2623 // answer used in combineMetadata(). 2624 2625 // In general, GVN unifies expressions over different control-flow 2626 // regions, and so we need a conservative combination of the noalias 2627 // scopes. 
2628 static const unsigned KnownIDs[] = { 2629 LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope, 2630 LLVMContext::MD_noalias, LLVMContext::MD_range, 2631 LLVMContext::MD_fpmath, LLVMContext::MD_invariant_load, 2632 LLVMContext::MD_invariant_group, LLVMContext::MD_nonnull, 2633 LLVMContext::MD_access_group, LLVMContext::MD_preserve_access_index}; 2634 combineMetadata(ReplInst, I, KnownIDs, false); 2635 } 2636 2637 template <typename RootType, typename DominatesFn> 2638 static unsigned replaceDominatedUsesWith(Value *From, Value *To, 2639 const RootType &Root, 2640 const DominatesFn &Dominates) { 2641 assert(From->getType() == To->getType()); 2642 2643 unsigned Count = 0; 2644 for (Value::use_iterator UI = From->use_begin(), UE = From->use_end(); 2645 UI != UE;) { 2646 Use &U = *UI++; 2647 if (!Dominates(Root, U)) 2648 continue; 2649 U.set(To); 2650 LLVM_DEBUG(dbgs() << "Replace dominated use of '" << From->getName() 2651 << "' as " << *To << " in " << *U << "\n"); 2652 ++Count; 2653 } 2654 return Count; 2655 } 2656 2657 unsigned llvm::replaceNonLocalUsesWith(Instruction *From, Value *To) { 2658 assert(From->getType() == To->getType()); 2659 auto *BB = From->getParent(); 2660 unsigned Count = 0; 2661 2662 for (Value::use_iterator UI = From->use_begin(), UE = From->use_end(); 2663 UI != UE;) { 2664 Use &U = *UI++; 2665 auto *I = cast<Instruction>(U.getUser()); 2666 if (I->getParent() == BB) 2667 continue; 2668 U.set(To); 2669 ++Count; 2670 } 2671 return Count; 2672 } 2673 2674 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To, 2675 DominatorTree &DT, 2676 const BasicBlockEdge &Root) { 2677 auto Dominates = [&DT](const BasicBlockEdge &Root, const Use &U) { 2678 return DT.dominates(Root, U); 2679 }; 2680 return ::replaceDominatedUsesWith(From, To, Root, Dominates); 2681 } 2682 2683 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To, 2684 DominatorTree &DT, 2685 const BasicBlock *BB) { 2686 auto Dominates = [&DT](const BasicBlock *BB, const Use &U) { 2687 return DT.dominates(BB, U); 2688 }; 2689 return ::replaceDominatedUsesWith(From, To, BB, Dominates); 2690 } 2691 2692 bool llvm::callsGCLeafFunction(const CallBase *Call, 2693 const TargetLibraryInfo &TLI) { 2694 // Check if the function is specifically marked as a gc leaf function. 2695 if (Call->hasFnAttr("gc-leaf-function")) 2696 return true; 2697 if (const Function *F = Call->getCalledFunction()) { 2698 if (F->hasFnAttribute("gc-leaf-function")) 2699 return true; 2700 2701 if (auto IID = F->getIntrinsicID()) { 2702 // Most LLVM intrinsics do not take safepoints. 2703 return IID != Intrinsic::experimental_gc_statepoint && 2704 IID != Intrinsic::experimental_deoptimize && 2705 IID != Intrinsic::memcpy_element_unordered_atomic && 2706 IID != Intrinsic::memmove_element_unordered_atomic; 2707 } 2708 } 2709 2710 // Lib calls can be materialized by some passes, and won't be 2711 // marked as 'gc-leaf-function.' All available Libcalls are 2712 // GC-leaf. 2713 LibFunc LF; 2714 if (TLI.getLibFunc(*Call, LF)) { 2715 return TLI.has(LF); 2716 } 2717 2718 return false; 2719 } 2720 2721 void llvm::copyNonnullMetadata(const LoadInst &OldLI, MDNode *N, 2722 LoadInst &NewLI) { 2723 auto *NewTy = NewLI.getType(); 2724 2725 // This only directly applies if the new type is also a pointer. 2726 if (NewTy->isPointerTy()) { 2727 NewLI.setMetadata(LLVMContext::MD_nonnull, N); 2728 return; 2729 } 2730 2731 // The only other translation we can do is to integral loads with !range 2732 // metadata. 
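// For illustration: a !nonnull i8* load being rewritten as an i64 load gets
//   !range !{i64 1, i64 0}
// i.e. the wrapping range [1, 0) that excludes only the null value.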
2733 if (!NewTy->isIntegerTy())
2734 return;
2735
2736 MDBuilder MDB(NewLI.getContext());
2737 const Value *Ptr = OldLI.getPointerOperand();
2738 auto *ITy = cast<IntegerType>(NewTy);
2739 auto *NullInt = ConstantExpr::getPtrToInt(
2740 ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
2741 auto *NonNullInt = ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
2742 NewLI.setMetadata(LLVMContext::MD_range,
2743 MDB.createRange(NonNullInt, NullInt));
2744 }
2745
2746 void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
2747 MDNode *N, LoadInst &NewLI) {
2748 auto *NewTy = NewLI.getType();
2749
2750 // Give up unless the load is being converted to a pointer type, where there
2751 // is a single very valuable mapping we can do reliably.
2752 // FIXME: It would be nice to propagate this in more ways, but the type
2753 // conversions make it hard.
2754 if (!NewTy->isPointerTy())
2755 return;
2756
2757 unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy);
2758 if (!getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
2759 MDNode *NN = MDNode::get(OldLI.getContext(), None);
2760 NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
2761 }
2762 }
2763
2764 void llvm::dropDebugUsers(Instruction &I) {
2765 SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
2766 findDbgUsers(DbgUsers, &I);
2767 for (auto *DII : DbgUsers)
2768 DII->eraseFromParent();
2769 }
2770
2771 void llvm::hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt,
2772 BasicBlock *BB) {
2773 // Since we are moving the instructions out of their basic block, we do not
2774 // retain their original debug locations (DILocations) and debug intrinsic
2775 // instructions.
2776 //
2777 // Retaining them would degrade the debugging experience and adversely affect
2778 // the accuracy of profiling information.
2779 //
2780 // Currently, when hoisting the instructions, we take the following actions:
2781 // - Remove their debug intrinsic instructions.
2782 // - Set their debug locations to the values from the insertion point.
2783 //
2784 // As per PR39141 (comment #8), the more fundamental reason why the dbg.values
2785 // need to be deleted is that there will not be any instructions with a
2786 // DILocation in either branch left after performing the transformation. We
2787 // can only insert a dbg.value after the two branches are joined again.
2788 //
2789 // See PR38762, PR39243 for more details.
2790 //
2791 // TODO: Extend llvm.dbg.value to take more than one SSA Value (PR39141) to
2792 // encode predicated DIExpressions that yield different results on different
2793 // code paths.
2794
2795 // A hoisted conditional probe should be treated as dangling so that it will
2796 // not be over-counted when the samples collected on the non-conditional path
2797 // are counted towards the conditional path. We leave it for the counts
2798 // inference algorithm to figure out a proper count for a dangling probe.
2799 moveAndDanglePseudoProbes(BB, InsertPt);
2800
2801 for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
2802 Instruction *I = &*II;
2803 I->dropUnknownNonDebugMetadata();
2804 if (I->isUsedByMetadata())
2805 dropDebugUsers(*I);
2806 if (isa<DbgInfoIntrinsic>(I)) {
2807 // Remove DbgInfo Intrinsics.
2808 II = I->eraseFromParent();
2809 continue;
2810 }
2811 I->setDebugLoc(InsertPt->getDebugLoc());
2812 ++II;
2813 }
2814 DomBlock->getInstList().splice(InsertPt->getIterator(), BB->getInstList(),
2815 BB->begin(),
2816 BB->getTerminator()->getIterator());
2817 }
2818
2819 namespace {
2820
2821 /// A potential constituent of a bitreverse or bswap expression. See
2822 /// collectBitParts for a fuller explanation.
2823 struct BitPart {
2824 BitPart(Value *P, unsigned BW) : Provider(P) {
2825 Provenance.resize(BW);
2826 }
2827
2828 /// The Value that this is a bitreverse/bswap of.
2829 Value *Provider;
2830
2831 /// The "provenance" of each bit. Provenance[A] = B means that bit A of the
2832 /// result of this expression comes from bit B in Provider.
2833 SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.
2834
2835 enum { Unset = -1 };
2836 };
2837
2838 } // end anonymous namespace
2839
2840 /// Analyze the specified subexpression and see if it is capable of providing
2841 /// pieces of a bswap or bitreverse. The subexpression provides a potential
2842 /// piece of a bswap or bitreverse if it can be proved that each non-zero bit in
2843 /// the output of the expression came from a corresponding bit in some other
2844 /// value. This function is recursive, and the end result is a mapping of
2845 /// bitnumber to bitnumber. It is the caller's responsibility to validate that
2846 /// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse.
2847 ///
2848 /// For example, if the current subexpression is "(shl i32 %X, 24)" then we know
2849 /// that the expression deposits the low byte of %X into the high byte of the
2850 /// result and that all other bits are zero. This expression is accepted and a
2851 /// BitPart is returned with Provider set to %X and Provenance[24-31] set to
2852 /// [0-7].
2853 ///
2854 /// For vector types, all analysis is performed at the per-element level. No
2855 /// cross-element analysis is supported (shuffle/insertion/reduction), and all
2856 /// constant masks must be splatted across all elements.
2857 ///
2858 /// To avoid revisiting values, the BitPart results are memoized into the
2859 /// provided map. To avoid unnecessary copying of BitParts, BitParts are
2860 /// constructed in-place in the \c BPS map. Because of this \c BPS needs to
2861 /// store BitParts objects, not pointers. As we need the concept of a nullptr
2862 /// BitParts (Value has been analyzed and the analysis failed), we use an
2863 /// Optional type instead to provide the same functionality.
2864 ///
2865 /// Because we pass around references into \c BPS, we must use a container that
2866 /// does not invalidate internal references (std::map instead of DenseMap).
2867 static const Optional<BitPart> &
2868 collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
2869 std::map<Value *, Optional<BitPart>> &BPS, int Depth) {
2870 auto I = BPS.find(V);
2871 if (I != BPS.end())
2872 return I->second;
2873
2874 auto &Result = BPS[V] = None;
2875 auto BitWidth = V->getType()->getScalarSizeInBits();
2876
2877 // Can't do integer/elements > 128 bits.
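// (Provenance stores bit indices as int8_t, so anything wider than i128 could
// not be represented anyway.)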
2878 if (BitWidth > 128) 2879 return Result; 2880 2881 // Prevent stack overflow by limiting the recursion depth 2882 if (Depth == BitPartRecursionMaxDepth) { 2883 LLVM_DEBUG(dbgs() << "collectBitParts max recursion depth reached.\n"); 2884 return Result; 2885 } 2886 2887 if (auto *I = dyn_cast<Instruction>(V)) { 2888 Value *X, *Y; 2889 const APInt *C; 2890 2891 // If this is an or instruction, it may be an inner node of the bswap. 2892 if (match(V, m_Or(m_Value(X), m_Value(Y)))) { 2893 const auto &A = 2894 collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1); 2895 const auto &B = 2896 collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS, Depth + 1); 2897 if (!A || !B) 2898 return Result; 2899 2900 // Try and merge the two together. 2901 if (!A->Provider || A->Provider != B->Provider) 2902 return Result; 2903 2904 Result = BitPart(A->Provider, BitWidth); 2905 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) { 2906 if (A->Provenance[BitIdx] != BitPart::Unset && 2907 B->Provenance[BitIdx] != BitPart::Unset && 2908 A->Provenance[BitIdx] != B->Provenance[BitIdx]) 2909 return Result = None; 2910 2911 if (A->Provenance[BitIdx] == BitPart::Unset) 2912 Result->Provenance[BitIdx] = B->Provenance[BitIdx]; 2913 else 2914 Result->Provenance[BitIdx] = A->Provenance[BitIdx]; 2915 } 2916 2917 return Result; 2918 } 2919 2920 // If this is a logical shift by a constant, recurse then shift the result. 2921 if (match(V, m_LogicalShift(m_Value(X), m_APInt(C)))) { 2922 const APInt &BitShift = *C; 2923 2924 // Ensure the shift amount is defined. 2925 if (BitShift.uge(BitWidth)) 2926 return Result; 2927 2928 const auto &Res = 2929 collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1); 2930 if (!Res) 2931 return Result; 2932 Result = Res; 2933 2934 // Perform the "shift" on BitProvenance. 2935 auto &P = Result->Provenance; 2936 if (I->getOpcode() == Instruction::Shl) { 2937 P.erase(std::prev(P.end(), BitShift.getZExtValue()), P.end()); 2938 P.insert(P.begin(), BitShift.getZExtValue(), BitPart::Unset); 2939 } else { 2940 P.erase(P.begin(), std::next(P.begin(), BitShift.getZExtValue())); 2941 P.insert(P.end(), BitShift.getZExtValue(), BitPart::Unset); 2942 } 2943 2944 return Result; 2945 } 2946 2947 // If this is a logical 'and' with a mask that clears bits, recurse then 2948 // unset the appropriate bits. 2949 if (match(V, m_And(m_Value(X), m_APInt(C)))) { 2950 const APInt &AndMask = *C; 2951 2952 // Check that the mask allows a multiple of 8 bits for a bswap, for an 2953 // early exit. 2954 unsigned NumMaskedBits = AndMask.countPopulation(); 2955 if (!MatchBitReversals && (NumMaskedBits % 8) != 0) 2956 return Result; 2957 2958 const auto &Res = 2959 collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1); 2960 if (!Res) 2961 return Result; 2962 Result = Res; 2963 2964 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) 2965 // If the AndMask is zero for this bit, clear the bit. 2966 if (AndMask[BitIdx] == 0) 2967 Result->Provenance[BitIdx] = BitPart::Unset; 2968 return Result; 2969 } 2970 2971 // If this is a zext instruction zero extend the result. 
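// For example (sketch): for "%r = zext i8 %x to i32", bits 0-7 of %r keep the
// provenance computed for %x and bits 8-31 are marked Unset (known zero).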
2972 if (match(V, m_ZExt(m_Value(X)))) { 2973 const auto &Res = 2974 collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1); 2975 if (!Res) 2976 return Result; 2977 2978 Result = BitPart(Res->Provider, BitWidth); 2979 auto NarrowBitWidth = X->getType()->getScalarSizeInBits(); 2980 for (unsigned BitIdx = 0; BitIdx < NarrowBitWidth; ++BitIdx) 2981 Result->Provenance[BitIdx] = Res->Provenance[BitIdx]; 2982 for (unsigned BitIdx = NarrowBitWidth; BitIdx < BitWidth; ++BitIdx) 2983 Result->Provenance[BitIdx] = BitPart::Unset; 2984 return Result; 2985 } 2986 2987 // If this is a truncate instruction, extract the lower bits. 2988 if (match(V, m_Trunc(m_Value(X)))) { 2989 const auto &Res = 2990 collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1); 2991 if (!Res) 2992 return Result; 2993 2994 Result = BitPart(Res->Provider, BitWidth); 2995 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) 2996 Result->Provenance[BitIdx] = Res->Provenance[BitIdx]; 2997 return Result; 2998 } 2999 3000 // BITREVERSE - most likely due to us previous matching a partial 3001 // bitreverse. 3002 if (match(V, m_BitReverse(m_Value(X)))) { 3003 const auto &Res = 3004 collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1); 3005 if (!Res) 3006 return Result; 3007 3008 Result = BitPart(Res->Provider, BitWidth); 3009 for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) 3010 Result->Provenance[(BitWidth - 1) - BitIdx] = Res->Provenance[BitIdx]; 3011 return Result; 3012 } 3013 3014 // BSWAP - most likely due to us previous matching a partial bswap. 3015 if (match(V, m_BSwap(m_Value(X)))) { 3016 const auto &Res = 3017 collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1); 3018 if (!Res) 3019 return Result; 3020 3021 unsigned ByteWidth = BitWidth / 8; 3022 Result = BitPart(Res->Provider, BitWidth); 3023 for (unsigned ByteIdx = 0; ByteIdx < ByteWidth; ++ByteIdx) { 3024 unsigned ByteBitOfs = ByteIdx * 8; 3025 for (unsigned BitIdx = 0; BitIdx < 8; ++BitIdx) 3026 Result->Provenance[(BitWidth - 8 - ByteBitOfs) + BitIdx] = 3027 Res->Provenance[ByteBitOfs + BitIdx]; 3028 } 3029 return Result; 3030 } 3031 3032 // Funnel 'double' shifts take 3 operands, 2 inputs and the shift 3033 // amount (modulo). 3034 // fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW))) 3035 // fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW)) 3036 if (match(V, m_FShl(m_Value(X), m_Value(Y), m_APInt(C))) || 3037 match(V, m_FShr(m_Value(X), m_Value(Y), m_APInt(C)))) { 3038 // We can treat fshr as a fshl by flipping the modulo amount. 3039 unsigned ModAmt = C->urem(BitWidth); 3040 if (cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::fshr) 3041 ModAmt = BitWidth - ModAmt; 3042 3043 const auto &LHS = 3044 collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS, Depth + 1); 3045 const auto &RHS = 3046 collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS, Depth + 1); 3047 3048 // Check we have both sources and they are from the same provider. 3049 if (!LHS || !RHS || !LHS->Provider || LHS->Provider != RHS->Provider) 3050 return Result; 3051 3052 unsigned StartBitRHS = BitWidth - ModAmt; 3053 Result = BitPart(LHS->Provider, BitWidth); 3054 for (unsigned BitIdx = 0; BitIdx < StartBitRHS; ++BitIdx) 3055 Result->Provenance[BitIdx + ModAmt] = LHS->Provenance[BitIdx]; 3056 for (unsigned BitIdx = 0; BitIdx < ModAmt; ++BitIdx) 3057 Result->Provenance[BitIdx] = RHS->Provenance[BitIdx + StartBitRHS]; 3058 return Result; 3059 } 3060 } 3061 3062 // Okay, we got to something that isn't a shift, 'or' or 'and'. 
  // Okay, we got to something that isn't a shift, 'or' or 'and'. This must be
  // the input value to the bswap/bitreverse.
  Result = BitPart(V, BitWidth);
  for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
    Result->Provenance[BitIdx] = BitIdx;
  return Result;
}

static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
                                          unsigned BitWidth) {
  if (From % 8 != To % 8)
    return false;
  // Convert from bit indices to byte indices and check for a byte reversal.
  From >>= 3;
  To >>= 3;
  BitWidth >>= 3;
  return From == BitWidth - To - 1;
}

static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
                                               unsigned BitWidth) {
  return From == BitWidth - To - 1;
}

bool llvm::recognizeBSwapOrBitReverseIdiom(
    Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
    SmallVectorImpl<Instruction *> &InsertedInsts) {
  if (Operator::getOpcode(I) != Instruction::Or)
    return false;
  if (!MatchBSwaps && !MatchBitReversals)
    return false;
  Type *ITy = I->getType();
  if (!ITy->isIntOrIntVectorTy() || ITy->getScalarSizeInBits() > 128)
    return false; // Can't do integer/elements > 128 bits.

  Type *DemandedTy = ITy;
  if (I->hasOneUse())
    if (auto *Trunc = dyn_cast<TruncInst>(I->user_back()))
      DemandedTy = Trunc->getType();

  // Try to find all the pieces corresponding to the bswap.
  std::map<Value *, Optional<BitPart>> BPS;
  auto Res = collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS, 0);
  if (!Res)
    return false;
  ArrayRef<int8_t> BitProvenance = Res->Provenance;
  assert(all_of(BitProvenance,
                [](int8_t I) { return I == BitPart::Unset || 0 <= I; }) &&
         "Illegal bit provenance index");

  // If the upper bits are zero, then attempt to perform as a truncated op.
  if (BitProvenance.back() == BitPart::Unset) {
    while (!BitProvenance.empty() && BitProvenance.back() == BitPart::Unset)
      BitProvenance = BitProvenance.drop_back();
    if (BitProvenance.empty())
      return false; // TODO - handle null value?
    DemandedTy = Type::getIntNTy(I->getContext(), BitProvenance.size());
    if (auto *IVecTy = dyn_cast<VectorType>(ITy))
      DemandedTy = VectorType::get(DemandedTy, IVecTy);
  }

  // Check BitProvenance hasn't found a source larger than the result type.
  unsigned DemandedBW = DemandedTy->getScalarSizeInBits();
  if (DemandedBW > ITy->getScalarSizeInBits())
    return false;
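
  // For illustration: a 32-bit value assembled by the usual shift/mask/or
  // byte-swap pattern reaches this point with
  // BitProvenance = {24..31, 16..23, 8..15, 0..7}, which the per-bit checks
  // below classify as a bswap; the fully reversed map {31, 30, ..., 0} would
  // instead be classified as a bitreverse.
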
  // Now, is the bit permutation correct for a bswap or a bitreverse? We can
  // only byteswap values with an even number of bytes.
  APInt DemandedMask = APInt::getAllOnesValue(DemandedBW);
  bool OKForBSwap = MatchBSwaps && (DemandedBW % 16) == 0;
  bool OKForBitReverse = MatchBitReversals;
  for (unsigned BitIdx = 0;
       (BitIdx < DemandedBW) && (OKForBSwap || OKForBitReverse); ++BitIdx) {
    if (BitProvenance[BitIdx] == BitPart::Unset) {
      DemandedMask.clearBit(BitIdx);
      continue;
    }
    OKForBSwap &= bitTransformIsCorrectForBSwap(BitProvenance[BitIdx], BitIdx,
                                                DemandedBW);
    OKForBitReverse &= bitTransformIsCorrectForBitReverse(
        BitProvenance[BitIdx], BitIdx, DemandedBW);
  }

  Intrinsic::ID Intrin;
  if (OKForBSwap)
    Intrin = Intrinsic::bswap;
  else if (OKForBitReverse)
    Intrin = Intrinsic::bitreverse;
  else
    return false;

  Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, DemandedTy);
  Value *Provider = Res->Provider;

  // We may need to truncate the provider.
  if (DemandedTy != Provider->getType()) {
    auto *Trunc =
        CastInst::CreateIntegerCast(Provider, DemandedTy, false, "trunc", I);
    InsertedInsts.push_back(Trunc);
    Provider = Trunc;
  }

  Instruction *Result = CallInst::Create(F, Provider, "rev", I);
  InsertedInsts.push_back(Result);

  if (!DemandedMask.isAllOnesValue()) {
    auto *Mask = ConstantInt::get(DemandedTy, DemandedMask);
    Result = BinaryOperator::Create(Instruction::And, Result, Mask, "mask", I);
    InsertedInsts.push_back(Result);
  }

  // We may need to zero extend back to the result type.
  if (ITy != Result->getType()) {
    auto *ExtInst = CastInst::CreateIntegerCast(Result, ITy, false, "zext", I);
    InsertedInsts.push_back(ExtInst);
  }

  return true;
}

// CodeGen has special handling for some string functions that may replace
// them with target-specific intrinsics. Since that'd skip our interceptors
// in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses,
// we mark affected calls as NoBuiltin, which will disable optimization
// in CodeGen.
void llvm::maybeMarkSanitizerLibraryCallNoBuiltin(
    CallInst *CI, const TargetLibraryInfo *TLI) {
  Function *F = CI->getCalledFunction();
  LibFunc Func;
  if (F && !F->hasLocalLinkage() && F->hasName() &&
      TLI->getLibFunc(F->getName(), Func) && TLI->hasOptimizedCodeGen(Func) &&
      !F->doesNotAccessMemory())
    CI->addAttribute(AttributeList::FunctionIndex, Attribute::NoBuiltin);
}

bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) {
  // We can't have a PHI with a metadata type.
  if (I->getOperand(OpIdx)->getType()->isMetadataTy())
    return false;

  // Early exit.
  if (!isa<Constant>(I->getOperand(OpIdx)))
    return true;

  switch (I->getOpcode()) {
  default:
    return true;
  case Instruction::Call:
  case Instruction::Invoke: {
    const auto &CB = cast<CallBase>(*I);

    // Can't handle inline asm. Skip it.
    if (CB.isInlineAsm())
      return false;

    // Constant bundle operands may need to retain their constant-ness for
    // correctness.
    if (CB.isBundleOperand(OpIdx))
      return false;
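
    // Illustrative example (assuming the usual intrinsic signatures): the i1
    // 'isvolatile' operand of llvm.memcpy is declared immarg, so the checks
    // below refuse to replace it with a variable, while ordinary pointer or
    // length arguments remain replaceable.
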
    if (OpIdx < CB.getNumArgOperands()) {
      // Some variadic intrinsics require constants in the variadic arguments,
      // which currently aren't markable as immarg.
      if (isa<IntrinsicInst>(CB) &&
          OpIdx >= CB.getFunctionType()->getNumParams()) {
        // This is known to be OK for stackmap.
        return CB.getIntrinsicID() == Intrinsic::experimental_stackmap;
      }

      // gcroot is a special case, since it requires a constant argument which
      // isn't also required to be a simple ConstantInt.
      if (CB.getIntrinsicID() == Intrinsic::gcroot)
        return false;

      // Some intrinsic operands are required to be immediates.
      return !CB.paramHasAttr(OpIdx, Attribute::ImmArg);
    }

    // It is never allowed to replace the call argument to an intrinsic, but it
    // may be possible for a call.
    return !isa<IntrinsicInst>(CB);
  }
  case Instruction::ShuffleVector:
    // Shufflevector masks are constant.
    return OpIdx != 2;
  case Instruction::Switch:
  case Instruction::ExtractValue:
    // All operands apart from the first are constant.
    return OpIdx == 0;
  case Instruction::InsertValue:
    // All operands apart from the first and the second are constant.
    return OpIdx < 2;
  case Instruction::Alloca:
    // Static allocas (constant size in the entry block) are handled by
    // prologue/epilogue insertion so they're free anyway. We definitely don't
    // want to make them non-constant.
    return !cast<AllocaInst>(I)->isStaticAlloca();
  case Instruction::GetElementPtr:
    if (OpIdx == 0)
      return true;
    gep_type_iterator It = gep_type_begin(I);
    for (auto E = std::next(It, OpIdx); It != E; ++It)
      if (It.isStruct())
        return false;
    return true;
  }
}

Value *llvm::invertCondition(Value *Condition) {
  // First: Check if it's a constant.
  if (Constant *C = dyn_cast<Constant>(Condition))
    return ConstantExpr::getNot(C);

  // Second: If the condition is already inverted, return the original value.
  Value *NotCondition;
  if (match(Condition, m_Not(m_Value(NotCondition))))
    return NotCondition;

  BasicBlock *Parent = nullptr;
  Instruction *Inst = dyn_cast<Instruction>(Condition);
  if (Inst)
    Parent = Inst->getParent();
  else if (Argument *Arg = dyn_cast<Argument>(Condition))
    Parent = &Arg->getParent()->getEntryBlock();
  assert(Parent && "Unsupported condition to invert");

  // Third: Check all the users for an invert.
  for (User *U : Condition->users())
    if (Instruction *I = dyn_cast<Instruction>(U))
      if (I->getParent() == Parent && match(I, m_Not(m_Specific(Condition))))
        return I;

  // Last option: Create a new instruction.
  auto *Inverted =
      BinaryOperator::CreateNot(Condition, Condition->getName() + ".inv");
  if (Inst && !isa<PHINode>(Inst))
    Inverted->insertAfter(Inst);
  else
    Inverted->insertBefore(&*Parent->getFirstInsertionPt());
  return Inverted;
}