//===-- ImplicitNullChecks.cpp - Fold null checks into memory accesses ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass turns explicit null checks of the form
//
//   test %r10, %r10
//   je throw_npe
//   movl (%r10), %esi
//   ...
//
// to
//
//   faulting_load_op("movl (%r10), %esi", throw_npe)
//   ...
//
// With the help of a runtime that understands the .fault_maps section,
// faulting_load_op branches to throw_npe if executing movl (%r10), %esi incurs
// a page fault.
// Store and LoadStore are also supported.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/FaultMaps.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"

using namespace llvm;

// Accesses at an offset below this bound from a null base are guaranteed to
// fault, since the zero page is unmapped at runtime.
static cl::opt<int> PageSize("imp-null-check-page-size",
                             cl::desc("The page size of the target in bytes"),
                             cl::init(4096));

static cl::opt<unsigned> MaxInstsToConsider(
    "imp-null-max-insts-to-consider",
    cl::desc("The max number of instructions to consider hoisting loads over "
             "(the algorithm is quadratic over this number)"),
    cl::init(8));

#define DEBUG_TYPE "implicit-null-checks"

STATISTIC(NumImplicitNullChecks,
          "Number of explicit null checks made implicit");

namespace {

class ImplicitNullChecks : public MachineFunctionPass {
  /// Return true if \c computeDependence can process \p MI.
  static bool canHandle(const MachineInstr *MI);

  /// Helper function for \c computeDependence. Return true if \p A
  /// and \p B do not have any dependences between them, and can be
  /// re-ordered without changing program semantics.
  bool canReorder(const MachineInstr *A, const MachineInstr *B);

  /// A data type for representing the result computed by \c
  /// computeDependence.  States whether it is okay to reorder the
  /// instruction passed to \c computeDependence with at most one
  /// dependency.
  struct DependenceResult {
    /// Can we actually re-order \p MI with \p Insts (see \c
    /// computeDependence).
    bool CanReorder;

    /// If non-None, then an instruction in \p Insts that also must be
    /// hoisted.
    Optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence;

    /*implicit*/ DependenceResult(
        bool CanReorder,
        Optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence)
        : CanReorder(CanReorder), PotentialDependence(PotentialDependence) {
      assert((!PotentialDependence || CanReorder) &&
             "!CanReorder && PotentialDependence.hasValue() not allowed!");
    }
  };

  /// Compute a result for the following question: can \p MI be
  /// re-ordered from after \p Insts to before it.
  ///
  /// \c canHandle should return true for all instructions in \p
  /// Insts.
  DependenceResult computeDependence(const MachineInstr *MI,
                                     ArrayRef<MachineInstr *> Insts);

  /// Represents one null check that can be made implicit.
  class NullCheck {
    // The memory operation the null check can be folded into.
    MachineInstr *MemOperation;

    // The instruction actually doing the null check (Ptr != 0).
    MachineInstr *CheckOperation;

    // The block the check resides in.
    MachineBasicBlock *CheckBlock;

    // The block branched to if the pointer is non-null.
    MachineBasicBlock *NotNullSucc;

    // The block branched to if the pointer is null.
    MachineBasicBlock *NullSucc;

    // If this is non-null, then MemOperation has a dependency on this
    // instruction; and it needs to be hoisted to execute before MemOperation.
    MachineInstr *OnlyDependency;

  public:
    explicit NullCheck(MachineInstr *memOperation, MachineInstr *checkOperation,
                       MachineBasicBlock *checkBlock,
                       MachineBasicBlock *notNullSucc,
                       MachineBasicBlock *nullSucc,
                       MachineInstr *onlyDependency)
        : MemOperation(memOperation), CheckOperation(checkOperation),
          CheckBlock(checkBlock), NotNullSucc(notNullSucc), NullSucc(nullSucc),
          OnlyDependency(onlyDependency) {}

    MachineInstr *getMemOperation() const { return MemOperation; }

    MachineInstr *getCheckOperation() const { return CheckOperation; }

    MachineBasicBlock *getCheckBlock() const { return CheckBlock; }

    MachineBasicBlock *getNotNullSucc() const { return NotNullSucc; }

    MachineBasicBlock *getNullSucc() const { return NullSucc; }

    MachineInstr *getOnlyDependency() const { return OnlyDependency; }
  };

  const TargetInstrInfo *TII = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  AliasAnalysis *AA = nullptr;
  MachineModuleInfo *MMI = nullptr;
  MachineFrameInfo *MFI = nullptr;

  bool analyzeBlockForNullChecks(MachineBasicBlock &MBB,
                                 SmallVectorImpl<NullCheck> &NullCheckList);
  MachineInstr *insertFaultingInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                                    MachineBasicBlock *HandlerMBB);
  void rewriteNullChecks(ArrayRef<NullCheck> NullCheckList);

  enum AliasResult {
    AR_NoAlias,
    AR_MayAlias,
    AR_WillAliasEverything
  };
  /// Returns AR_NoAlias if \p MI memory operation does not alias with
  /// \p PrevMI, AR_MayAlias if they may alias and AR_WillAliasEverything if
  /// they may alias and any further memory operation may alias with \p PrevMI.
  AliasResult areMemoryOpsAliased(MachineInstr &MI, MachineInstr *PrevMI);

  enum SuitabilityResult {
    SR_Suitable,
    SR_Unsuitable,
    SR_Impossible
  };
  /// Return SR_Suitable if \p MI is a memory operation that can be used to
  /// implicitly null check the value in \p PointerReg, SR_Unsuitable if
  /// \p MI cannot be used to null check, and SR_Impossible if there is no
  /// point continuing the search because no later instruction will be usable
  /// either.  \p PrevInsts is the set of instructions seen since the explicit
  /// null check on \p PointerReg.
  SuitabilityResult isSuitableMemoryOp(MachineInstr &MI, unsigned PointerReg,
                                       ArrayRef<MachineInstr *> PrevInsts);

  /// Return true if \p FaultingMI can be hoisted from after the
  /// instructions in \p InstsSeenSoFar to before them.  Set \p Dependence to a
  /// non-null value if we also need to (and legally can) hoist a dependency.
  bool canHoistInst(MachineInstr *FaultingMI, unsigned PointerReg,
                    ArrayRef<MachineInstr *> InstsSeenSoFar,
                    MachineBasicBlock *NullSucc, MachineInstr *&Dependence);

public:
  static char ID;

  ImplicitNullChecks() : MachineFunctionPass(ID) {
    initializeImplicitNullChecksPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AAResultsWrapperPass>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoVRegs);
  }
};

}

bool ImplicitNullChecks::canHandle(const MachineInstr *MI) {
  if (MI->isCall() || MI->hasUnmodeledSideEffects())
    return false;
  auto IsRegMask = [](const MachineOperand &MO) { return MO.isRegMask(); };
  (void)IsRegMask;

  assert(!llvm::any_of(MI->operands(), IsRegMask) &&
         "Calls were filtered out above!");

  // Volatile or atomic (ordered) accesses cannot be speculated / re-ordered.
  auto IsUnordered = [](MachineMemOperand *MMO) { return MMO->isUnordered(); };
  return llvm::all_of(MI->memoperands(), IsUnordered);
}

ImplicitNullChecks::DependenceResult
ImplicitNullChecks::computeDependence(const MachineInstr *MI,
                                      ArrayRef<MachineInstr *> Block) {
  assert(llvm::all_of(Block, canHandle) && "Check this first!");
  assert(!llvm::is_contained(Block, MI) && "Block must be exclusive of MI!");

  Optional<ArrayRef<MachineInstr *>::iterator> Dep;

  for (auto I = Block.begin(), E = Block.end(); I != E; ++I) {
    if (canReorder(*I, MI))
      continue;

    if (Dep == None) {
      // Found one possible dependency, keep track of it.
      Dep = I;
    } else {
      // We found two dependencies, so bail out.
      return {false, None};
    }
  }

  return {true, Dep};
}

bool ImplicitNullChecks::canReorder(const MachineInstr *A,
                                    const MachineInstr *B) {
  assert(canHandle(A) && canHandle(B) && "Precondition!");

  // canHandle makes sure that we _can_ correctly analyze the dependencies
  // between A and B here -- for instance, we should not be dealing with heap
  // load-store dependencies here.

  // A and B conflict only if some register is written by one and accessed by
  // the other (read-read overlaps are fine).
  for (auto MOA : A->operands()) {
    if (!(MOA.isReg() && MOA.getReg()))
      continue;

    unsigned RegA = MOA.getReg();
    for (auto MOB : B->operands()) {
      if (!(MOB.isReg() && MOB.getReg()))
        continue;

      unsigned RegB = MOB.getReg();

      if (TRI->regsOverlap(RegA, RegB) && (MOA.isDef() || MOB.isDef()))
        return false;
    }
  }

  return true;
}

bool ImplicitNullChecks::runOnMachineFunction(MachineFunction &MF) {
  TII = MF.getSubtarget().getInstrInfo();
  TRI = MF.getRegInfo().getTargetRegisterInfo();
  MMI = &MF.getMMI();
  MFI = &MF.getFrameInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  SmallVector<NullCheck, 16> NullCheckList;

  // First gather all foldable null checks, then rewrite them in one batch.
  for (auto &MBB : MF)
    analyzeBlockForNullChecks(MBB, NullCheckList);

  if (!NullCheckList.empty())
    rewriteNullChecks(NullCheckList);

  return !NullCheckList.empty();
}

// Return true if any register aliasing \p Reg is live-in into \p MBB.
static bool AnyAliasLiveIn(const TargetRegisterInfo *TRI,
                           MachineBasicBlock *MBB, unsigned Reg) {
  for (MCRegAliasIterator AR(Reg, TRI, /*IncludeSelf*/ true); AR.isValid();
       ++AR)
    if (MBB->isLiveIn(*AR))
      return true;
  return false;
}

ImplicitNullChecks::AliasResult
ImplicitNullChecks::areMemoryOpsAliased(MachineInstr &MI,
                                        MachineInstr *PrevMI) {
  // If it is not a memory access, skip the check.
  if (!(PrevMI->mayStore() || PrevMI->mayLoad()))
    return AR_NoAlias;
  // Load-Load may alias
  if (!(MI.mayStore() || PrevMI->mayStore()))
    return AR_NoAlias;
  // We lost info, conservatively alias.  If it was a store then there is no
  // point continuing, because we won't be able to check against it further.
  if (MI.memoperands_empty())
    return MI.mayStore() ? AR_WillAliasEverything : AR_MayAlias;
  if (PrevMI->memoperands_empty())
    return PrevMI->mayStore() ? AR_WillAliasEverything : AR_MayAlias;

  for (MachineMemOperand *MMO1 : MI.memoperands()) {
    // MMO1 should have a Value because it comes from the operation we'd like
    // to use as the implicit null check.
    assert(MMO1->getValue() && "MMO1 should have a Value!");
    for (MachineMemOperand *MMO2 : PrevMI->memoperands()) {
      if (const PseudoSourceValue *PSV = MMO2->getPseudoValue()) {
        if (PSV->mayAlias(MFI))
          return AR_MayAlias;
        continue;
      }
      llvm::AliasResult AAResult = AA->alias(
          MemoryLocation(MMO1->getValue(), MemoryLocation::UnknownSize,
                         MMO1->getAAInfo()),
          MemoryLocation(MMO2->getValue(), MemoryLocation::UnknownSize,
                         MMO2->getAAInfo()));
      if (AAResult != NoAlias)
        return AR_MayAlias;
    }
  }
  return AR_NoAlias;
}

ImplicitNullChecks::SuitabilityResult
ImplicitNullChecks::isSuitableMemoryOp(MachineInstr &MI, unsigned PointerReg,
                                       ArrayRef<MachineInstr *> PrevInsts) {
  int64_t Offset;
  unsigned BaseReg;

  if (!TII->getMemOpBaseRegImmOfs(MI, BaseReg, Offset, TRI) ||
      BaseReg != PointerReg)
    return SR_Unsuitable;

  // We want the mem access to be issued at a sane offset from PointerReg,
  // so that if PointerReg is null then the access reliably page faults.
  if (!((MI.mayLoad() || MI.mayStore()) && !MI.isPredicable() &&
        Offset < PageSize))
    return SR_Unsuitable;

  // Finally, check whether the current memory access aliases with previous one.
  for (auto *PrevMI : PrevInsts) {
    AliasResult AR = areMemoryOpsAliased(MI, PrevMI);
    if (AR == AR_WillAliasEverything)
      return SR_Impossible;
    if (AR == AR_MayAlias)
      return SR_Unsuitable;
  }
  return SR_Suitable;
}

bool ImplicitNullChecks::canHoistInst(MachineInstr *FaultingMI,
                                      unsigned PointerReg,
                                      ArrayRef<MachineInstr *> InstsSeenSoFar,
                                      MachineBasicBlock *NullSucc,
                                      MachineInstr *&Dependence) {
  auto DepResult = computeDependence(FaultingMI, InstsSeenSoFar);
  if (!DepResult.CanReorder)
    return false;

  if (!DepResult.PotentialDependence) {
    Dependence = nullptr;
    return true;
  }

  auto DependenceItr = *DepResult.PotentialDependence;
  auto *DependenceMI = *DependenceItr;

  // We don't want to reason about speculating loads.  Note -- at this point
  // we should have already filtered out all of the other non-speculatable
  // things, like calls and stores.
  // We also do not want to hoist stores because it might change the memory
  // while the FaultingMI may result in faulting.
  assert(canHandle(DependenceMI) && "Should never have reached here!");
  if (DependenceMI->mayLoadOrStore())
    return false;

  for (auto &DependenceMO : DependenceMI->operands()) {
    if (!(DependenceMO.isReg() && DependenceMO.getReg()))
      continue;

    // Make sure that we won't clobber any live ins to the sibling block by
    // hoisting Dependency.  For instance, we can't hoist INST to before the
    // null check (even if it is safe, and does not violate any dependencies
    // in the non_null_block) if %rdx is live in to _null_block.
    //
    //    test %rcx, %rcx
    //    je _null_block
    //  _non_null_block:
    //    %rdx<def> = INST
    //    ...
    //
    // This restriction does not apply to the faulting load inst because in
    // case the pointer loaded from is in the null page, the load will not
    // semantically execute, and affect machine state.  That is, if the load
    // was loading into %rax and it faults, the value of %rax should stay the
    // same as it would have been had the load not have executed and we'd have
    // branched to NullSucc directly.
    if (AnyAliasLiveIn(TRI, NullSucc, DependenceMO.getReg()))
      return false;

    // The Dependency can't be re-defining the base register -- then we won't
    // get the memory operation on the address we want.  This is already
    // checked in \c IsSuitableMemoryOp.
    assert(!(DependenceMO.isDef() &&
             TRI->regsOverlap(DependenceMO.getReg(), PointerReg)) &&
           "Should have been checked before!");
  }

  // The dependency itself must be hoistable over everything that precedes it
  // with no further dependencies of its own.
  auto DepDepResult =
      computeDependence(DependenceMI, {InstsSeenSoFar.begin(), DependenceItr});

  if (!DepDepResult.CanReorder || DepDepResult.PotentialDependence)
    return false;

  Dependence = DependenceMI;
  return true;
}

/// Analyze MBB to check if its terminating branch can be turned into an
/// implicit null check.  If yes, append a description of the said null check
/// to NullCheckList and return true, else return false.
bool ImplicitNullChecks::analyzeBlockForNullChecks(
    MachineBasicBlock &MBB, SmallVectorImpl<NullCheck> &NullCheckList) {
  typedef TargetInstrInfo::MachineBranchPredicate MachineBranchPredicate;

  // Only branches the frontend explicitly tagged with !make.implicit are
  // candidates.
  MDNode *BranchMD = nullptr;
  if (auto *BB = MBB.getBasicBlock())
    BranchMD = BB->getTerminator()->getMetadata(LLVMContext::MD_make_implicit);

  if (!BranchMD)
    return false;

  MachineBranchPredicate MBP;

  if (TII->analyzeBranchPredicate(MBB, MBP, true))
    return false;

  // Is the predicate comparing an integer to zero?
  if (!(MBP.LHS.isReg() && MBP.RHS.isImm() && MBP.RHS.getImm() == 0 &&
        (MBP.Predicate == MachineBranchPredicate::PRED_NE ||
         MBP.Predicate == MachineBranchPredicate::PRED_EQ)))
    return false;

  // If we cannot erase the test instruction itself, then making the null check
  // implicit does not buy us much.
  if (!MBP.SingleUseCondition)
    return false;

  MachineBasicBlock *NotNullSucc, *NullSucc;

  if (MBP.Predicate == MachineBranchPredicate::PRED_NE) {
    NotNullSucc = MBP.TrueDest;
    NullSucc = MBP.FalseDest;
  } else {
    NotNullSucc = MBP.FalseDest;
    NullSucc = MBP.TrueDest;
  }

  // We handle the simplest case for now.  We can potentially do better by
  // using the machine dominator tree.
  if (NotNullSucc->pred_size() != 1)
    return false;

  // Starting with a code fragment like:
  //
  //   test %RAX, %RAX
  //   jne LblNotNull
  //
  //  LblNull:
  //   callq throw_NullPointerException
  //
  //  LblNotNull:
  //   Inst0
  //   Inst1
  //   ...
  //   Def = Load (%RAX + <offset>)
  //   ...
  //
  //
  // we want to end up with
  //
  //   Def = FaultingLoad (%RAX + <offset>), LblNull
  //   jmp LblNotNull ;; explicit or fallthrough
  //
  //  LblNotNull:
  //   Inst0
  //   Inst1
  //   ...
  //
  //  LblNull:
  //   callq throw_NullPointerException
  //
  //
  // To see why this is legal, consider the two possibilities:
  //
  //  1. %RAX is null: since we constrain <offset> to be less than PageSize,
  //     the load instruction dereferences the null page, causing a
  //     segmentation fault.
  //
  //  2. %RAX is not null: in this case we know that the load cannot fault, as
  //     otherwise the load would've faulted in the original program too and
  //     the original program would've been undefined.
  //
  // This reasoning cannot be extended to justify hoisting through arbitrary
  // control flow.  For instance, in the example below (in pseudo-C)
  //
  //    if (ptr == null) { throw_npe(); unreachable; }
  //    if (some_cond) { return 42; }
  //    v = ptr->field;  // LD
  //    ...
  //
  // we cannot (without code duplication) use the load marked "LD" to null
  // check ptr -- clause (2) above does not apply in this case.  In the above
  // program the safety of ptr->field can be dependent on some_cond; and, for
  // instance, ptr could be some non-null invalid reference that never gets
  // loaded from because some_cond is always true.

  const unsigned PointerReg = MBP.LHS.getReg();

  SmallVector<MachineInstr *, 8> InstsSeenSoFar;

  for (auto &MI : *NotNullSucc) {
    if (!canHandle(&MI) || InstsSeenSoFar.size() >= MaxInstsToConsider)
      return false;

    MachineInstr *Dependence;
    SuitabilityResult SR = isSuitableMemoryOp(MI, PointerReg, InstsSeenSoFar);
    if (SR == SR_Impossible)
      return false;
    if (SR == SR_Suitable &&
        canHoistInst(&MI, PointerReg, InstsSeenSoFar, NullSucc, Dependence)) {
      NullCheckList.emplace_back(&MI, MBP.ConditionDef, &MBB, NotNullSucc,
                                 NullSucc, Dependence);
      return true;
    }

    // If MI re-defines the PointerReg then we cannot move further.
    if (any_of(MI.operands(), [&](MachineOperand &MO) {
          return MO.isReg() && MO.getReg() && MO.isDef() &&
                 TRI->regsOverlap(MO.getReg(), PointerReg);
        }))
      return false;
    InstsSeenSoFar.push_back(&MI);
  }

  return false;
}

/// Wrap a machine instruction, MI, into a FAULTING machine instruction.
/// The FAULTING instruction does the same load/store as MI
/// (defining the same register), and branches to HandlerMBB if the mem access
/// faults.  The FAULTING instruction is inserted at the end of MBB.
MachineInstr *ImplicitNullChecks::insertFaultingInstr(
    MachineInstr *MI, MachineBasicBlock *MBB, MachineBasicBlock *HandlerMBB) {
  const unsigned NoRegister = 0; // Guaranteed to be the NoRegister value for
                                 // all targets.

  DebugLoc DL;
  unsigned NumDefs = MI->getDesc().getNumDefs();
  assert(NumDefs <= 1 && "other cases unhandled!");

  unsigned DefReg = NoRegister;
  if (NumDefs != 0) {
    DefReg = MI->defs().begin()->getReg();
    assert(std::distance(MI->defs().begin(), MI->defs().end()) == 1 &&
           "expected exactly one def!");
  }

  // Pick the fault kind matching the wrapped instruction's memory behavior
  // (load, store, or both) so the FaultMaps entry is emitted correctly.
  FaultMaps::FaultKind FK;
  if (MI->mayLoad())
    FK =
        MI->mayStore() ? FaultMaps::FaultingLoadStore : FaultMaps::FaultingLoad;
  else
    FK = FaultMaps::FaultingStore;

  // FAULTING_OP operands: fault kind, handler block, original opcode, then the
  // original instruction's non-def operands copied over.
  auto MIB = BuildMI(MBB, DL, TII->get(TargetOpcode::FAULTING_OP), DefReg)
                 .addImm(FK)
                 .addMBB(HandlerMBB)
                 .addImm(MI->getOpcode());

  for (auto &MO : MI->uses()) {
    if (MO.isReg()) {
      MachineOperand NewMO = MO;
      // Clear kill/dead flags: the operands' live ranges change once the
      // instruction moves to the end of the check block.
      if (MO.isUse()) {
        NewMO.setIsKill(false);
      } else {
        assert(MO.isDef() && "Expected def or use");
        NewMO.setIsDead(false);
      }
      MIB.add(NewMO);
    } else {
      MIB.add(MO);
    }
  }

  // Preserve the memory operands so AA and the fault-map emission still see
  // the original access.
  MIB.setMemRefs(MI->memoperands_begin(), MI->memoperands_end());

  return MIB;
}

/// Rewrite the null checks in NullCheckList into implicit null checks.
void ImplicitNullChecks::rewriteNullChecks(
    ArrayRef<ImplicitNullChecks::NullCheck> NullCheckList) {
  DebugLoc DL;

  for (auto &NC : NullCheckList) {
    // Remove the conditional branch dependent on the null check.
    unsigned BranchesRemoved = TII->removeBranch(*NC.getCheckBlock());
    (void)BranchesRemoved;
    assert(BranchesRemoved > 0 && "expected at least one branch!");

    // If the memory operation had a dependency, hoist it into the check block
    // so it executes before the faulting instruction.
    if (auto *DepMI = NC.getOnlyDependency()) {
      DepMI->removeFromParent();
      NC.getCheckBlock()->insert(NC.getCheckBlock()->end(), DepMI);
    }

    // Insert a faulting instruction where the conditional branch was
    // originally.  The check earlier ensures that this bit of code motion
    // is legal.  We do not touch the successors list for any basic block
    // since we haven't changed control flow, we've just made it implicit.
    MachineInstr *FaultingInstr = insertFaultingInstr(
        NC.getMemOperation(), NC.getCheckBlock(), NC.getNullSucc());
    // Now the values defined by MemOperation, if any, are live-in of
    // the block of MemOperation.
    // The original operation may define implicit-defs alongside
    // the value.
    MachineBasicBlock *MBB = NC.getMemOperation()->getParent();
    for (const MachineOperand &MO : FaultingInstr->operands()) {
      if (!MO.isReg() || !MO.isDef())
        continue;
      unsigned Reg = MO.getReg();
      if (!Reg || MBB->isLiveIn(Reg))
        continue;
      MBB->addLiveIn(Reg);
    }

    // Likewise, registers defined by the hoisted dependency become live-in of
    // the not-null successor.
    if (auto *DepMI = NC.getOnlyDependency()) {
      for (auto &MO : DepMI->operands()) {
        if (!MO.isReg() || !MO.getReg() || !MO.isDef())
          continue;
        if (!NC.getNotNullSucc()->isLiveIn(MO.getReg()))
          NC.getNotNullSucc()->addLiveIn(MO.getReg());
      }
    }

    NC.getMemOperation()->eraseFromParent();
    NC.getCheckOperation()->eraseFromParent();

    // Insert an *unconditional* branch to not-null successor.
    TII->insertBranch(*NC.getCheckBlock(), NC.getNotNullSucc(), nullptr,
                      /*Cond=*/None, DL);

    NumImplicitNullChecks++;
  }
}


char ImplicitNullChecks::ID = 0;
char &llvm::ImplicitNullChecksID = ImplicitNullChecks::ID;
INITIALIZE_PASS_BEGIN(ImplicitNullChecks, DEBUG_TYPE,
                      "Implicit null checks", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(ImplicitNullChecks, DEBUG_TYPE,
                    "Implicit null checks", false, false)