//===- ImplicitNullChecks.cpp - Fold null checks into memory accesses -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass turns explicit null checks of the form
//
//   test %r10, %r10
//   je throw_npe
//   movl (%r10), %esi
//   ...
//
// into
//
//   faulting_load_op("movl (%r10), %esi", throw_npe)
//   ...
//
// With the help of a runtime that understands the .fault_maps section,
// faulting_load_op branches to throw_npe if executing movl (%r10), %esi incurs
// a page fault.
// Store and LoadStore are also supported.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/FaultMaps.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include <cassert>
#include <cstdint>
#include <iterator>

using namespace llvm;

static cl::opt<int> PageSize("imp-null-check-page-size",
                             cl::desc("The page size of the target in bytes"),
                             cl::init(4096), cl::Hidden);

static cl::opt<unsigned> MaxInstsToConsider(
    "imp-null-max-insts-to-consider",
    cl::desc("The max number of instructions to consider hoisting loads over "
             "(the algorithm is quadratic over this number)"),
    cl::Hidden, cl::init(8));

#define DEBUG_TYPE "implicit-null-checks"

STATISTIC(NumImplicitNullChecks,
          "Number of explicit null checks made implicit");

namespace {

class ImplicitNullChecks : public MachineFunctionPass {
  /// Return true if \c computeDependence can process \p MI.
  static bool canHandle(const MachineInstr *MI);

  /// Helper function for \c computeDependence. Return true if \p A
  /// and \p B do not have any dependences between them, and can be
  /// re-ordered without changing program semantics.
  bool canReorder(const MachineInstr *A, const MachineInstr *B);

  /// A data type for representing the result computed by \c
  /// computeDependence. States whether it is okay to reorder the
  /// instruction passed to \c computeDependence with at most one
  /// dependency.
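  ///
  /// For illustration: canHoistInst interprets this result as follows. If
  /// CanReorder is false, the candidate is rejected outright; if CanReorder is
  /// true and PotentialDependence is set, then the single instruction it
  /// points at must also be hoisted for the re-ordering to be valid.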
  struct DependenceResult {
    /// Can we actually re-order \p MI with \p Insts (see \c
    /// computeDependence).
    bool CanReorder;

    /// If non-None, an instruction in \p Insts that must also be hoisted.
    Optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence;

    /*implicit*/ DependenceResult(
        bool CanReorder,
        Optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence)
        : CanReorder(CanReorder), PotentialDependence(PotentialDependence) {
      assert((!PotentialDependence || CanReorder) &&
             "!CanReorder && PotentialDependence.hasValue() not allowed!");
    }
  };

  /// Compute a result for the following question: can \p MI be
  /// re-ordered from after \p Insts to before it.
  ///
  /// \c canHandle should return true for all instructions in \p
  /// Insts.
  DependenceResult computeDependence(const MachineInstr *MI,
                                     ArrayRef<MachineInstr *> Block);

  /// Represents one null check that can be made implicit.
  class NullCheck {
    // The memory operation the null check can be folded into.
    MachineInstr *MemOperation;

    // The instruction actually doing the null check (Ptr != 0).
    MachineInstr *CheckOperation;

    // The block the check resides in.
    MachineBasicBlock *CheckBlock;

    // The block branched to if the pointer is non-null.
    MachineBasicBlock *NotNullSucc;

    // The block branched to if the pointer is null.
    MachineBasicBlock *NullSucc;

    // If this is non-null, then MemOperation has a dependency on this
    // instruction; and it needs to be hoisted to execute before MemOperation.
    MachineInstr *OnlyDependency;

  public:
    explicit NullCheck(MachineInstr *memOperation, MachineInstr *checkOperation,
                       MachineBasicBlock *checkBlock,
                       MachineBasicBlock *notNullSucc,
                       MachineBasicBlock *nullSucc,
                       MachineInstr *onlyDependency)
        : MemOperation(memOperation), CheckOperation(checkOperation),
          CheckBlock(checkBlock), NotNullSucc(notNullSucc), NullSucc(nullSucc),
          OnlyDependency(onlyDependency) {}

    MachineInstr *getMemOperation() const { return MemOperation; }

    MachineInstr *getCheckOperation() const { return CheckOperation; }

    MachineBasicBlock *getCheckBlock() const { return CheckBlock; }

    MachineBasicBlock *getNotNullSucc() const { return NotNullSucc; }

    MachineBasicBlock *getNullSucc() const { return NullSucc; }

    MachineInstr *getOnlyDependency() const { return OnlyDependency; }
  };

  const TargetInstrInfo *TII = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  AliasAnalysis *AA = nullptr;
  MachineFrameInfo *MFI = nullptr;

  bool analyzeBlockForNullChecks(MachineBasicBlock &MBB,
                                 SmallVectorImpl<NullCheck> &NullCheckList);
  MachineInstr *insertFaultingInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                                    MachineBasicBlock *HandlerMBB);
  void rewriteNullChecks(ArrayRef<NullCheck> NullCheckList);

  enum AliasResult {
    AR_NoAlias,
    AR_MayAlias,
    AR_WillAliasEverything
  };

  /// Returns AR_NoAlias if the memory operation in \p MI does not alias with
  /// \p PrevMI, AR_MayAlias if they may alias, and AR_WillAliasEverything if
  /// they may alias and any further memory operation may alias with \p PrevMI.
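  ///
  /// For example, two loads never conflict here (AR_NoAlias), while a store in
  /// \p MI that carries no memory operands conservatively yields
  /// AR_WillAliasEverything, since no later memory operation can be proven
  /// disjoint from it.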
  AliasResult areMemoryOpsAliased(const MachineInstr &MI,
                                  const MachineInstr *PrevMI) const;

  enum SuitabilityResult {
    SR_Suitable,
    SR_Unsuitable,
    SR_Impossible
  };

  /// Return SR_Suitable if \p MI is a memory operation that can be used to
  /// implicitly null check the value in \p PointerReg, SR_Unsuitable if
  /// \p MI cannot be used for the null check, and SR_Impossible if there is
  /// no point in continuing the search because no later instruction will be
  /// usable either. \p PrevInsts is the set of instructions seen since
  /// the explicit null check on \p PointerReg.
  SuitabilityResult isSuitableMemoryOp(const MachineInstr &MI,
                                       unsigned PointerReg,
                                       ArrayRef<MachineInstr *> PrevInsts);

  /// Returns true if \p DependenceMI could clobber the live-ins of the
  /// NullSucc block if it were hoisted to the NullCheck block. This is used by
  /// the caller, canHoistInst, to decide whether DependenceMI can be hoisted
  /// safely.
  bool canDependenceHoistingClobberLiveIns(MachineInstr *DependenceMI,
                                           MachineBasicBlock *NullSucc);

  /// Return true if \p FaultingMI can be hoisted from after the
  /// instructions in \p InstsSeenSoFar to before them. Set \p Dependence to a
  /// non-null value if we also need to (and legally can) hoist a dependency.
  bool canHoistInst(MachineInstr *FaultingMI,
                    ArrayRef<MachineInstr *> InstsSeenSoFar,
                    MachineBasicBlock *NullSucc, MachineInstr *&Dependence);

public:
  static char ID;

  ImplicitNullChecks() : MachineFunctionPass(ID) {
    initializeImplicitNullChecksPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AAResultsWrapperPass>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoVRegs);
  }
};

} // end anonymous namespace

bool ImplicitNullChecks::canHandle(const MachineInstr *MI) {
  if (MI->isCall() || MI->mayRaiseFPException() ||
      MI->hasUnmodeledSideEffects())
    return false;
  auto IsRegMask = [](const MachineOperand &MO) { return MO.isRegMask(); };
  (void)IsRegMask;

  assert(!llvm::any_of(MI->operands(), IsRegMask) &&
         "Calls were filtered out above!");

  auto IsUnordered = [](MachineMemOperand *MMO) { return MMO->isUnordered(); };
  return llvm::all_of(MI->memoperands(), IsUnordered);
}

ImplicitNullChecks::DependenceResult
ImplicitNullChecks::computeDependence(const MachineInstr *MI,
                                      ArrayRef<MachineInstr *> Block) {
  assert(llvm::all_of(Block, canHandle) && "Check this first!");
  assert(!is_contained(Block, MI) && "Block must be exclusive of MI!");

  Optional<ArrayRef<MachineInstr *>::iterator> Dep;

  for (auto I = Block.begin(), E = Block.end(); I != E; ++I) {
    if (canReorder(*I, MI))
      continue;

    if (Dep == None) {
      // Found one possible dependency, keep track of it.
      Dep = I;
    } else {
      // We found two dependencies, so bail out.
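      // Note: canHoistInst can hoist at most one extra instruction along with
      // MI, so a second dependency makes the re-ordering impossible for our
      // purposes.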
      return {false, None};
    }
  }

  return {true, Dep};
}

bool ImplicitNullChecks::canReorder(const MachineInstr *A,
                                    const MachineInstr *B) {
  assert(canHandle(A) && canHandle(B) && "Precondition!");

  // canHandle makes sure that we _can_ correctly analyze the dependencies
  // between A and B here -- for instance, we should not be dealing with heap
  // load-store dependencies here.

  for (const auto &MOA : A->operands()) {
    if (!(MOA.isReg() && MOA.getReg()))
      continue;

    Register RegA = MOA.getReg();
    for (const auto &MOB : B->operands()) {
      if (!(MOB.isReg() && MOB.getReg()))
        continue;

      Register RegB = MOB.getReg();

      if (TRI->regsOverlap(RegA, RegB) && (MOA.isDef() || MOB.isDef()))
        return false;
    }
  }

  return true;
}

bool ImplicitNullChecks::runOnMachineFunction(MachineFunction &MF) {
  TII = MF.getSubtarget().getInstrInfo();
  TRI = MF.getRegInfo().getTargetRegisterInfo();
  MFI = &MF.getFrameInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  SmallVector<NullCheck, 16> NullCheckList;

  for (auto &MBB : MF)
    analyzeBlockForNullChecks(MBB, NullCheckList);

  if (!NullCheckList.empty())
    rewriteNullChecks(NullCheckList);

  return !NullCheckList.empty();
}

// Return true if any register aliasing \p Reg is live-in to \p MBB.
static bool AnyAliasLiveIn(const TargetRegisterInfo *TRI,
                           MachineBasicBlock *MBB, unsigned Reg) {
  for (MCRegAliasIterator AR(Reg, TRI, /*IncludeSelf*/ true); AR.isValid();
       ++AR)
    if (MBB->isLiveIn(*AR))
      return true;
  return false;
}

ImplicitNullChecks::AliasResult
ImplicitNullChecks::areMemoryOpsAliased(const MachineInstr &MI,
                                        const MachineInstr *PrevMI) const {
  // If it is not a memory access, skip the check.
  if (!(PrevMI->mayStore() || PrevMI->mayLoad()))
    return AR_NoAlias;
  // A pair of loads may alias, but that does not matter here.
  if (!(MI.mayStore() || PrevMI->mayStore()))
    return AR_NoAlias;
  // We lost the info, so conservatively assume they alias. If it was a store,
  // there is no point in continuing because we won't be able to check against
  // it further.
  if (MI.memoperands_empty())
    return MI.mayStore() ? AR_WillAliasEverything : AR_MayAlias;
  if (PrevMI->memoperands_empty())
    return PrevMI->mayStore() ? AR_WillAliasEverything : AR_MayAlias;

  for (MachineMemOperand *MMO1 : MI.memoperands()) {
    // MMO1 should have a value because it comes from the operation we'd like
    // to use as the implicit null check.
    assert(MMO1->getValue() && "MMO1 should have a Value!");
    for (MachineMemOperand *MMO2 : PrevMI->memoperands()) {
      if (const PseudoSourceValue *PSV = MMO2->getPseudoValue()) {
        if (PSV->mayAlias(MFI))
          return AR_MayAlias;
        continue;
      }
      llvm::AliasResult AAResult =
          AA->alias(MemoryLocation(MMO1->getValue(), LocationSize::unknown(),
                                   MMO1->getAAInfo()),
                    MemoryLocation(MMO2->getValue(), LocationSize::unknown(),
                                   MMO2->getAAInfo()));
      if (AAResult != NoAlias)
        return AR_MayAlias;
    }
  }
  return AR_NoAlias;
}

ImplicitNullChecks::SuitabilityResult
ImplicitNullChecks::isSuitableMemoryOp(const MachineInstr &MI,
                                       unsigned PointerReg,
                                       ArrayRef<MachineInstr *> PrevInsts) {
  int64_t Offset;
  bool OffsetIsScalable;
  const MachineOperand *BaseOp;

  // Implementation restriction for faulting_op insertion
  // TODO: This could be relaxed if we find a test case which warrants it.
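  // insertFaultingInstr assumes at most a single def, which becomes the result
  // operand of the FAULTING_OP pseudo it builds.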
  if (MI.getDesc().getNumDefs() > 1)
    return SR_Unsuitable;

  // FIXME: This handles only simple addressing mode.
  if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable, TRI))
    return SR_Unsuitable;

  // We need the base of the memory instruction to be the same as the register
  // where the null check is performed (i.e. PointerReg).
  if (!BaseOp->isReg() || BaseOp->getReg() != PointerReg)
    return SR_Unsuitable;

  // Scalable offsets are a part of scalable vectors (SVE for AArch64). That
  // target is in practice unsupported for ImplicitNullChecks.
  if (OffsetIsScalable)
    return SR_Unsuitable;

  if (!MI.mayLoadOrStore() || MI.isPredicable())
    return SR_Unsuitable;

  // We want the mem access to be issued at a sane offset from PointerReg,
  // so that if PointerReg is null then the access reliably page faults.
  if (!(-PageSize < Offset && Offset < PageSize))
    return SR_Unsuitable;

  // Finally, check whether the current memory access aliases with a previous
  // one.
  for (auto *PrevMI : PrevInsts) {
    AliasResult AR = areMemoryOpsAliased(MI, PrevMI);
    if (AR == AR_WillAliasEverything)
      return SR_Impossible;
    if (AR == AR_MayAlias)
      return SR_Unsuitable;
  }
  return SR_Suitable;
}

bool ImplicitNullChecks::canDependenceHoistingClobberLiveIns(
    MachineInstr *DependenceMI, MachineBasicBlock *NullSucc) {
  for (const auto &DependenceMO : DependenceMI->operands()) {
    if (!(DependenceMO.isReg() && DependenceMO.getReg()))
      continue;

    // Make sure that we won't clobber any live-ins to the sibling block by
    // hoisting Dependency. For instance, we can't hoist INST to before the
    // null check (even if it is safe, and does not violate any dependencies
    // in the non_null_block) if %rdx is live-in to _null_block.
    //
    //    test %rcx, %rcx
    //    je _null_block
    //  _non_null_block:
    //    %rdx = INST
    //    ...
    //
    // This restriction does not apply to the faulting load inst because if
    // the pointer loaded from is in the null page, the load will not
    // semantically execute and so will not affect machine state. That is, if
    // the load was loading into %rax and it faults, the value of %rax should
    // stay the same as it would have been had the load not executed and we'd
    // have branched to NullSucc directly.
    if (AnyAliasLiveIn(TRI, NullSucc, DependenceMO.getReg()))
      return true;
  }

  // The dependence does not clobber live-ins in NullSucc block.
  return false;
}

bool ImplicitNullChecks::canHoistInst(MachineInstr *FaultingMI,
                                      ArrayRef<MachineInstr *> InstsSeenSoFar,
                                      MachineBasicBlock *NullSucc,
                                      MachineInstr *&Dependence) {
  auto DepResult = computeDependence(FaultingMI, InstsSeenSoFar);
  if (!DepResult.CanReorder)
    return false;

  if (!DepResult.PotentialDependence) {
    Dependence = nullptr;
    return true;
  }

  auto DependenceItr = *DepResult.PotentialDependence;
  auto *DependenceMI = *DependenceItr;

  // We don't want to reason about speculating loads. Note -- at this point
  // we should have already filtered out all of the other non-speculatable
  // things, like calls and stores.
  // We also do not want to hoist stores, because they would change memory
  // even in the case where FaultingMI ends up faulting.
  assert(canHandle(DependenceMI) && "Should never have reached here!");
  if (DependenceMI->mayLoadOrStore())
    return false;

  if (canDependenceHoistingClobberLiveIns(DependenceMI, NullSucc))
    return false;

  auto DepDepResult =
      computeDependence(DependenceMI, {InstsSeenSoFar.begin(), DependenceItr});

  if (!DepDepResult.CanReorder || DepDepResult.PotentialDependence)
    return false;

  Dependence = DependenceMI;
  return true;
}

/// Analyze MBB to check if its terminating branch can be turned into an
/// implicit null check. If yes, append a description of said null check to
/// NullCheckList and return true, else return false.
bool ImplicitNullChecks::analyzeBlockForNullChecks(
    MachineBasicBlock &MBB, SmallVectorImpl<NullCheck> &NullCheckList) {
  using MachineBranchPredicate = TargetInstrInfo::MachineBranchPredicate;

  MDNode *BranchMD = nullptr;
  if (auto *BB = MBB.getBasicBlock())
    BranchMD = BB->getTerminator()->getMetadata(LLVMContext::MD_make_implicit);

  if (!BranchMD)
    return false;

  MachineBranchPredicate MBP;

  if (TII->analyzeBranchPredicate(MBB, MBP, true))
    return false;

  // Is the predicate comparing an integer to zero?
  if (!(MBP.LHS.isReg() && MBP.RHS.isImm() && MBP.RHS.getImm() == 0 &&
        (MBP.Predicate == MachineBranchPredicate::PRED_NE ||
         MBP.Predicate == MachineBranchPredicate::PRED_EQ)))
    return false;

  // If there is a separate condition generation instruction, we choose not to
  // transform unless we can remove both the condition and the consuming
  // branch.
  if (MBP.ConditionDef && !MBP.SingleUseCondition)
    return false;

  MachineBasicBlock *NotNullSucc, *NullSucc;

  if (MBP.Predicate == MachineBranchPredicate::PRED_NE) {
    NotNullSucc = MBP.TrueDest;
    NullSucc = MBP.FalseDest;
  } else {
    NotNullSucc = MBP.FalseDest;
    NullSucc = MBP.TrueDest;
  }

  // We handle the simplest case for now. We can potentially do better by using
  // the machine dominator tree.
  if (NotNullSucc->pred_size() != 1)
    return false;

  const Register PointerReg = MBP.LHS.getReg();

  if (MBP.ConditionDef) {
    // To prevent the invalid transformation of the following code:
    //
    //   mov %rax, %rcx
    //   test %rax, %rax
    //   %rax = ...
    //   je throw_npe
    //   mov (%rcx), %r9
    //   mov (%rax), %r10
    //
    // into:
    //
    //   mov %rax, %rcx
    //   %rax = ....
    //   faulting_load_op("movl (%rax), %r10", throw_npe)
    //   mov (%rcx), %r9
    //
    // we must ensure that there are no instructions between the 'test' and
    // conditional jump that modify %rax.
    assert(MBP.ConditionDef->getParent() == &MBB &&
           "Should be in basic block");

    for (auto I = MBB.rbegin(); MBP.ConditionDef != &*I; ++I)
      if (I->modifiesRegister(PointerReg, TRI))
        return false;
  }
  // Starting with a code fragment like:
  //
  //   test %rax, %rax
  //   jne LblNotNull
  //
  //  LblNull:
  //   callq throw_NullPointerException
  //
  //  LblNotNull:
  //   Inst0
  //   Inst1
  //   ...
  //   Def = Load (%rax + <offset>)
  //   ...
  //
  //
  // we want to end up with
  //
  //   Def = FaultingLoad (%rax + <offset>), LblNull
  //   jmp LblNotNull ;; explicit or fallthrough
  //
  //  LblNotNull:
  //   Inst0
  //   Inst1
  //   ...
  //
  //  LblNull:
  //   callq throw_NullPointerException
  //
  //
  // To see why this is legal, consider the two possibilities:
  //
  //  1. %rax is null: since we constrain <offset> to be less than PageSize,
  //     the load instruction dereferences the null page, causing a
  //     segmentation fault.
  //
  //  2. %rax is not null: in this case we know that the load cannot fault, as
  //     otherwise the load would've faulted in the original program too and
  //     the original program would've been undefined.
  //
  // This reasoning cannot be extended to justify hoisting through arbitrary
  // control flow. For instance, in the example below (in pseudo-C)
  //
  //    if (ptr == null) { throw_npe(); unreachable; }
  //    if (some_cond) { return 42; }
  //    v = ptr->field;  // LD
  //    ...
  //
  // we cannot (without code duplication) use the load marked "LD" to null
  // check ptr -- clause (2) above does not apply in this case. In the above
  // program the safety of ptr->field can be dependent on some_cond; and, for
  // instance, ptr could be some non-null invalid reference that never gets
  // loaded from because some_cond is always true.

  SmallVector<MachineInstr *, 8> InstsSeenSoFar;

  for (auto &MI : *NotNullSucc) {
    if (!canHandle(&MI) || InstsSeenSoFar.size() >= MaxInstsToConsider)
      return false;

    MachineInstr *Dependence;
    SuitabilityResult SR = isSuitableMemoryOp(MI, PointerReg, InstsSeenSoFar);
    if (SR == SR_Impossible)
      return false;
    if (SR == SR_Suitable &&
        canHoistInst(&MI, InstsSeenSoFar, NullSucc, Dependence)) {
      NullCheckList.emplace_back(&MI, MBP.ConditionDef, &MBB, NotNullSucc,
                                 NullSucc, Dependence);
      return true;
    }

    // If MI re-defines PointerReg in a way that changes the value PointerReg
    // would have had if it was null, then we cannot move further.
    if (!TII->preservesZeroValueInReg(&MI, PointerReg, TRI))
      return false;
    InstsSeenSoFar.push_back(&MI);
  }

  return false;
}

/// Wrap a machine instruction, MI, into a FAULTING machine instruction.
/// The FAULTING instruction does the same load/store as MI
/// (defining the same register), and branches to HandlerMBB if the mem access
/// faults. The FAULTING instruction is inserted at the end of MBB.
MachineInstr *ImplicitNullChecks::insertFaultingInstr(
    MachineInstr *MI, MachineBasicBlock *MBB, MachineBasicBlock *HandlerMBB) {
  const unsigned NoRegister = 0; // Guaranteed to be the NoRegister value for
                                 // all targets.

  DebugLoc DL;
  unsigned NumDefs = MI->getDesc().getNumDefs();
  assert(NumDefs <= 1 && "other cases unhandled!");

  unsigned DefReg = NoRegister;
  if (NumDefs != 0) {
    DefReg = MI->getOperand(0).getReg();
    assert(NumDefs == 1 && "expected exactly one def!");
  }

  FaultMaps::FaultKind FK;
  if (MI->mayLoad())
    FK = MI->mayStore() ?
             FaultMaps::FaultingLoadStore : FaultMaps::FaultingLoad;
  else
    FK = FaultMaps::FaultingStore;

  auto MIB = BuildMI(MBB, DL, TII->get(TargetOpcode::FAULTING_OP), DefReg)
                 .addImm(FK)
                 .addMBB(HandlerMBB)
                 .addImm(MI->getOpcode());

  for (auto &MO : MI->uses()) {
    if (MO.isReg()) {
      MachineOperand NewMO = MO;
      if (MO.isUse()) {
        NewMO.setIsKill(false);
      } else {
        assert(MO.isDef() && "Expected def or use");
        NewMO.setIsDead(false);
      }
      MIB.add(NewMO);
    } else {
      MIB.add(MO);
    }
  }

  MIB.setMemRefs(MI->memoperands());

  return MIB;
}

/// Rewrite the null checks in NullCheckList into implicit null checks.
void ImplicitNullChecks::rewriteNullChecks(
    ArrayRef<ImplicitNullChecks::NullCheck> NullCheckList) {
  DebugLoc DL;

  for (auto &NC : NullCheckList) {
    // Remove the conditional branch dependent on the null check.
    unsigned BranchesRemoved = TII->removeBranch(*NC.getCheckBlock());
    (void)BranchesRemoved;
    assert(BranchesRemoved > 0 && "expected at least one branch!");

    if (auto *DepMI = NC.getOnlyDependency()) {
      DepMI->removeFromParent();
      NC.getCheckBlock()->insert(NC.getCheckBlock()->end(), DepMI);
    }

    // Insert a faulting instruction where the conditional branch was
    // originally. The check we did earlier ensures that this bit of code
    // motion is legal. We do not touch the successors list for any basic
    // block since we haven't changed control flow; we've just made it
    // implicit.
    MachineInstr *FaultingInstr = insertFaultingInstr(
        NC.getMemOperation(), NC.getCheckBlock(), NC.getNullSucc());
    // Now the values defined by MemOperation, if any, are live-in to
    // MemOperation's block.
    // The original operation may define implicit-defs alongside
    // the value.
    MachineBasicBlock *MBB = NC.getMemOperation()->getParent();
    for (const MachineOperand &MO : FaultingInstr->operands()) {
      if (!MO.isReg() || !MO.isDef())
        continue;
      Register Reg = MO.getReg();
      if (!Reg || MBB->isLiveIn(Reg))
        continue;
      MBB->addLiveIn(Reg);
    }

    if (auto *DepMI = NC.getOnlyDependency()) {
      for (auto &MO : DepMI->operands()) {
        if (!MO.isReg() || !MO.getReg() || !MO.isDef() || MO.isDead())
          continue;
        if (!NC.getNotNullSucc()->isLiveIn(MO.getReg()))
          NC.getNotNullSucc()->addLiveIn(MO.getReg());
      }
    }

    NC.getMemOperation()->eraseFromParent();
    if (auto *CheckOp = NC.getCheckOperation())
      CheckOp->eraseFromParent();

    // Insert an *unconditional* branch to the not-null successor - we expect
    // block placement to remove fallthroughs later.
    TII->insertBranch(*NC.getCheckBlock(), NC.getNotNullSucc(), nullptr,
                      /*Cond=*/None, DL);

    NumImplicitNullChecks++;
  }
}

char ImplicitNullChecks::ID = 0;

char &llvm::ImplicitNullChecksID = ImplicitNullChecks::ID;

INITIALIZE_PASS_BEGIN(ImplicitNullChecks, DEBUG_TYPE,
                      "Implicit null checks", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(ImplicitNullChecks, DEBUG_TYPE,
                    "Implicit null checks", false, false)