//===- ImplicitNullChecks.cpp - Fold null checks into memory accesses ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass turns explicit null checks of the form
//
//   test %r10, %r10
//   je throw_npe
//   movl (%r10), %esi
//   ...
//
// into
//
//   faulting_load_op("movl (%r10), %esi", throw_npe)
//   ...
//
// With the help of a runtime that understands the .fault_maps section,
// faulting_load_op branches to throw_npe if executing movl (%r10), %esi incurs
// a page fault.
// Store and LoadStore operations are also supported.
//
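// At the IR level, the transformation is driven by explicit !make.implicit
// metadata on the branch performing the null check (this is what
// analyzeBlockForNullChecks tests for), along the lines of:
//
//   %is_null = icmp eq i32* %ptr, null
//   br i1 %is_null, label %throw_npe, label %not_null, !make.implicit !0
//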
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/FaultMaps.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetOpcodes.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <cassert>
#include <cstdint>
#include <iterator>

using namespace llvm;

static cl::opt<int> PageSize("imp-null-check-page-size",
                             cl::desc("The page size of the target in bytes"),
                             cl::init(4096));

static cl::opt<unsigned> MaxInstsToConsider(
    "imp-null-max-insts-to-consider",
    cl::desc("The max number of instructions to consider hoisting loads over "
             "(the algorithm is quadratic over this number)"),
    cl::init(8));

#define DEBUG_TYPE "implicit-null-checks"

STATISTIC(NumImplicitNullChecks,
          "Number of explicit null checks made implicit");

namespace {

class ImplicitNullChecks : public MachineFunctionPass {
  /// Return true if \c computeDependence can process \p MI.
  static bool canHandle(const MachineInstr *MI);

  /// Helper function for \c computeDependence. Return true if \p A
  /// and \p B do not have any dependences between them, and can be
  /// re-ordered without changing program semantics.
  bool canReorder(const MachineInstr *A, const MachineInstr *B);

  /// A data type for representing the result computed by \c
  /// computeDependence. States whether it is okay to reorder the
  /// instruction passed to \c computeDependence with at most one
  /// dependency.
  struct DependenceResult {
    /// Can we actually re-order \p MI with \p Insts (see \c
    /// computeDependence).
    bool CanReorder;

    /// If non-None, then an instruction in \p Insts that also must be
    /// hoisted.
    Optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence;

    /*implicit*/ DependenceResult(
        bool CanReorder,
        Optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence)
        : CanReorder(CanReorder), PotentialDependence(PotentialDependence) {
      assert((!PotentialDependence || CanReorder) &&
             "!CanReorder && PotentialDependence.hasValue() not allowed!");
    }
  };

  /// Compute a result for the following question: can \p MI be
  /// re-ordered from after \p Insts to before it.
  ///
  /// \c canHandle should return true for all instructions in \p
  /// Insts.
  DependenceResult computeDependence(const MachineInstr *MI,
                                     ArrayRef<MachineInstr *> Insts);

  /// Represents one null check that can be made implicit.
  class NullCheck {
    // The memory operation the null check can be folded into.
    MachineInstr *MemOperation;

    // The instruction actually doing the null check (Ptr != 0).
    MachineInstr *CheckOperation;

    // The block the check resides in.
    MachineBasicBlock *CheckBlock;

    // The block branched to if the pointer is non-null.
    MachineBasicBlock *NotNullSucc;

    // The block branched to if the pointer is null.
    MachineBasicBlock *NullSucc;

    // If this is non-null, then MemOperation has a dependency on this
    // instruction; and it needs to be hoisted to execute before MemOperation.
    MachineInstr *OnlyDependency;

  public:
    explicit NullCheck(MachineInstr *memOperation, MachineInstr *checkOperation,
                       MachineBasicBlock *checkBlock,
                       MachineBasicBlock *notNullSucc,
                       MachineBasicBlock *nullSucc,
                       MachineInstr *onlyDependency)
        : MemOperation(memOperation), CheckOperation(checkOperation),
          CheckBlock(checkBlock), NotNullSucc(notNullSucc), NullSucc(nullSucc),
          OnlyDependency(onlyDependency) {}

    MachineInstr *getMemOperation() const { return MemOperation; }

    MachineInstr *getCheckOperation() const { return CheckOperation; }

    MachineBasicBlock *getCheckBlock() const { return CheckBlock; }

    MachineBasicBlock *getNotNullSucc() const { return NotNullSucc; }

    MachineBasicBlock *getNullSucc() const { return NullSucc; }

    MachineInstr *getOnlyDependency() const { return OnlyDependency; }
  };

  const TargetInstrInfo *TII = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  AliasAnalysis *AA = nullptr;
  MachineFrameInfo *MFI = nullptr;

  bool analyzeBlockForNullChecks(MachineBasicBlock &MBB,
                                 SmallVectorImpl<NullCheck> &NullCheckList);
  MachineInstr *insertFaultingInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                                    MachineBasicBlock *HandlerMBB);
  void rewriteNullChecks(ArrayRef<NullCheck> NullCheckList);

  enum AliasResult {
    AR_NoAlias,
    AR_MayAlias,
    AR_WillAliasEverything
  };

  /// Returns AR_NoAlias if the memory operation in \p MI does not alias with
  /// \p PrevMI, AR_MayAlias if they may alias, and AR_WillAliasEverything if
  /// they may alias and any further memory operation may alias with \p PrevMI
  /// as well.
  AliasResult areMemoryOpsAliased(MachineInstr &MI, MachineInstr *PrevMI);

  enum SuitabilityResult {
    SR_Suitable,
    SR_Unsuitable,
    SR_Impossible
  };
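
  // Note the correspondence with AliasResult above: AR_WillAliasEverything
  // for any previously seen instruction makes the whole search SR_Impossible,
  // while a mere AR_MayAlias only disqualifies the current candidate
  // (SR_Unsuitable).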

  /// Return SR_Suitable if \p MI is a memory operation that can be used to
  /// implicitly null check the value in \p PointerReg, SR_Unsuitable if
  /// \p MI cannot be used to null check, and SR_Impossible if no instruction
  /// after \p MI can be used either, so the search should be aborted.
  /// \p PrevInsts is the set of instructions seen since the explicit null
  /// check on \p PointerReg.
  SuitabilityResult isSuitableMemoryOp(MachineInstr &MI, unsigned PointerReg,
                                       ArrayRef<MachineInstr *> PrevInsts);

  /// Return true if \p FaultingMI can be hoisted from after the
  /// instructions in \p InstsSeenSoFar to before them. Set \p Dependence to a
  /// non-null value if we also need to (and legally can) hoist a dependency.
  bool canHoistInst(MachineInstr *FaultingMI, unsigned PointerReg,
                    ArrayRef<MachineInstr *> InstsSeenSoFar,
                    MachineBasicBlock *NullSucc, MachineInstr *&Dependence);

public:
  static char ID;

  ImplicitNullChecks() : MachineFunctionPass(ID) {
    initializeImplicitNullChecksPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AAResultsWrapperPass>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoVRegs);
  }
};

} // end anonymous namespace

bool ImplicitNullChecks::canHandle(const MachineInstr *MI) {
  if (MI->isCall() || MI->hasUnmodeledSideEffects())
    return false;
  auto IsRegMask = [](const MachineOperand &MO) { return MO.isRegMask(); };
  (void)IsRegMask;

  assert(!llvm::any_of(MI->operands(), IsRegMask) &&
         "Calls were filtered out above!");

  auto IsUnordered = [](MachineMemOperand *MMO) { return MMO->isUnordered(); };
  return llvm::all_of(MI->memoperands(), IsUnordered);
}

ImplicitNullChecks::DependenceResult
ImplicitNullChecks::computeDependence(const MachineInstr *MI,
                                      ArrayRef<MachineInstr *> Block) {
  assert(llvm::all_of(Block, canHandle) && "Check this first!");
  assert(!is_contained(Block, MI) && "Block must be exclusive of MI!");

  Optional<ArrayRef<MachineInstr *>::iterator> Dep;

  for (auto I = Block.begin(), E = Block.end(); I != E; ++I) {
    if (canReorder(*I, MI))
      continue;

    if (Dep == None) {
      // Found one possible dependency, keep track of it.
      Dep = I;
    } else {
      // We found two dependencies, so bail out.
      return {false, None};
    }
  }

  return {true, Dep};
}

bool ImplicitNullChecks::canReorder(const MachineInstr *A,
                                    const MachineInstr *B) {
  assert(canHandle(A) && canHandle(B) && "Precondition!");

  // canHandle makes sure that we _can_ correctly analyze the dependencies
  // between A and B here -- for instance, we should not be dealing with heap
  // load-store dependencies here.
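  //
  // Two instructions can be reordered only if no register operand of one
  // overlaps a register operand of the other with a def involved: def-def,
  // def-use, and use-def overlaps all create an ordering constraint.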

  for (auto MOA : A->operands()) {
    if (!(MOA.isReg() && MOA.getReg()))
      continue;

    unsigned RegA = MOA.getReg();
    for (auto MOB : B->operands()) {
      if (!(MOB.isReg() && MOB.getReg()))
        continue;

      unsigned RegB = MOB.getReg();

      if (TRI->regsOverlap(RegA, RegB) && (MOA.isDef() || MOB.isDef()))
        return false;
    }
  }

  return true;
}

bool ImplicitNullChecks::runOnMachineFunction(MachineFunction &MF) {
  TII = MF.getSubtarget().getInstrInfo();
  TRI = MF.getRegInfo().getTargetRegisterInfo();
  MFI = &MF.getFrameInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  SmallVector<NullCheck, 16> NullCheckList;

  for (auto &MBB : MF)
    analyzeBlockForNullChecks(MBB, NullCheckList);

  if (!NullCheckList.empty())
    rewriteNullChecks(NullCheckList);

  return !NullCheckList.empty();
}

// Return true if any register aliasing \p Reg is live-in into \p MBB.
static bool AnyAliasLiveIn(const TargetRegisterInfo *TRI,
                           MachineBasicBlock *MBB, unsigned Reg) {
  for (MCRegAliasIterator AR(Reg, TRI, /*IncludeSelf*/ true); AR.isValid();
       ++AR)
    if (MBB->isLiveIn(*AR))
      return true;
  return false;
}

ImplicitNullChecks::AliasResult
ImplicitNullChecks::areMemoryOpsAliased(MachineInstr &MI,
                                        MachineInstr *PrevMI) {
  // If PrevMI is not a memory access, there is nothing to check against.
  if (!(PrevMI->mayStore() || PrevMI->mayLoad()))
    return AR_NoAlias;
  // A pair of loads is harmless even if the locations alias.
  if (!(MI.mayStore() || PrevMI->mayStore()))
    return AR_NoAlias;
  // We lost the aliasing info; be conservative. If the offending instruction
  // is a store, there is no point in continuing, because we will never be
  // able to prove no-alias against it further down.
  if (MI.memoperands_empty())
    return MI.mayStore() ? AR_WillAliasEverything : AR_MayAlias;
  if (PrevMI->memoperands_empty())
    return PrevMI->mayStore() ? AR_WillAliasEverything : AR_MayAlias;

  for (MachineMemOperand *MMO1 : MI.memoperands()) {
    // MMO1 should have a value, because it comes from the operation we'd like
    // to use as the implicit null check.
    assert(MMO1->getValue() && "MMO1 should have a Value!");
    for (MachineMemOperand *MMO2 : PrevMI->memoperands()) {
      if (const PseudoSourceValue *PSV = MMO2->getPseudoValue()) {
        if (PSV->mayAlias(MFI))
          return AR_MayAlias;
        continue;
      }
      llvm::AliasResult AAResult = AA->alias(
          MemoryLocation(MMO1->getValue(), MemoryLocation::UnknownSize,
                         MMO1->getAAInfo()),
          MemoryLocation(MMO2->getValue(), MemoryLocation::UnknownSize,
                         MMO2->getAAInfo()));
      if (AAResult != NoAlias)
        return AR_MayAlias;
    }
  }
  return AR_NoAlias;
}

ImplicitNullChecks::SuitabilityResult
ImplicitNullChecks::isSuitableMemoryOp(MachineInstr &MI, unsigned PointerReg,
                                       ArrayRef<MachineInstr *> PrevInsts) {
  int64_t Offset;
  unsigned BaseReg;

  if (!TII->getMemOpBaseRegImmOfs(MI, BaseReg, Offset, TRI) ||
      BaseReg != PointerReg)
    return SR_Unsuitable;

  // We want the mem access to be issued at a sane offset from PointerReg,
  // so that if PointerReg is null then the access reliably page faults.
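  // For example, with the default 4096-byte page size, an access at
  // (%rax + 100) is guaranteed to fault if %rax is null (the null page is
  // unmapped), while an access at (%rax + 8192) could land on a mapped page
  // and silently succeed.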
  if (!((MI.mayLoad() || MI.mayStore()) && !MI.isPredicable() &&
        -PageSize < Offset && Offset < PageSize))
    return SR_Unsuitable;

  // Finally, check whether the current memory access aliases with a previous
  // one.
  for (auto *PrevMI : PrevInsts) {
    AliasResult AR = areMemoryOpsAliased(MI, PrevMI);
    if (AR == AR_WillAliasEverything)
      return SR_Impossible;
    if (AR == AR_MayAlias)
      return SR_Unsuitable;
  }
  return SR_Suitable;
}

bool ImplicitNullChecks::canHoistInst(MachineInstr *FaultingMI,
                                      unsigned PointerReg,
                                      ArrayRef<MachineInstr *> InstsSeenSoFar,
                                      MachineBasicBlock *NullSucc,
                                      MachineInstr *&Dependence) {
  auto DepResult = computeDependence(FaultingMI, InstsSeenSoFar);
  if (!DepResult.CanReorder)
    return false;

  if (!DepResult.PotentialDependence) {
    Dependence = nullptr;
    return true;
  }

  auto DependenceItr = *DepResult.PotentialDependence;
  auto *DependenceMI = *DependenceItr;

  // We don't want to reason about speculating loads. Note -- at this point
  // we should have already filtered out all of the other non-speculatable
  // things, like calls and stores.
  // We also do not want to hoist stores, because doing so could change the
  // memory state observed in the case where FaultingMI does fault.
  assert(canHandle(DependenceMI) && "Should never have reached here!");
  if (DependenceMI->mayLoadOrStore())
    return false;

  for (auto &DependenceMO : DependenceMI->operands()) {
    if (!(DependenceMO.isReg() && DependenceMO.getReg()))
      continue;

    // Make sure that we won't clobber any live-ins to the sibling block by
    // hoisting Dependency. For instance, we can't hoist INST to before the
    // null check (even if it is safe, and does not violate any dependencies
    // in the non_null_block) if %rdx is live-in to _null_block.
    //
    //    test %rcx, %rcx
    //    je _null_block
    //  _non_null_block:
    //    %rdx<def> = INST
    //    ...
    //
    // This restriction does not apply to the faulting load inst, because if
    // the pointer loaded from is in the null page, the load will not
    // semantically execute or affect machine state. That is, if the load
    // was loading into %rax and it faults, the value of %rax should stay the
    // same as it would have been had the load not executed, and we'd have
    // branched to NullSucc directly.
    if (AnyAliasLiveIn(TRI, NullSucc, DependenceMO.getReg()))
      return false;

    // The dependency can't be re-defining the base register -- then we won't
    // get the memory operation on the address we want. This is already
    // checked in \c isSuitableMemoryOp.
    assert(!(DependenceMO.isDef() &&
             TRI->regsOverlap(DependenceMO.getReg(), PointerReg)) &&
           "Should have been checked before!");
  }

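  // The dependency must in turn be hoistable over everything that precedes
  // it, with no further dependency of its own -- we do not chase chains of
  // dependencies.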
  auto DepDepResult =
      computeDependence(DependenceMI, {InstsSeenSoFar.begin(), DependenceItr});

  if (!DepDepResult.CanReorder || DepDepResult.PotentialDependence)
    return false;

  Dependence = DependenceMI;
  return true;
}

/// Analyze MBB to check if its terminating branch can be turned into an
/// implicit null check. If yes, append a description of the said null check
/// to NullCheckList and return true, else return false.
bool ImplicitNullChecks::analyzeBlockForNullChecks(
    MachineBasicBlock &MBB, SmallVectorImpl<NullCheck> &NullCheckList) {
  using MachineBranchPredicate = TargetInstrInfo::MachineBranchPredicate;

  MDNode *BranchMD = nullptr;
  if (auto *BB = MBB.getBasicBlock())
    BranchMD = BB->getTerminator()->getMetadata(LLVMContext::MD_make_implicit);

  if (!BranchMD)
    return false;

  MachineBranchPredicate MBP;

  if (TII->analyzeBranchPredicate(MBB, MBP, true))
    return false;

  // Is the predicate comparing an integer to zero?
  if (!(MBP.LHS.isReg() && MBP.RHS.isImm() && MBP.RHS.getImm() == 0 &&
        (MBP.Predicate == MachineBranchPredicate::PRED_NE ||
         MBP.Predicate == MachineBranchPredicate::PRED_EQ)))
    return false;

  // If we cannot erase the test instruction itself, then making the null check
  // implicit does not buy us much.
  if (!MBP.SingleUseCondition)
    return false;

  MachineBasicBlock *NotNullSucc, *NullSucc;

  if (MBP.Predicate == MachineBranchPredicate::PRED_NE) {
    NotNullSucc = MBP.TrueDest;
    NullSucc = MBP.FalseDest;
  } else {
    NotNullSucc = MBP.FalseDest;
    NullSucc = MBP.TrueDest;
  }

  // We handle the simplest case for now. We can potentially do better by using
  // the machine dominator tree.
  if (NotNullSucc->pred_size() != 1)
    return false;

  // Starting with a code fragment like:
  //
  //   test %RAX, %RAX
  //   jne LblNotNull
  //
  //  LblNull:
  //   callq throw_NullPointerException
  //
  //  LblNotNull:
  //   Inst0
  //   Inst1
  //   ...
  //   Def = Load (%RAX + <offset>)
  //   ...
  //
  //
  // we want to end up with
  //
  //   Def = FaultingLoad (%RAX + <offset>), LblNull
  //   jmp LblNotNull ;; explicit or fallthrough
  //
  //  LblNotNull:
  //   Inst0
  //   Inst1
  //   ...
  //
  //  LblNull:
  //   callq throw_NullPointerException
  //
  //
  // To see why this is legal, consider the two possibilities:
  //
  //  1. %RAX is null: since we constrain <offset> to be less than PageSize,
  //     the load instruction dereferences the null page, causing a
  //     segmentation fault.
  //
  //  2. %RAX is not null: in this case we know that the load cannot fault, as
  //     otherwise the load would've faulted in the original program too and
  //     the original program would've been undefined.
  //
  // This reasoning cannot be extended to justify hoisting through arbitrary
  // control flow. For instance, in the example below (in pseudo-C)
  //
  //    if (ptr == null) { throw_npe(); unreachable; }
  //    if (some_cond) { return 42; }
  //    v = ptr->field;  // LD
  //    ...
  //
  // we cannot (without code duplication) use the load marked "LD" to null
  // check ptr -- clause (2) above does not apply in this case. In the above
  // program the safety of ptr->field can be dependent on some_cond; and, for
  // instance, ptr could be some non-null invalid reference that never gets
  // loaded from because some_cond is always true.
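
  // Scan forward through NotNullSucc, looking for a memory operation on
  // PointerReg that can legally be hoisted up to replace the explicit null
  // check.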
  const unsigned PointerReg = MBP.LHS.getReg();

  SmallVector<MachineInstr *, 8> InstsSeenSoFar;

  for (auto &MI : *NotNullSucc) {
    if (!canHandle(&MI) || InstsSeenSoFar.size() >= MaxInstsToConsider)
      return false;

    MachineInstr *Dependence;
    SuitabilityResult SR = isSuitableMemoryOp(MI, PointerReg, InstsSeenSoFar);
    if (SR == SR_Impossible)
      return false;
    if (SR == SR_Suitable &&
        canHoistInst(&MI, PointerReg, InstsSeenSoFar, NullSucc, Dependence)) {
      NullCheckList.emplace_back(&MI, MBP.ConditionDef, &MBB, NotNullSucc,
                                 NullSucc, Dependence);
      return true;
    }

    // If MI re-defines the PointerReg then we cannot move further.
    if (llvm::any_of(MI.operands(), [&](MachineOperand &MO) {
          return MO.isReg() && MO.getReg() && MO.isDef() &&
                 TRI->regsOverlap(MO.getReg(), PointerReg);
        }))
      return false;
    InstsSeenSoFar.push_back(&MI);
  }

  return false;
}

/// Wrap a machine instruction, MI, into a FAULTING machine instruction.
/// The FAULTING instruction does the same load/store as MI
/// (defining the same register), and branches to HandlerMBB if the mem access
/// faults. The FAULTING instruction is inserted at the end of MBB.
MachineInstr *ImplicitNullChecks::insertFaultingInstr(
    MachineInstr *MI, MachineBasicBlock *MBB, MachineBasicBlock *HandlerMBB) {
  const unsigned NoRegister = 0; // Guaranteed to be the NoRegister value for
                                 // all targets.

  DebugLoc DL;
  unsigned NumDefs = MI->getDesc().getNumDefs();
  assert(NumDefs <= 1 && "other cases unhandled!");

  unsigned DefReg = NoRegister;
  if (NumDefs != 0) {
    DefReg = MI->defs().begin()->getReg();
    assert(std::distance(MI->defs().begin(), MI->defs().end()) == 1 &&
           "expected exactly one def!");
  }

  FaultMaps::FaultKind FK;
  if (MI->mayLoad())
    FK =
        MI->mayStore() ? FaultMaps::FaultingLoadStore : FaultMaps::FaultingLoad;
  else
    FK = FaultMaps::FaultingStore;

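  // In addition to its own def (if any), a FAULTING_OP carries the fault
  // kind, the handler block, and the opcode of the wrapped instruction,
  // followed by the wrapped instruction's remaining operands (the kill/dead
  // flags are cleared below).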
  auto MIB = BuildMI(MBB, DL, TII->get(TargetOpcode::FAULTING_OP), DefReg)
                 .addImm(FK)
                 .addMBB(HandlerMBB)
                 .addImm(MI->getOpcode());

  for (auto &MO : MI->uses()) {
    if (MO.isReg()) {
      MachineOperand NewMO = MO;
      if (MO.isUse()) {
        NewMO.setIsKill(false);
      } else {
        assert(MO.isDef() && "Expected def or use");
        NewMO.setIsDead(false);
      }
      MIB.add(NewMO);
    } else {
      MIB.add(MO);
    }
  }

  MIB.setMemRefs(MI->memoperands_begin(), MI->memoperands_end());

  return MIB;
}

/// Rewrite the null checks in NullCheckList into implicit null checks.
void ImplicitNullChecks::rewriteNullChecks(
    ArrayRef<ImplicitNullChecks::NullCheck> NullCheckList) {
  DebugLoc DL;

  for (auto &NC : NullCheckList) {
    // Remove the conditional branch dependent on the null check.
    unsigned BranchesRemoved = TII->removeBranch(*NC.getCheckBlock());
    (void)BranchesRemoved;
    assert(BranchesRemoved > 0 && "expected at least one branch!");

    if (auto *DepMI = NC.getOnlyDependency()) {
      DepMI->removeFromParent();
      NC.getCheckBlock()->insert(NC.getCheckBlock()->end(), DepMI);
    }

    // Insert a faulting instruction where the conditional branch was
    // originally. The checks we did earlier ensure that this bit of code
    // motion is legal. We do not touch the successors list for any basic
    // block since we haven't changed control flow; we've just made it
    // implicit.
    MachineInstr *FaultingInstr = insertFaultingInstr(
        NC.getMemOperation(), NC.getCheckBlock(), NC.getNullSucc());
    // Now the values defined by MemOperation, if any, are live-ins of
    // the block of MemOperation.
    // The original operation may define implicit-defs alongside
    // the value.
    MachineBasicBlock *MBB = NC.getMemOperation()->getParent();
    for (const MachineOperand &MO : FaultingInstr->operands()) {
      if (!MO.isReg() || !MO.isDef())
        continue;
      unsigned Reg = MO.getReg();
      if (!Reg || MBB->isLiveIn(Reg))
        continue;
      MBB->addLiveIn(Reg);
    }

    if (auto *DepMI = NC.getOnlyDependency()) {
      for (auto &MO : DepMI->operands()) {
        if (!MO.isReg() || !MO.getReg() || !MO.isDef())
          continue;
        if (!NC.getNotNullSucc()->isLiveIn(MO.getReg()))
          NC.getNotNullSucc()->addLiveIn(MO.getReg());
      }
    }

    NC.getMemOperation()->eraseFromParent();
    NC.getCheckOperation()->eraseFromParent();

    // Insert an *unconditional* branch to the not-null successor.
    TII->insertBranch(*NC.getCheckBlock(), NC.getNotNullSucc(), nullptr,
                      /*Cond=*/None, DL);

    NumImplicitNullChecks++;
  }
}

char ImplicitNullChecks::ID = 0;

char &llvm::ImplicitNullChecksID = ImplicitNullChecks::ID;

INITIALIZE_PASS_BEGIN(ImplicitNullChecks, DEBUG_TYPE,
                      "Implicit null checks", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(ImplicitNullChecks, DEBUG_TYPE,
                    "Implicit null checks", false, false)