//===- ImplicitNullChecks.cpp - Fold null checks into memory accesses ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass turns explicit null checks of the form
//
//   test %r10, %r10
//   je throw_npe
//   movl (%r10), %esi
//   ...
//
// into
//
//   faulting_load_op("movl (%r10), %esi", throw_npe)
//   ...
//
// With the help of a runtime that understands the .fault_maps section,
// faulting_load_op branches to throw_npe if executing movl (%r10), %esi incurs
// a page fault.
// Store and LoadStore are also supported.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/FaultMaps.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include <cassert>
#include <cstdint>
#include <iterator>

using namespace llvm;

static cl::opt<int> PageSize("imp-null-check-page-size",
                             cl::desc("The page size of the target in bytes"),
                             cl::init(4096), cl::Hidden);

static cl::opt<unsigned> MaxInstsToConsider(
    "imp-null-max-insts-to-consider",
    cl::desc("The max number of instructions to consider hoisting loads over "
             "(the algorithm is quadratic over this number)"),
    cl::Hidden, cl::init(8));

#define DEBUG_TYPE "implicit-null-checks"

STATISTIC(NumImplicitNullChecks,
          "Number of explicit null checks made implicit");

namespace {

class ImplicitNullChecks : public MachineFunctionPass {
  /// Return true if \c computeDependence can process \p MI.
  static bool canHandle(const MachineInstr *MI);

  /// Helper function for \c computeDependence. Return true if \p A
  /// and \p B do not have any dependences between them, and can be
  /// re-ordered without changing program semantics.
  bool canReorder(const MachineInstr *A, const MachineInstr *B);

  /// A data type for representing the result computed by \c
  /// computeDependence. States whether it is okay to reorder the
  /// instruction passed to \c computeDependence with at most one
  /// dependency.
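  /// For example (illustrative instructions, not from any target): if \c
  /// Insts is "%r1 = ADD %r2, %r3" and the queried instruction is
  /// "LOAD (%r1)", the load can only be moved above the ADD if the ADD is
  /// hoisted along with it, so the ADD is reported as the single potential
  /// dependence.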
  struct DependenceResult {
    /// Can we actually re-order \p MI with \p Insts (see \c
    /// computeDependence).
    bool CanReorder;

    /// If non-None, then an instruction in \p Insts that also must be
    /// hoisted.
    Optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence;

    /*implicit*/ DependenceResult(
        bool CanReorder,
        Optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence)
        : CanReorder(CanReorder), PotentialDependence(PotentialDependence) {
      assert((!PotentialDependence || CanReorder) &&
             "!CanReorder && PotentialDependence.hasValue() not allowed!");
    }
  };

  /// Compute a result for the following question: can \p MI be
  /// re-ordered from after \p Insts to before them.
  ///
  /// \c canHandle should return true for all instructions in \p
  /// Insts.
  DependenceResult computeDependence(const MachineInstr *MI,
                                     ArrayRef<MachineInstr *> Insts);

  /// Represents one null check that can be made implicit.
  class NullCheck {
    // The memory operation the null check can be folded into.
    MachineInstr *MemOperation;

    // The instruction actually doing the null check (Ptr != 0).
    MachineInstr *CheckOperation;

    // The block the check resides in.
    MachineBasicBlock *CheckBlock;

    // The block branched to if the pointer is non-null.
    MachineBasicBlock *NotNullSucc;

    // The block branched to if the pointer is null.
    MachineBasicBlock *NullSucc;

    // If this is non-null, then MemOperation has a dependency on this
    // instruction, and that instruction needs to be hoisted to execute
    // before MemOperation.
    MachineInstr *OnlyDependency;

  public:
    explicit NullCheck(MachineInstr *memOperation, MachineInstr *checkOperation,
                       MachineBasicBlock *checkBlock,
                       MachineBasicBlock *notNullSucc,
                       MachineBasicBlock *nullSucc,
                       MachineInstr *onlyDependency)
        : MemOperation(memOperation), CheckOperation(checkOperation),
          CheckBlock(checkBlock), NotNullSucc(notNullSucc), NullSucc(nullSucc),
          OnlyDependency(onlyDependency) {}

    MachineInstr *getMemOperation() const { return MemOperation; }

    MachineInstr *getCheckOperation() const { return CheckOperation; }

    MachineBasicBlock *getCheckBlock() const { return CheckBlock; }

    MachineBasicBlock *getNotNullSucc() const { return NotNullSucc; }

    MachineBasicBlock *getNullSucc() const { return NullSucc; }

    MachineInstr *getOnlyDependency() const { return OnlyDependency; }
  };

  const TargetInstrInfo *TII = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  AliasAnalysis *AA = nullptr;
  MachineFrameInfo *MFI = nullptr;

  bool analyzeBlockForNullChecks(MachineBasicBlock &MBB,
                                 SmallVectorImpl<NullCheck> &NullCheckList);
  MachineInstr *insertFaultingInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                                    MachineBasicBlock *HandlerMBB);
  void rewriteNullChecks(ArrayRef<NullCheck> NullCheckList);

  enum AliasResult {
    AR_NoAlias,
    AR_MayAlias,
    AR_WillAliasEverything
  };

  /// Returns AR_NoAlias if the memory operation in \p MI does not alias with
  /// \p PrevMI, AR_MayAlias if they may alias, and AR_WillAliasEverything if
  /// they may alias and any further memory operation may alias with \p PrevMI.
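  /// For example, if \p PrevMI is a store that carries no memory operands,
  /// the result is AR_WillAliasEverything: with no memoperand info, no later
  /// candidate \p MI can ever be proven disjoint from that store.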
184 AliasResult areMemoryOpsAliased(const MachineInstr &MI, 185 const MachineInstr *PrevMI) const; 186 187 enum SuitabilityResult { 188 SR_Suitable, 189 SR_Unsuitable, 190 SR_Impossible 191 }; 192 193 /// Return SR_Suitable if \p MI a memory operation that can be used to 194 /// implicitly null check the value in \p PointerReg, SR_Unsuitable if 195 /// \p MI cannot be used to null check and SR_Impossible if there is 196 /// no sense to continue lookup due to any other instruction will not be able 197 /// to be used. \p PrevInsts is the set of instruction seen since 198 /// the explicit null check on \p PointerReg. 199 SuitabilityResult isSuitableMemoryOp(const MachineInstr &MI, 200 unsigned PointerReg, 201 ArrayRef<MachineInstr *> PrevInsts); 202 203 /// Returns true if \p DependenceMI can clobber the liveIns in NullSucc block 204 /// if it was hoisted to the NullCheck block. This is used by caller 205 /// canHoistInst to decide if DependenceMI can be hoisted safely. 206 bool canDependenceHoistingClobberLiveIns(MachineInstr *DependenceMI, 207 MachineBasicBlock *NullSucc); 208 209 /// Return true if \p FaultingMI can be hoisted from after the 210 /// instructions in \p InstsSeenSoFar to before them. Set \p Dependence to a 211 /// non-null value if we also need to (and legally can) hoist a dependency. 212 bool canHoistInst(MachineInstr *FaultingMI, 213 ArrayRef<MachineInstr *> InstsSeenSoFar, 214 MachineBasicBlock *NullSucc, MachineInstr *&Dependence); 215 216 public: 217 static char ID; 218 219 ImplicitNullChecks() : MachineFunctionPass(ID) { 220 initializeImplicitNullChecksPass(*PassRegistry::getPassRegistry()); 221 } 222 223 bool runOnMachineFunction(MachineFunction &MF) override; 224 225 void getAnalysisUsage(AnalysisUsage &AU) const override { 226 AU.addRequired<AAResultsWrapperPass>(); 227 MachineFunctionPass::getAnalysisUsage(AU); 228 } 229 230 MachineFunctionProperties getRequiredProperties() const override { 231 return MachineFunctionProperties().set( 232 MachineFunctionProperties::Property::NoVRegs); 233 } 234 }; 235 236 } // end anonymous namespace 237 238 bool ImplicitNullChecks::canHandle(const MachineInstr *MI) { 239 if (MI->isCall() || MI->mayRaiseFPException() || 240 MI->hasUnmodeledSideEffects()) 241 return false; 242 auto IsRegMask = [](const MachineOperand &MO) { return MO.isRegMask(); }; 243 (void)IsRegMask; 244 245 assert(!llvm::any_of(MI->operands(), IsRegMask) && 246 "Calls were filtered out above!"); 247 248 auto IsUnordered = [](MachineMemOperand *MMO) { return MMO->isUnordered(); }; 249 return llvm::all_of(MI->memoperands(), IsUnordered); 250 } 251 252 ImplicitNullChecks::DependenceResult 253 ImplicitNullChecks::computeDependence(const MachineInstr *MI, 254 ArrayRef<MachineInstr *> Block) { 255 assert(llvm::all_of(Block, canHandle) && "Check this first!"); 256 assert(!is_contained(Block, MI) && "Block must be exclusive of MI!"); 257 258 Optional<ArrayRef<MachineInstr *>::iterator> Dep; 259 260 for (auto I = Block.begin(), E = Block.end(); I != E; ++I) { 261 if (canReorder(*I, MI)) 262 continue; 263 264 if (Dep == None) { 265 // Found one possible dependency, keep track of it. 266 Dep = I; 267 } else { 268 // We found two dependencies, so bail out. 
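      // (A single dependency is the most that canHoistInst can legalize by
      // hoisting it together with MI; two or more make the reordering
      // infeasible.)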
      return {false, None};
    }
  }

  return {true, Dep};
}

bool ImplicitNullChecks::canReorder(const MachineInstr *A,
                                    const MachineInstr *B) {
  assert(canHandle(A) && canHandle(B) && "Precondition!");

  // canHandle makes sure that we _can_ correctly analyze the dependencies
  // between A and B here -- for instance, we should not be dealing with heap
  // load-store dependencies here.

  for (const auto &MOA : A->operands()) {
    if (!(MOA.isReg() && MOA.getReg()))
      continue;

    Register RegA = MOA.getReg();
    for (const auto &MOB : B->operands()) {
      if (!(MOB.isReg() && MOB.getReg()))
        continue;

      Register RegB = MOB.getReg();

      if (TRI->regsOverlap(RegA, RegB) && (MOA.isDef() || MOB.isDef()))
        return false;
    }
  }

  return true;
}

bool ImplicitNullChecks::runOnMachineFunction(MachineFunction &MF) {
  TII = MF.getSubtarget().getInstrInfo();
  TRI = MF.getRegInfo().getTargetRegisterInfo();
  MFI = &MF.getFrameInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  SmallVector<NullCheck, 16> NullCheckList;

  for (auto &MBB : MF)
    analyzeBlockForNullChecks(MBB, NullCheckList);

  if (!NullCheckList.empty())
    rewriteNullChecks(NullCheckList);

  return !NullCheckList.empty();
}

// Return true if any register aliasing \p Reg is live into \p MBB.
static bool AnyAliasLiveIn(const TargetRegisterInfo *TRI,
                           MachineBasicBlock *MBB, unsigned Reg) {
  for (MCRegAliasIterator AR(Reg, TRI, /*IncludeSelf*/ true); AR.isValid();
       ++AR)
    if (MBB->isLiveIn(*AR))
      return true;
  return false;
}

ImplicitNullChecks::AliasResult
ImplicitNullChecks::areMemoryOpsAliased(const MachineInstr &MI,
                                        const MachineInstr *PrevMI) const {
  // If PrevMI is not a memory access, there is nothing to check against.
  if (!(PrevMI->mayStore() || PrevMI->mayLoad()))
    return AR_NoAlias;
  // Two loads do not conflict with each other, even if they alias.
  if (!(MI.mayStore() || PrevMI->mayStore()))
    return AR_NoAlias;
  // We lost the memoperand info, so conservatively treat the accesses as
  // aliasing. If the unknown access is a store, there is also no point in
  // continuing, because we won't be able to check anything against it
  // further on.
  if (MI.memoperands_empty())
    return MI.mayStore() ? AR_WillAliasEverything : AR_MayAlias;
  if (PrevMI->memoperands_empty())
    return PrevMI->mayStore() ? AR_WillAliasEverything : AR_MayAlias;

  for (MachineMemOperand *MMO1 : MI.memoperands()) {
    // MMO1 should have a value because it comes from the operation we'd like
    // to use as the implicit null check.
    assert(MMO1->getValue() && "MMO1 should have a Value!");
    for (MachineMemOperand *MMO2 : PrevMI->memoperands()) {
      if (const PseudoSourceValue *PSV = MMO2->getPseudoValue()) {
        if (PSV->mayAlias(MFI))
          return AR_MayAlias;
        continue;
      }
      llvm::AliasResult AAResult =
          AA->alias(MemoryLocation(MMO1->getValue(), LocationSize::unknown(),
                                   MMO1->getAAInfo()),
                    MemoryLocation(MMO2->getValue(), LocationSize::unknown(),
                                   MMO2->getAAInfo()));
      if (AAResult != NoAlias)
        return AR_MayAlias;
    }
  }
  return AR_NoAlias;
}

ImplicitNullChecks::SuitabilityResult
ImplicitNullChecks::isSuitableMemoryOp(const MachineInstr &MI,
                                       unsigned PointerReg,
                                       ArrayRef<MachineInstr *> PrevInsts) {
  // Implementation restriction for faulting_op insertion.
  // TODO: This could be relaxed if we find a test case which warrants it.
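  // (insertFaultingInstr below asserts that the instruction it wraps has at
  // most one def.)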
  if (MI.getDesc().getNumDefs() > 1)
    return SR_Unsuitable;

  if (!MI.mayLoadOrStore() || MI.isPredicable())
    return SR_Unsuitable;
  auto AM = TII->getAddrModeFromMemoryOp(MI, TRI);
  if (!AM)
    return SR_Unsuitable;
  auto AddrMode = *AM;
  const Register BaseReg = AddrMode.BaseReg, ScaledReg = AddrMode.ScaledReg;
  int64_t Displacement = AddrMode.Displacement;

  // We need the base or the scaled register of the memory instruction to be
  // the register the null check is performed on (i.e. PointerReg).
  if (BaseReg != PointerReg && ScaledReg != PointerReg)
    return SR_Unsuitable;
  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  unsigned PointerRegSizeInBits = TRI->getRegSizeInBits(PointerReg, MRI);
  // Bail out if the sizes of BaseReg, ScaledReg and PointerReg are not the
  // same.
  if ((BaseReg &&
       TRI->getRegSizeInBits(BaseReg, MRI) != PointerRegSizeInBits) ||
      (ScaledReg &&
       TRI->getRegSizeInBits(ScaledReg, MRI) != PointerRegSizeInBits))
    return SR_Unsuitable;

  // Returns true if RegUsedInAddr has a known constant value that can be
  // folded into the displacement, and updates Displacement accordingly.
  auto CalculateDisplacementFromAddrMode = [&](Register RegUsedInAddr,
                                               int64_t Multiplier) {
    // The register can be NoRegister, which is defined as zero for all
    // targets. Consider an instruction of interest such as
    // `movq 8(,%rdi,8), %rax`: here the ScaledReg is %rdi, while there is
    // no BaseReg.
    if (!RegUsedInAddr)
      return false;
    assert(Multiplier && "expected to be non-zero!");
    MachineInstr *ModifyingMI = nullptr;
    for (auto It = std::next(MachineBasicBlock::const_reverse_iterator(&MI));
         It != MI.getParent()->rend(); ++It) {
      const MachineInstr *CurrMI = &*It;
      if (CurrMI->modifiesRegister(RegUsedInAddr, TRI)) {
        ModifyingMI = const_cast<MachineInstr *>(CurrMI);
        break;
      }
    }
    if (!ModifyingMI)
      return false;
    // Check for a constant value defined in the register by ModifyingMI.
    // Being the nearest preceding def, it invalidates all earlier values of
    // that register.
    int64_t ImmVal;
    if (!TII->getConstValDefinedInReg(*ModifyingMI, RegUsedInAddr, ImmVal))
      return false;
    // Calculate the register size in bits, since this is needed for bailing
    // out in case of overflow.
    int32_t RegSizeInBits = TRI->getRegSizeInBits(RegUsedInAddr, MRI);
    APInt ImmValC(RegSizeInBits, ImmVal, true /*IsSigned*/);
    APInt MultiplierC(RegSizeInBits, Multiplier);
    assert(MultiplierC.isStrictlyPositive() &&
           "expected to be a positive value!");
    bool IsOverflow;
    // The sign of the product depends on the sign of ImmVal, since
    // Multiplier is always positive.
    APInt Product = ImmValC.smul_ov(MultiplierC, IsOverflow);
    if (IsOverflow)
      return false;
    APInt DisplacementC(64, Displacement, true /*isSigned*/);
    DisplacementC = Product.sadd_ov(DisplacementC, IsOverflow);
    if (IsOverflow)
      return false;

    // We only handle displacements up to 64 bits wide.
    if (DisplacementC.getActiveBits() > 64)
      return false;
    Displacement = DisplacementC.getSExtValue();
    return true;
  };

  // If a register used in the address is constant, fold its effect into the
  // displacement for ease of analysis.
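  // For example (illustrative values): if `movq $2, %rdi` precedes
  // `movq 8(,%rdi,8), %rax`, the constant 2 in the ScaledReg %rdi folds into
  // the displacement as 8 + 2 * 8 = 24, and the address no longer depends on
  // %rdi for the checks below.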
  bool BaseRegIsConstVal = false, ScaledRegIsConstVal = false;
  if (CalculateDisplacementFromAddrMode(BaseReg, 1))
    BaseRegIsConstVal = true;
  if (CalculateDisplacementFromAddrMode(ScaledReg, AddrMode.Scale))
    ScaledRegIsConstVal = true;

  // Any register other than the null-checked one that takes part in the
  // address computation must have a known constant value that has been
  // folded into the Displacement above; otherwise the effective offset is
  // partly symbolic. This matters because we do not want to incorrectly
  // assume that the access falls in the zeroth (faulting) page in the "sane
  // offset check" below.
  if ((BaseReg && BaseReg != PointerReg && !BaseRegIsConstVal) ||
      (ScaledReg && ScaledReg != PointerReg && !ScaledRegIsConstVal))
    return SR_Unsuitable;

  // We want the mem access to be issued at a sane offset from PointerReg,
  // so that if PointerReg is null then the access reliably page faults.
  if (!(-PageSize < Displacement && Displacement < PageSize))
    return SR_Unsuitable;

  // Finally, check whether the current memory access aliases with a previous
  // one.
  for (auto *PrevMI : PrevInsts) {
    AliasResult AR = areMemoryOpsAliased(MI, PrevMI);
    if (AR == AR_WillAliasEverything)
      return SR_Impossible;
    if (AR == AR_MayAlias)
      return SR_Unsuitable;
  }
  return SR_Suitable;
}

bool ImplicitNullChecks::canDependenceHoistingClobberLiveIns(
    MachineInstr *DependenceMI, MachineBasicBlock *NullSucc) {
  for (const auto &DependenceMO : DependenceMI->operands()) {
    if (!(DependenceMO.isReg() && DependenceMO.getReg()))
      continue;

    // Make sure that we won't clobber any live-ins to the sibling block by
    // hoisting Dependency. For instance, we can't hoist INST to before the
    // null check (even if it is safe, and does not violate any dependencies
    // in the _non_null_block) if %rdx is live into _null_block.
    //
    //    test %rcx, %rcx
    //    je _null_block
    //  _non_null_block:
    //    %rdx = INST
    //    ...
    //
    // This restriction does not apply to the faulting load instruction
    // because if the pointer loaded from is in the null page, the load will
    // not semantically execute, and will not affect the machine state. That
    // is, if the load was loading into %rax and it faults, the value of %rax
    // should stay the same as it would have been had the load not executed
    // and we'd have branched to NullSucc directly.
    if (AnyAliasLiveIn(TRI, NullSucc, DependenceMO.getReg()))
      return true;
  }

  // The dependence does not clobber live-ins in the NullSucc block.
  return false;
}

bool ImplicitNullChecks::canHoistInst(MachineInstr *FaultingMI,
                                      ArrayRef<MachineInstr *> InstsSeenSoFar,
                                      MachineBasicBlock *NullSucc,
                                      MachineInstr *&Dependence) {
  auto DepResult = computeDependence(FaultingMI, InstsSeenSoFar);
  if (!DepResult.CanReorder)
    return false;

  if (!DepResult.PotentialDependence) {
    Dependence = nullptr;
    return true;
  }

  auto DependenceItr = *DepResult.PotentialDependence;
  auto *DependenceMI = *DependenceItr;

  // We don't want to reason about speculating loads. Note -- at this point
  // we should have already filtered out all of the other non-speculatable
  // things, like calls and stores.
  // We also do not want to hoist stores: a hoisted store would execute even
  // when FaultingMI faults, changing memory state on the null path.
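  // Both cases are rejected by the single mayLoadOrStore() bail-out below.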
  assert(canHandle(DependenceMI) && "Should never have reached here!");
  if (DependenceMI->mayLoadOrStore())
    return false;

  if (canDependenceHoistingClobberLiveIns(DependenceMI, NullSucc))
    return false;

  auto DepDepResult =
      computeDependence(DependenceMI, {InstsSeenSoFar.begin(), DependenceItr});

  if (!DepDepResult.CanReorder || DepDepResult.PotentialDependence)
    return false;

  Dependence = DependenceMI;
  return true;
}

/// Analyze MBB to check if its terminating branch can be turned into an
/// implicit null check. If yes, append a description of the null check to
/// NullCheckList and return true; otherwise return false.
bool ImplicitNullChecks::analyzeBlockForNullChecks(
    MachineBasicBlock &MBB, SmallVectorImpl<NullCheck> &NullCheckList) {
  using MachineBranchPredicate = TargetInstrInfo::MachineBranchPredicate;

  MDNode *BranchMD = nullptr;
  if (auto *BB = MBB.getBasicBlock())
    BranchMD = BB->getTerminator()->getMetadata(LLVMContext::MD_make_implicit);

  if (!BranchMD)
    return false;

  MachineBranchPredicate MBP;

  if (TII->analyzeBranchPredicate(MBB, MBP, true))
    return false;

  // Is the predicate comparing an integer to zero?
  if (!(MBP.LHS.isReg() && MBP.RHS.isImm() && MBP.RHS.getImm() == 0 &&
        (MBP.Predicate == MachineBranchPredicate::PRED_NE ||
         MBP.Predicate == MachineBranchPredicate::PRED_EQ)))
    return false;

  // If there is a separate condition-generating instruction, we choose not
  // to transform unless we can remove both the condition and the consuming
  // branch.
  if (MBP.ConditionDef && !MBP.SingleUseCondition)
    return false;

  MachineBasicBlock *NotNullSucc, *NullSucc;

  if (MBP.Predicate == MachineBranchPredicate::PRED_NE) {
    NotNullSucc = MBP.TrueDest;
    NullSucc = MBP.FalseDest;
  } else {
    NotNullSucc = MBP.FalseDest;
    NullSucc = MBP.TrueDest;
  }

  // We handle the simplest case for now. We can potentially do better by
  // using the machine dominator tree.
  if (NotNullSucc->pred_size() != 1)
    return false;

  const Register PointerReg = MBP.LHS.getReg();

  if (MBP.ConditionDef) {
    // To prevent the invalid transformation of the following code:
    //
    //   mov %rax, %rcx
    //   test %rax, %rax
    //   %rax = ...
    //   je throw_npe
    //   mov (%rcx), %r9
    //   mov (%rax), %r10
    //
    // into:
    //
    //   mov %rax, %rcx
    //   %rax = ...
    //   faulting_load_op("movl (%rax), %r10", throw_npe)
    //   mov (%rcx), %r9
    //
    // we must ensure that there are no instructions between the 'test' and
    // conditional jump that modify %rax.
    assert(MBP.ConditionDef->getParent() == &MBB &&
           "Should be in basic block");

    for (auto I = MBB.rbegin(); MBP.ConditionDef != &*I; ++I)
      if (I->modifiesRegister(PointerReg, TRI))
        return false;
  }
  // Starting with a code fragment like:
  //
  //   test %rax, %rax
  //   jne LblNotNull
  //
  //  LblNull:
  //   callq throw_NullPointerException
  //
  //  LblNotNull:
  //   Inst0
  //   Inst1
  //   ...
  //   Def = Load (%rax + <offset>)
  //   ...
  //
  // we want to end up with
  //
  //   Def = FaultingLoad (%rax + <offset>), LblNull
  //   jmp LblNotNull ;; explicit or fallthrough
  //
  //  LblNotNull:
  //   Inst0
  //   Inst1
  //   ...
  //
  //  LblNull:
  //   callq throw_NullPointerException
  //
  // To see why this is legal, consider the two possibilities:
  //
  //  1. %rax is null: since we constrain <offset> to be less than PageSize,
  //     the load instruction dereferences the null page, causing a
  //     segmentation fault.
  //
  //  2. %rax is not null: in this case we know that the load cannot fault,
  //     as otherwise the load would've faulted in the original program too
  //     and the original program would've been undefined.
  //
  // This reasoning cannot be extended to justify hoisting through arbitrary
  // control flow. For instance, in the example below (in pseudo-C)
  //
  //    if (ptr == null) { throw_npe(); unreachable; }
  //    if (some_cond) { return 42; }
  //    v = ptr->field;  // LD
  //    ...
  //
  // we cannot (without code duplication) use the load marked "LD" to null
  // check ptr -- clause (2) above does not apply in this case. In the above
  // program the safety of ptr->field can be dependent on some_cond; and, for
  // instance, ptr could be some non-null invalid reference that never gets
  // loaded from because some_cond is always true.

  SmallVector<MachineInstr *, 8> InstsSeenSoFar;

  for (auto &MI : *NotNullSucc) {
    if (!canHandle(&MI) || InstsSeenSoFar.size() >= MaxInstsToConsider)
      return false;

    MachineInstr *Dependence;
    SuitabilityResult SR = isSuitableMemoryOp(MI, PointerReg, InstsSeenSoFar);
    if (SR == SR_Impossible)
      return false;
    if (SR == SR_Suitable &&
        canHoistInst(&MI, InstsSeenSoFar, NullSucc, Dependence)) {
      NullCheckList.emplace_back(&MI, MBP.ConditionDef, &MBB, NotNullSucc,
                                 NullSucc, Dependence);
      return true;
    }

    // If MI re-defines the PointerReg in a way that changes the value of
    // PointerReg if it was null, then we cannot move further.
    if (!TII->preservesZeroValueInReg(&MI, PointerReg, TRI))
      return false;
    InstsSeenSoFar.push_back(&MI);
  }

  return false;
}

/// Wrap a machine instruction, MI, into a FAULTING machine instruction.
/// The FAULTING instruction does the same load/store as MI
/// (defining the same register), and branches to HandlerMBB if the mem access
/// faults. The FAULTING instruction is inserted at the end of MBB.
MachineInstr *ImplicitNullChecks::insertFaultingInstr(
    MachineInstr *MI, MachineBasicBlock *MBB, MachineBasicBlock *HandlerMBB) {
  const unsigned NoRegister = 0; // Guaranteed to be the NoRegister value for
                                 // all targets.

  DebugLoc DL;
  unsigned NumDefs = MI->getDesc().getNumDefs();
  assert(NumDefs <= 1 && "other cases unhandled!");

  unsigned DefReg = NoRegister;
  if (NumDefs != 0) {
    DefReg = MI->getOperand(0).getReg();
    assert(NumDefs == 1 && "expected exactly one def!");
  }

  FaultMaps::FaultKind FK;
  if (MI->mayLoad())
    FK = MI->mayStore() ? FaultMaps::FaultingLoadStore
                        : FaultMaps::FaultingLoad;
  else
    FK = FaultMaps::FaultingStore;

  auto MIB = BuildMI(MBB, DL, TII->get(TargetOpcode::FAULTING_OP), DefReg)
                 .addImm(FK)
                 .addMBB(HandlerMBB)
                 .addImm(MI->getOpcode());

  for (auto &MO : MI->uses()) {
    if (MO.isReg()) {
      MachineOperand NewMO = MO;
      if (MO.isUse()) {
        NewMO.setIsKill(false);
      } else {
        assert(MO.isDef() && "Expected def or use");
        NewMO.setIsDead(false);
      }
      MIB.add(NewMO);
    } else {
      MIB.add(MO);
    }
  }

  MIB.setMemRefs(MI->memoperands());

  return MIB;
}

/// Rewrite the null checks in NullCheckList into implicit null checks.
void ImplicitNullChecks::rewriteNullChecks(
    ArrayRef<ImplicitNullChecks::NullCheck> NullCheckList) {
  DebugLoc DL;

  for (auto &NC : NullCheckList) {
    // Remove the conditional branch dependent on the null check.
    unsigned BranchesRemoved = TII->removeBranch(*NC.getCheckBlock());
    (void)BranchesRemoved;
    assert(BranchesRemoved > 0 && "expected at least one branch!");

    if (auto *DepMI = NC.getOnlyDependency()) {
      DepMI->removeFromParent();
      NC.getCheckBlock()->insert(NC.getCheckBlock()->end(), DepMI);
    }

    // Insert a faulting instruction where the conditional branch was
    // originally. The check we did earlier ensures that this bit of code
    // motion is legal. We do not touch the successor list of any basic block
    // since we haven't changed control flow; we've just made it implicit.
    MachineInstr *FaultingInstr = insertFaultingInstr(
        NC.getMemOperation(), NC.getCheckBlock(), NC.getNullSucc());
    // Now the values defined by MemOperation, if any, are live-ins of
    // MemOperation's block. The original operation may define implicit-defs
    // alongside the value.
    MachineBasicBlock *MBB = NC.getMemOperation()->getParent();
    for (const MachineOperand &MO : FaultingInstr->operands()) {
      if (!MO.isReg() || !MO.isDef())
        continue;
      Register Reg = MO.getReg();
      if (!Reg || MBB->isLiveIn(Reg))
        continue;
      MBB->addLiveIn(Reg);
    }

    if (auto *DepMI = NC.getOnlyDependency()) {
      for (auto &MO : DepMI->operands()) {
        if (!MO.isReg() || !MO.getReg() || !MO.isDef() || MO.isDead())
          continue;
        if (!NC.getNotNullSucc()->isLiveIn(MO.getReg()))
          NC.getNotNullSucc()->addLiveIn(MO.getReg());
      }
    }

    NC.getMemOperation()->eraseFromParent();
    if (auto *CheckOp = NC.getCheckOperation())
      CheckOp->eraseFromParent();

    // Insert an *unconditional* branch to the not-null successor - we expect
    // block placement to remove fallthroughs later.
    TII->insertBranch(*NC.getCheckBlock(), NC.getNotNullSucc(), nullptr,
                      /*Cond=*/None, DL);

    NumImplicitNullChecks++;
  }
}

char ImplicitNullChecks::ID = 0;

char &llvm::ImplicitNullChecksID = ImplicitNullChecks::ID;

INITIALIZE_PASS_BEGIN(ImplicitNullChecks, DEBUG_TYPE,
                      "Implicit null checks", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(ImplicitNullChecks, DEBUG_TYPE,
                    "Implicit null checks", false, false)