1 //===-- ImplicitNullChecks.cpp - Fold null checks into memory accesses ----===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This pass turns explicit null checks of the form
11 //
12 //   test %r10, %r10
13 //   je throw_npe
14 //   movl (%r10), %esi
15 //   ...
16 //
17 // to
18 //
19 //   faulting_load_op("movl (%r10), %esi", throw_npe)
20 //   ...
21 //
22 // With the help of a runtime that understands the .fault_maps section,
23 // faulting_load_op branches to throw_npe if executing movl (%r10), %esi incurs
24 // a page fault.
25 // Store is also supported.
26 //
27 //===----------------------------------------------------------------------===//
28 
29 #include "llvm/ADT/DenseSet.h"
30 #include "llvm/ADT/SmallVector.h"
31 #include "llvm/ADT/Statistic.h"
32 #include "llvm/Analysis/AliasAnalysis.h"
33 #include "llvm/CodeGen/FaultMaps.h"
34 #include "llvm/CodeGen/Passes.h"
35 #include "llvm/CodeGen/MachineFunction.h"
36 #include "llvm/CodeGen/MachineMemOperand.h"
37 #include "llvm/CodeGen/MachineOperand.h"
38 #include "llvm/CodeGen/MachineFunctionPass.h"
39 #include "llvm/CodeGen/MachineInstrBuilder.h"
40 #include "llvm/CodeGen/MachineRegisterInfo.h"
41 #include "llvm/CodeGen/MachineModuleInfo.h"
42 #include "llvm/IR/BasicBlock.h"
43 #include "llvm/IR/Instruction.h"
44 #include "llvm/IR/LLVMContext.h"
45 #include "llvm/Support/CommandLine.h"
46 #include "llvm/Support/Debug.h"
47 #include "llvm/Target/TargetSubtargetInfo.h"
48 #include "llvm/Target/TargetInstrInfo.h"
49 
50 using namespace llvm;
51 
// Accesses at offsets below the page size from a null base are guaranteed to
// fault, since page zero is never mapped by cooperating runtimes.
static cl::opt<int> PageSize("imp-null-check-page-size",
                             cl::desc("The page size of the target in bytes"),
                             cl::init(4096));

// Bounds the linear scan in analyzeBlockForNullChecks; each candidate is
// checked against all previously seen instructions, hence "quadratic".
static cl::opt<unsigned> MaxInstsToConsider(
    "imp-null-max-insts-to-consider",
    cl::desc("The max number of instructions to consider hoisting loads over "
             "(the algorithm is quadratic over this number)"),
    cl::init(8));

#define DEBUG_TYPE "implicit-null-checks"

STATISTIC(NumImplicitNullChecks,
          "Number of explicit null checks made implicit");
66 
namespace {

/// MachineFunction pass that folds an explicit pointer-vs-null test and
/// branch into the first suitable memory access on the non-null path,
/// replacing it with a FAULTING_OP pseudo.  A runtime that understands the
/// .fault_maps section then branches to the null-handling block when the
/// access page-faults.
class ImplicitNullChecks : public MachineFunctionPass {
  /// Return true if \c computeDependence can process \p MI.
  static bool canHandle(const MachineInstr *MI);

  /// Helper function for \c computeDependence.  Return true if \p A
  /// and \p B do not have any dependences between them, and can be
  /// re-ordered without changing program semantics.
  bool canReorder(const MachineInstr *A, const MachineInstr *B);

  /// A data type for representing the result computed by \c
  /// computeDependence.  States whether it is okay to reorder the
  /// instruction passed to \c computeDependence with at most one
  /// dependency.
  struct DependenceResult {
    /// Can we actually re-order \p MI with \p Insts (see \c
    /// computeDependence).
    bool CanReorder;

    /// If non-None, then an instruction in \p Insts that also must be
    /// hoisted.
    Optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence;

    /*implicit*/ DependenceResult(
        bool CanReorder,
        Optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence)
        : CanReorder(CanReorder), PotentialDependence(PotentialDependence) {
      // A dependency only makes sense when reordering is actually possible.
      assert((!PotentialDependence || CanReorder) &&
             "!CanReorder && PotentialDependence.hasValue() not allowed!");
    }
  };

  /// Compute a result for the following question: can \p MI be
  /// re-ordered from after \p Insts to before it.
  ///
  /// \c canHandle should return true for all instructions in \p
  /// Insts.
  DependenceResult computeDependence(const MachineInstr *MI,
                                     ArrayRef<MachineInstr *> Insts);

  /// Represents one null check that can be made implicit.
  class NullCheck {
    // The memory operation the null check can be folded into.
    MachineInstr *MemOperation;

    // The instruction actually doing the null check (Ptr != 0).
    MachineInstr *CheckOperation;

    // The block the check resides in.
    MachineBasicBlock *CheckBlock;

    // The block branched to if the pointer is non-null.
    MachineBasicBlock *NotNullSucc;

    // The block branched to if the pointer is null.
    MachineBasicBlock *NullSucc;

    // If this is non-null, then MemOperation has a dependency on this
    // instruction; and it needs to be hoisted to execute before MemOperation.
    MachineInstr *OnlyDependency;

  public:
    explicit NullCheck(MachineInstr *memOperation, MachineInstr *checkOperation,
                       MachineBasicBlock *checkBlock,
                       MachineBasicBlock *notNullSucc,
                       MachineBasicBlock *nullSucc,
                       MachineInstr *onlyDependency)
        : MemOperation(memOperation), CheckOperation(checkOperation),
          CheckBlock(checkBlock), NotNullSucc(notNullSucc), NullSucc(nullSucc),
          OnlyDependency(onlyDependency) {}

    MachineInstr *getMemOperation() const { return MemOperation; }

    MachineInstr *getCheckOperation() const { return CheckOperation; }

    MachineBasicBlock *getCheckBlock() const { return CheckBlock; }

    MachineBasicBlock *getNotNullSucc() const { return NotNullSucc; }

    MachineBasicBlock *getNullSucc() const { return NullSucc; }

    MachineInstr *getOnlyDependency() const { return OnlyDependency; }
  };

  // Target/analysis handles; all initialized in runOnMachineFunction.
  const TargetInstrInfo *TII = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  AliasAnalysis *AA = nullptr;
  MachineModuleInfo *MMI = nullptr;

  bool analyzeBlockForNullChecks(MachineBasicBlock &MBB,
                                 SmallVectorImpl<NullCheck> &NullCheckList);
  MachineInstr *insertFaultingInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                                    MachineBasicBlock *HandlerMBB);
  void rewriteNullChecks(ArrayRef<NullCheck> NullCheckList);

  enum SuitabilityResult { SR_Suitable, SR_Unsuitable, SR_Impossible };

  /// Return SR_Suitable if \p MI a memory operation that can be used to
  /// implicitly null check the value in \p PointerReg, SR_Unsuitable if
  /// \p MI cannot be used to null check, and SR_Impossible if there is no
  /// point continuing the search because no later instruction will be usable
  /// either. \p PrevInsts is the set of instructions seen since the explicit
  /// null check on \p PointerReg. \p SeenLoad means that a load instruction
  /// has been observed in the \p PrevInsts set.
  SuitabilityResult isSuitableMemoryOp(MachineInstr &MI, unsigned PointerReg,
                                       ArrayRef<MachineInstr *> PrevInsts,
                                       bool &SeenLoad);

  /// Return true if \p FaultingMI can be hoisted from after the
  /// instructions in \p InstsSeenSoFar to before them.  Set \p Dependence to a
  /// non-null value if we also need to (and legally can) hoist a dependency.
  bool canHoistInst(MachineInstr *FaultingMI, unsigned PointerReg,
                    ArrayRef<MachineInstr *> InstsSeenSoFar,
                    MachineBasicBlock *NullSucc, MachineInstr *&Dependence);

public:
  static char ID;

  ImplicitNullChecks() : MachineFunctionPass(ID) {
    initializeImplicitNullChecksPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AAResultsWrapperPass>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  MachineFunctionProperties getRequiredProperties() const override {
    // This pass runs after register allocation; it reasons only about
    // physical registers.
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoVRegs);
  }
};

} // end anonymous namespace
203 
204 bool ImplicitNullChecks::canHandle(const MachineInstr *MI) {
205   if (MI->isCall() || MI->hasUnmodeledSideEffects())
206     return false;
207   auto IsRegMask = [](const MachineOperand &MO) { return MO.isRegMask(); };
208   (void)IsRegMask;
209 
210   assert(!llvm::any_of(MI->operands(), IsRegMask) &&
211          "Calls were filtered out above!");
212 
213   auto IsUnordered = [](MachineMemOperand *MMO) { return MMO->isUnordered(); };
214   return llvm::all_of(MI->memoperands(), IsUnordered);
215 }
216 
217 ImplicitNullChecks::DependenceResult
218 ImplicitNullChecks::computeDependence(const MachineInstr *MI,
219                                       ArrayRef<MachineInstr *> Block) {
220   assert(llvm::all_of(Block, canHandle) && "Check this first!");
221   assert(!llvm::is_contained(Block, MI) && "Block must be exclusive of MI!");
222 
223   Optional<ArrayRef<MachineInstr *>::iterator> Dep;
224 
225   for (auto I = Block.begin(), E = Block.end(); I != E; ++I) {
226     if (canReorder(*I, MI))
227       continue;
228 
229     if (Dep == None) {
230       // Found one possible dependency, keep track of it.
231       Dep = I;
232     } else {
233       // We found two dependencies, so bail out.
234       return {false, None};
235     }
236   }
237 
238   return {true, Dep};
239 }
240 
241 bool ImplicitNullChecks::canReorder(const MachineInstr *A,
242                                     const MachineInstr *B) {
243   assert(canHandle(A) && canHandle(B) && "Precondition!");
244 
245   // canHandle makes sure that we _can_ correctly analyze the dependencies
246   // between A and B here -- for instance, we should not be dealing with heap
247   // load-store dependencies here.
248 
249   for (auto MOA : A->operands()) {
250     if (!(MOA.isReg() && MOA.getReg()))
251       continue;
252 
253     unsigned RegA = MOA.getReg();
254     for (auto MOB : B->operands()) {
255       if (!(MOB.isReg() && MOB.getReg()))
256         continue;
257 
258       unsigned RegB = MOB.getReg();
259 
260       if (TRI->regsOverlap(RegA, RegB) && (MOA.isDef() || MOB.isDef()))
261         return false;
262     }
263   }
264 
265   return true;
266 }
267 
268 bool ImplicitNullChecks::runOnMachineFunction(MachineFunction &MF) {
269   TII = MF.getSubtarget().getInstrInfo();
270   TRI = MF.getRegInfo().getTargetRegisterInfo();
271   MMI = &MF.getMMI();
272   AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
273 
274   SmallVector<NullCheck, 16> NullCheckList;
275 
276   for (auto &MBB : MF)
277     analyzeBlockForNullChecks(MBB, NullCheckList);
278 
279   if (!NullCheckList.empty())
280     rewriteNullChecks(NullCheckList);
281 
282   return !NullCheckList.empty();
283 }
284 
285 // Return true if any register aliasing \p Reg is live-in into \p MBB.
286 static bool AnyAliasLiveIn(const TargetRegisterInfo *TRI,
287                            MachineBasicBlock *MBB, unsigned Reg) {
288   for (MCRegAliasIterator AR(Reg, TRI, /*IncludeSelf*/ true); AR.isValid();
289        ++AR)
290     if (MBB->isLiveIn(*AR))
291       return true;
292   return false;
293 }
294 
295 ImplicitNullChecks::SuitabilityResult
296 ImplicitNullChecks::isSuitableMemoryOp(MachineInstr &MI, unsigned PointerReg,
297                                        ArrayRef<MachineInstr *> PrevInsts,
298                                        bool &SeenLoad) {
299   int64_t Offset;
300   unsigned BaseReg;
301 
302   // First, if it is a store and we saw load before we bail out
303   // because we will not be able to re-order load-store without
304   // using alias analysis.
305   if (SeenLoad && MI.mayStore())
306     return SR_Impossible;
307 
308   SeenLoad = SeenLoad || MI.mayLoad();
309 
310   // Without alias analysis we cannot re-order store with anything.
311   // so if this instruction is not a candidate we should stop.
312   SuitabilityResult Unsuitable = MI.mayStore() ? SR_Impossible : SR_Unsuitable;
313 
314   if (!TII->getMemOpBaseRegImmOfs(MI, BaseReg, Offset, TRI) ||
315       BaseReg != PointerReg)
316     return Unsuitable;
317 
318   // We want the mem access to be issued at a sane offset from PointerReg,
319   // so that if PointerReg is null then the access reliably page faults.
320   if (!((MI.mayLoad() || MI.mayStore()) && !MI.isPredicable() &&
321         Offset < PageSize))
322     return Unsuitable;
323 
324   // Finally, we need to make sure that the access instruction actually is
325   // accessing from PointerReg, and there isn't some re-definition of PointerReg
326   // between the compare and the memory access.
327   // If PointerReg has been redefined before then there is no sense to continue
328   // lookup due to this condition will fail for any further instruction.
329   for (auto *PrevMI : PrevInsts)
330     for (auto &PrevMO : PrevMI->operands())
331       if (PrevMO.isReg() && PrevMO.getReg() && PrevMO.isDef() &&
332           TRI->regsOverlap(PrevMO.getReg(), PointerReg))
333         return SR_Impossible;
334 
335   return SR_Suitable;
336 }
337 
/// Return true if \p FaultingMI can be hoisted from after the instructions in
/// \p InstsSeenSoFar to before them.  On success, \p Dependence is set to the
/// single instruction that must be hoisted along with it, or to nullptr if
/// none is needed.
bool ImplicitNullChecks::canHoistInst(MachineInstr *FaultingMI,
                                      unsigned PointerReg,
                                      ArrayRef<MachineInstr *> InstsSeenSoFar,
                                      MachineBasicBlock *NullSucc,
                                      MachineInstr *&Dependence) {
  auto DepResult = computeDependence(FaultingMI, InstsSeenSoFar);
  if (!DepResult.CanReorder)
    return false;

  // No dependency at all: FaultingMI can be hoisted by itself.
  if (!DepResult.PotentialDependence) {
    Dependence = nullptr;
    return true;
  }

  auto DependenceItr = *DepResult.PotentialDependence;
  auto *DependenceMI = *DependenceItr;

  // We don't want to reason about speculating loads.  Note -- at this point
  // we should have already filtered out all of the other non-speculatable
  // things, like calls and stores.
  assert(canHandle(DependenceMI) && "Should never have reached here!");
  if (DependenceMI->mayLoad())
    return false;

  for (auto &DependenceMO : DependenceMI->operands()) {
    if (!(DependenceMO.isReg() && DependenceMO.getReg()))
      continue;

    // Make sure that we won't clobber any live ins to the sibling block by
    // hoisting Dependency.  For instance, we can't hoist INST to before the
    // null check (even if it is safe, and does not violate any dependencies
    // in the non_null_block) if %rdx is live in to _null_block.
    //
    //    test %rcx, %rcx
    //    je _null_block
    //  _non_null_block:
    //    %rdx<def> = INST
    //    ...
    //
    // This restriction does not apply to the faulting load inst because in
    // case the pointer loaded from is in the null page, the load will not
    // semantically execute, and affect machine state.  That is, if the load
    // was loading into %rax and it faults, the value of %rax should stay the
    // same as it would have been had the load not have executed and we'd have
    // branched to NullSucc directly.
    if (AnyAliasLiveIn(TRI, NullSucc, DependenceMO.getReg()))
      return false;

    // The Dependency can't be re-defining the base register -- then we won't
    // get the memory operation on the address we want.  This is already
    // checked in \c IsSuitableMemoryOp.
    assert(!(DependenceMO.isDef() &&
             TRI->regsOverlap(DependenceMO.getReg(), PointerReg)) &&
           "Should have been checked before!");
  }

  // The dependency itself must also be hoistable past everything that
  // precedes it, and it may not carry a (second-level) dependency of its own.
  auto DepDepResult =
      computeDependence(DependenceMI, {InstsSeenSoFar.begin(), DependenceItr});

  if (!DepDepResult.CanReorder || DepDepResult.PotentialDependence)
    return false;

  Dependence = DependenceMI;
  return true;
}
403 
/// Analyze MBB to check if its terminating branch can be turned into an
/// implicit null check.  If yes, append a description of the said null check to
/// NullCheckList and return true, else return false.
bool ImplicitNullChecks::analyzeBlockForNullChecks(
    MachineBasicBlock &MBB, SmallVectorImpl<NullCheck> &NullCheckList) {
  typedef TargetInstrInfo::MachineBranchPredicate MachineBranchPredicate;

  // Only branches the frontend tagged with !make.implicit metadata are
  // candidates; that tag promises the null path is cold and throwing.
  MDNode *BranchMD = nullptr;
  if (auto *BB = MBB.getBasicBlock())
    BranchMD = BB->getTerminator()->getMetadata(LLVMContext::MD_make_implicit);

  if (!BranchMD)
    return false;

  MachineBranchPredicate MBP;

  if (TII->analyzeBranchPredicate(MBB, MBP, true))
    return false;

  // Is the predicate comparing an integer to zero?
  if (!(MBP.LHS.isReg() && MBP.RHS.isImm() && MBP.RHS.getImm() == 0 &&
        (MBP.Predicate == MachineBranchPredicate::PRED_NE ||
         MBP.Predicate == MachineBranchPredicate::PRED_EQ)))
    return false;

  // If we cannot erase the test instruction itself, then making the null check
  // implicit does not buy us much.
  if (!MBP.SingleUseCondition)
    return false;

  MachineBasicBlock *NotNullSucc, *NullSucc;

  if (MBP.Predicate == MachineBranchPredicate::PRED_NE) {
    NotNullSucc = MBP.TrueDest;
    NullSucc = MBP.FalseDest;
  } else {
    NotNullSucc = MBP.FalseDest;
    NullSucc = MBP.TrueDest;
  }

  // We handle the simplest case for now.  We can potentially do better by using
  // the machine dominator tree.
  if (NotNullSucc->pred_size() != 1)
    return false;

  // Starting with a code fragment like:
  //
  //   test %RAX, %RAX
  //   jne LblNotNull
  //
  //  LblNull:
  //   callq throw_NullPointerException
  //
  //  LblNotNull:
  //   Inst0
  //   Inst1
  //   ...
  //   Def = Load (%RAX + <offset>)
  //   ...
  //
  //
  // we want to end up with
  //
  //   Def = FaultingLoad (%RAX + <offset>), LblNull
  //   jmp LblNotNull ;; explicit or fallthrough
  //
  //  LblNotNull:
  //   Inst0
  //   Inst1
  //   ...
  //
  //  LblNull:
  //   callq throw_NullPointerException
  //
  //
  // To see why this is legal, consider the two possibilities:
  //
  //  1. %RAX is null: since we constrain <offset> to be less than PageSize, the
  //     load instruction dereferences the null page, causing a segmentation
  //     fault.
  //
  //  2. %RAX is not null: in this case we know that the load cannot fault, as
  //     otherwise the load would've faulted in the original program too and the
  //     original program would've been undefined.
  //
  // This reasoning cannot be extended to justify hoisting through arbitrary
  // control flow.  For instance, in the example below (in pseudo-C)
  //
  //    if (ptr == null) { throw_npe(); unreachable; }
  //    if (some_cond) { return 42; }
  //    v = ptr->field;  // LD
  //    ...
  //
  // we cannot (without code duplication) use the load marked "LD" to null check
  // ptr -- clause (2) above does not apply in this case.  In the above program
  // the safety of ptr->field can be dependent on some_cond; and, for instance,
  // ptr could be some non-null invalid reference that never gets loaded from
  // because some_cond is always true.

  const unsigned PointerReg = MBP.LHS.getReg();

  SmallVector<MachineInstr *, 8> InstsSeenSoFar;
  bool SeenLoad = false;

  // Scan forward through NotNullSucc for the first access off PointerReg that
  // can be hoisted to replace the explicit check.
  for (auto &MI : *NotNullSucc) {
    if (!canHandle(&MI) || InstsSeenSoFar.size() >= MaxInstsToConsider)
      return false;

    // Only meaningful when canHoistInst returns true, which always sets it.
    MachineInstr *Dependence;
    SuitabilityResult SR =
        isSuitableMemoryOp(MI, PointerReg, InstsSeenSoFar, SeenLoad);
    if (SR == SR_Impossible)
      return false;
    if (SR == SR_Suitable &&
        canHoistInst(&MI, PointerReg, InstsSeenSoFar, NullSucc, Dependence)) {
      NullCheckList.emplace_back(&MI, MBP.ConditionDef, &MBB, NotNullSucc,
                                 NullSucc, Dependence);
      return true;
    }

    InstsSeenSoFar.push_back(&MI);
  }

  return false;
}
529 
530 /// Wrap a machine instruction, MI, into a FAULTING machine instruction.
531 /// The FAULTING instruction does the same load/store as MI
532 /// (defining the same register), and branches to HandlerMBB if the mem access
533 /// faults.  The FAULTING instruction is inserted at the end of MBB.
534 MachineInstr *ImplicitNullChecks::insertFaultingInstr(
535     MachineInstr *MI, MachineBasicBlock *MBB, MachineBasicBlock *HandlerMBB) {
536   const unsigned NoRegister = 0; // Guaranteed to be the NoRegister value for
537                                  // all targets.
538 
539   DebugLoc DL;
540   unsigned NumDefs = MI->getDesc().getNumDefs();
541   assert(NumDefs <= 1 && "other cases unhandled!");
542 
543   unsigned DefReg = NoRegister;
544   if (NumDefs != 0) {
545     DefReg = MI->defs().begin()->getReg();
546     assert(std::distance(MI->defs().begin(), MI->defs().end()) == 1 &&
547            "expected exactly one def!");
548   }
549 
550   FaultMaps::FaultKind FK;
551   if (MI->mayLoad())
552     FK =
553         MI->mayStore() ? FaultMaps::FaultingLoadStore : FaultMaps::FaultingLoad;
554   else
555     FK = FaultMaps::FaultingStore;
556 
557   auto MIB = BuildMI(MBB, DL, TII->get(TargetOpcode::FAULTING_OP), DefReg)
558                  .addImm(FK)
559                  .addMBB(HandlerMBB)
560                  .addImm(MI->getOpcode());
561 
562   for (auto &MO : MI->uses())
563     MIB.add(MO);
564 
565   MIB.setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
566 
567   return MIB;
568 }
569 
/// Rewrite the null checks in NullCheckList into implicit null checks.
void ImplicitNullChecks::rewriteNullChecks(
    ArrayRef<ImplicitNullChecks::NullCheck> NullCheckList) {
  DebugLoc DL;

  for (auto &NC : NullCheckList) {
    // Remove the conditional branch dependent on the null check.
    unsigned BranchesRemoved = TII->removeBranch(*NC.getCheckBlock());
    (void)BranchesRemoved;
    assert(BranchesRemoved > 0 && "expected at least one branch!");

    // If the memory operation has a single dependency, move it into the
    // check block first so it executes before the faulting instruction.
    if (auto *DepMI = NC.getOnlyDependency()) {
      DepMI->removeFromParent();
      NC.getCheckBlock()->insert(NC.getCheckBlock()->end(), DepMI);
    }

    // Insert a faulting instruction where the conditional branch was
    // originally.  The checks done earlier ensure that this bit of code
    // motion is legal.  We do not touch the successors list for any basic
    // block since we haven't changed control flow, we've just made it
    // implicit.
    MachineInstr *FaultingInstr = insertFaultingInstr(
        NC.getMemOperation(), NC.getCheckBlock(), NC.getNullSucc());
    // Now the values defined by MemOperation, if any, are live-in of
    // the block of MemOperation.
    // The original operation may define implicit-defs alongside
    // the value.
    MachineBasicBlock *MBB = NC.getMemOperation()->getParent();
    for (const MachineOperand &MO : FaultingInstr->operands()) {
      if (!MO.isReg() || !MO.isDef())
        continue;
      unsigned Reg = MO.getReg();
      if (!Reg || MBB->isLiveIn(Reg))
        continue;
      MBB->addLiveIn(Reg);
    }

    // The hoisted dependency now defines its registers before NotNullSucc,
    // so they become live-ins of that block.
    if (auto *DepMI = NC.getOnlyDependency()) {
      for (auto &MO : DepMI->operands()) {
        if (!MO.isReg() || !MO.getReg() || !MO.isDef())
          continue;
        if (!NC.getNotNullSucc()->isLiveIn(MO.getReg()))
          NC.getNotNullSucc()->addLiveIn(MO.getReg());
      }
    }

    NC.getMemOperation()->eraseFromParent();
    NC.getCheckOperation()->eraseFromParent();

    // Insert an *unconditional* branch to not-null successor.
    TII->insertBranch(*NC.getCheckBlock(), NC.getNotNullSucc(), nullptr,
                      /*Cond=*/None, DL);

    NumImplicitNullChecks++;
  }
}
625 
626 
627 char ImplicitNullChecks::ID = 0;
628 char &llvm::ImplicitNullChecksID = ImplicitNullChecks::ID;
629 INITIALIZE_PASS_BEGIN(ImplicitNullChecks, "implicit-null-checks",
630                       "Implicit null checks", false, false)
631 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
632 INITIALIZE_PASS_END(ImplicitNullChecks, "implicit-null-checks",
633                     "Implicit null checks", false, false)
634