1 //===-- ImplicitNullChecks.cpp - Fold null checks into memory accesses ----===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This pass turns explicit null checks of the form
11 //
12 //   test %r10, %r10
13 //   je throw_npe
14 //   movl (%r10), %esi
15 //   ...
16 //
17 // to
18 //
19 //   faulting_load_op("movl (%r10), %esi", throw_npe)
20 //   ...
21 //
22 // With the help of a runtime that understands the .fault_maps section,
23 // faulting_load_op branches to throw_npe if executing movl (%r10), %esi incurs
24 // a page fault.
25 //
26 //===----------------------------------------------------------------------===//
27 
28 #include "llvm/ADT/DenseSet.h"
29 #include "llvm/ADT/SmallVector.h"
30 #include "llvm/ADT/Statistic.h"
31 #include "llvm/Analysis/AliasAnalysis.h"
32 #include "llvm/CodeGen/Passes.h"
33 #include "llvm/CodeGen/MachineFunction.h"
34 #include "llvm/CodeGen/MachineMemOperand.h"
35 #include "llvm/CodeGen/MachineOperand.h"
36 #include "llvm/CodeGen/MachineFunctionPass.h"
37 #include "llvm/CodeGen/MachineInstrBuilder.h"
38 #include "llvm/CodeGen/MachineRegisterInfo.h"
39 #include "llvm/CodeGen/MachineModuleInfo.h"
40 #include "llvm/IR/BasicBlock.h"
41 #include "llvm/IR/Instruction.h"
42 #include "llvm/IR/LLVMContext.h"
43 #include "llvm/Support/CommandLine.h"
44 #include "llvm/Support/Debug.h"
45 #include "llvm/Target/TargetSubtargetInfo.h"
46 #include "llvm/Target/TargetInstrInfo.h"
47 
48 using namespace llvm;
49 
50 static cl::opt<int> PageSize("imp-null-check-page-size",
51                              cl::desc("The page size of the target in bytes"),
52                              cl::init(4096));
53 
54 static cl::opt<unsigned> MaxInstsToConsider(
55     "imp-null-max-insts-to-consider",
56     cl::desc("The max number of instructions to consider hoisting loads over "
57              "(the algorithm is quadratic over this number)"),
58     cl::init(8));
59 
60 #define DEBUG_TYPE "implicit-null-checks"
61 
62 STATISTIC(NumImplicitNullChecks,
63           "Number of explicit null checks made implicit");
64 
65 namespace {
66 
/// Rewrites eligible explicit null checks into implicit ones by folding the
/// check into a dependent memory operation (see the file-level comment for
/// the overall transformation).
class ImplicitNullChecks : public MachineFunctionPass {
  /// Return true if \c computeDependence can process \p MI.
  static bool canHandle(const MachineInstr *MI);

  /// Helper function for \c computeDependence.  Return true if \p A
  /// and \p B do not have any dependences between them, and can be
  /// re-ordered without changing program semantics.
  bool canReorder(const MachineInstr *A, const MachineInstr *B);

  /// A data type for representing the result computed by \c
  /// computeDependence.  States whether it is okay to reorder the
  /// instruction passed to \c computeDependence with at most one
  /// dependency.
  struct DependenceResult {
    /// Can we actually re-order \p MI with \p Insts (see \c
    /// computeDependence).
    bool CanReorder;

    /// If non-None, then an instruction in \p Insts that also must be
    /// hoisted.
    Optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence;

    /*implicit*/ DependenceResult(
        bool CanReorder,
        Optional<ArrayRef<MachineInstr *>::iterator> PotentialDependence)
        : CanReorder(CanReorder), PotentialDependence(PotentialDependence) {
      // A dependence is only meaningful when reordering is possible at all.
      assert((!PotentialDependence || CanReorder) &&
             "!CanReorder && PotentialDependence.hasValue() not allowed!");
    }
  };

  /// Compute a result for the following question: can \p MI be
  /// re-ordered from after \p Insts to before it.
  ///
  /// \c canHandle should return true for all instructions in \p
  /// Insts.
  DependenceResult computeDependence(const MachineInstr *MI,
                                     ArrayRef<MachineInstr *> Insts);

  /// Represents one null check that can be made implicit.
  class NullCheck {
    // The memory operation the null check can be folded into.
    MachineInstr *MemOperation;

    // The instruction actually doing the null check (Ptr != 0).
    MachineInstr *CheckOperation;

    // The block the check resides in.
    MachineBasicBlock *CheckBlock;

    // The block branched to if the pointer is non-null.
    MachineBasicBlock *NotNullSucc;

    // The block branched to if the pointer is null.
    MachineBasicBlock *NullSucc;

    // If this is non-null, then MemOperation has a dependency on this
    // instruction; and it needs to be hoisted to execute before MemOperation.
    MachineInstr *OnlyDependency;

  public:
    explicit NullCheck(MachineInstr *memOperation, MachineInstr *checkOperation,
                       MachineBasicBlock *checkBlock,
                       MachineBasicBlock *notNullSucc,
                       MachineBasicBlock *nullSucc,
                       MachineInstr *onlyDependency)
        : MemOperation(memOperation), CheckOperation(checkOperation),
          CheckBlock(checkBlock), NotNullSucc(notNullSucc), NullSucc(nullSucc),
          OnlyDependency(onlyDependency) {}

    MachineInstr *getMemOperation() const { return MemOperation; }

    MachineInstr *getCheckOperation() const { return CheckOperation; }

    MachineBasicBlock *getCheckBlock() const { return CheckBlock; }

    MachineBasicBlock *getNotNullSucc() const { return NotNullSucc; }

    MachineBasicBlock *getNullSucc() const { return NullSucc; }

    MachineInstr *getOnlyDependency() const { return OnlyDependency; }
  };

  // Target hooks and analyses cached per function in runOnMachineFunction.
  const TargetInstrInfo *TII = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  AliasAnalysis *AA = nullptr;
  MachineModuleInfo *MMI = nullptr;

  bool analyzeBlockForNullChecks(MachineBasicBlock &MBB,
                                 SmallVectorImpl<NullCheck> &NullCheckList);
  MachineInstr *insertFaultingLoad(MachineInstr *LoadMI, MachineBasicBlock *MBB,
                                   MachineBasicBlock *HandlerMBB);
  void rewriteNullChecks(ArrayRef<NullCheck> NullCheckList);

  enum SuitabilityResult { SR_Suitable, SR_Unsuitable, SR_Impossible };

  /// Return SR_Suitable if \p MI is a memory operation that can be used to
  /// implicitly null check the value in \p PointerReg, SR_Unsuitable if
  /// \p MI cannot be used to null check, and SR_Impossible if no later
  /// instruction could be used either, so the caller should stop scanning.
  /// \p PrevInsts is the set of instructions seen since the explicit null
  /// check on \p PointerReg.
  SuitabilityResult isSuitableMemoryOp(MachineInstr &MI, unsigned PointerReg,
                                       ArrayRef<MachineInstr *> PrevInsts);

  /// Return true if \p FaultingMI can be hoisted from after the
  /// instructions in \p InstsSeenSoFar to before them.  Set \p Dependence to a
  /// non-null value if we also need to (and legally can) hoist a dependency.
  bool canHoistLoadInst(MachineInstr *FaultingMI, unsigned PointerReg,
                        ArrayRef<MachineInstr *> InstsSeenSoFar,
                        MachineBasicBlock *NullSucc, MachineInstr *&Dependence);

public:
  static char ID;

  ImplicitNullChecks() : MachineFunctionPass(ID) {
    initializeImplicitNullChecksPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  /// Requires alias analysis results; otherwise defers to the base class.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AAResultsWrapperPass>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  /// The pass only operates on functions with no virtual registers.
  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoVRegs);
  }
};
197 
198 }
199 
200 bool ImplicitNullChecks::canHandle(const MachineInstr *MI) {
201   if (MI->isCall() || MI->mayStore() || MI->hasUnmodeledSideEffects())
202     return false;
203   auto IsRegMask = [](const MachineOperand &MO) { return MO.isRegMask(); };
204   (void)IsRegMask;
205 
206   assert(!llvm::any_of(MI->operands(), IsRegMask) &&
207          "Calls were filtered out above!");
208 
209   auto IsUnordered = [](MachineMemOperand *MMO) { return MMO->isUnordered(); };
210   return llvm::all_of(MI->memoperands(), IsUnordered);
211 }
212 
213 ImplicitNullChecks::DependenceResult
214 ImplicitNullChecks::computeDependence(const MachineInstr *MI,
215                                       ArrayRef<MachineInstr *> Block) {
216   assert(llvm::all_of(Block, canHandle) && "Check this first!");
217   assert(!llvm::is_contained(Block, MI) && "Block must be exclusive of MI!");
218 
219   Optional<ArrayRef<MachineInstr *>::iterator> Dep;
220 
221   for (auto I = Block.begin(), E = Block.end(); I != E; ++I) {
222     if (canReorder(*I, MI))
223       continue;
224 
225     if (Dep == None) {
226       // Found one possible dependency, keep track of it.
227       Dep = I;
228     } else {
229       // We found two dependencies, so bail out.
230       return {false, None};
231     }
232   }
233 
234   return {true, Dep};
235 }
236 
237 bool ImplicitNullChecks::canReorder(const MachineInstr *A,
238                                     const MachineInstr *B) {
239   assert(canHandle(A) && canHandle(B) && "Precondition!");
240 
241   // canHandle makes sure that we _can_ correctly analyze the dependencies
242   // between A and B here -- for instance, we should not be dealing with heap
243   // load-store dependencies here.
244 
245   for (auto MOA : A->operands()) {
246     if (!(MOA.isReg() && MOA.getReg()))
247       continue;
248 
249     unsigned RegA = MOA.getReg();
250     for (auto MOB : B->operands()) {
251       if (!(MOB.isReg() && MOB.getReg()))
252         continue;
253 
254       unsigned RegB = MOB.getReg();
255 
256       if (TRI->regsOverlap(RegA, RegB) && (MOA.isDef() || MOB.isDef()))
257         return false;
258     }
259   }
260 
261   return true;
262 }
263 
264 bool ImplicitNullChecks::runOnMachineFunction(MachineFunction &MF) {
265   TII = MF.getSubtarget().getInstrInfo();
266   TRI = MF.getRegInfo().getTargetRegisterInfo();
267   MMI = &MF.getMMI();
268   AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
269 
270   SmallVector<NullCheck, 16> NullCheckList;
271 
272   for (auto &MBB : MF)
273     analyzeBlockForNullChecks(MBB, NullCheckList);
274 
275   if (!NullCheckList.empty())
276     rewriteNullChecks(NullCheckList);
277 
278   return !NullCheckList.empty();
279 }
280 
281 // Return true if any register aliasing \p Reg is live-in into \p MBB.
282 static bool AnyAliasLiveIn(const TargetRegisterInfo *TRI,
283                            MachineBasicBlock *MBB, unsigned Reg) {
284   for (MCRegAliasIterator AR(Reg, TRI, /*IncludeSelf*/ true); AR.isValid();
285        ++AR)
286     if (MBB->isLiveIn(*AR))
287       return true;
288   return false;
289 }
290 
291 ImplicitNullChecks::SuitabilityResult
292 ImplicitNullChecks::isSuitableMemoryOp(MachineInstr &MI, unsigned PointerReg,
293                                        ArrayRef<MachineInstr *> PrevInsts) {
294   int64_t Offset;
295   unsigned BaseReg;
296 
297   if (!TII->getMemOpBaseRegImmOfs(MI, BaseReg, Offset, TRI) ||
298       BaseReg != PointerReg)
299     return SR_Unsuitable;
300 
301   // We want the load to be issued at a sane offset from PointerReg, so that
302   // if PointerReg is null then the load reliably page faults.
303   if (!(MI.mayLoad() && !MI.isPredicable() && Offset < PageSize))
304     return SR_Unsuitable;
305 
306   // Finally, we need to make sure that the load instruction actually is
307   // loading from PointerReg, and there isn't some re-definition of PointerReg
308   // between the compare and the load.
309   // If PointerReg has been redefined before then there is no sense to continue
310   // lookup due to this condition will fail for any further instruction.
311   for (auto *PrevMI : PrevInsts)
312     for (auto &PrevMO : PrevMI->operands())
313       if (PrevMO.isReg() && PrevMO.getReg() && PrevMO.isDef() &&
314           TRI->regsOverlap(PrevMO.getReg(), PointerReg))
315         return SR_Impossible;
316 
317   return SR_Suitable;
318 }
319 
/// Decide whether \p FaultingMI can be hoisted over \p InstsSeenSoFar to just
/// after the null check.  On success, \p Dependence is set either to nullptr
/// (nothing else needs to move) or to the single instruction that must be
/// hoisted together with \p FaultingMI.
bool ImplicitNullChecks::canHoistLoadInst(
    MachineInstr *FaultingMI, unsigned PointerReg,
    ArrayRef<MachineInstr *> InstsSeenSoFar, MachineBasicBlock *NullSucc,
    MachineInstr *&Dependence) {
  auto DepResult = computeDependence(FaultingMI, InstsSeenSoFar);
  if (!DepResult.CanReorder)
    return false;

  // No dependency at all: FaultingMI can be hoisted by itself.
  if (!DepResult.PotentialDependence) {
    Dependence = nullptr;
    return true;
  }

  auto DependenceItr = *DepResult.PotentialDependence;
  auto *DependenceMI = *DependenceItr;

  // We don't want to reason about speculating loads.  Note -- at this point
  // we should have already filtered out all of the other non-speculatable
  // things, like calls and stores.
  assert(canHandle(DependenceMI) && "Should never have reached here!");
  if (DependenceMI->mayLoad())
    return false;

  for (auto &DependenceMO : DependenceMI->operands()) {
    if (!(DependenceMO.isReg() && DependenceMO.getReg()))
      continue;

    // Make sure that we won't clobber any live ins to the sibling block by
    // hoisting Dependency.  For instance, we can't hoist INST to before the
    // null check (even if it safe, and does not violate any dependencies in
    // the non_null_block) if %rdx is live in to _null_block.
    //
    //    test %rcx, %rcx
    //    je _null_block
    //  _non_null_block:
    //    %rdx<def> = INST
    //    ...
    //
    // This restriction does not apply to the faulting load inst because in
    // case the pointer loaded from is in the null page, the load will not
    // semantically execute, and affect machine state.  That is, if the load
    // was loading into %rax and it faults, the value of %rax should stay the
    // same as it would have been had the load not have executed and we'd have
    // branched to NullSucc directly.
    if (AnyAliasLiveIn(TRI, NullSucc, DependenceMO.getReg()))
      return false;

    // The Dependency can't be re-defining the base register -- then we won't
    // get the memory operation on the address we want.  This is already
    // checked in \c IsSuitableMemoryOp.
    assert(!(DependenceMO.isDef() &&
             TRI->regsOverlap(DependenceMO.getReg(), PointerReg)) &&
           "Should have been checked before!");
  }

  // The dependency must itself be hoistable over everything that precedes it,
  // and it may not carry a further dependency of its own.
  auto DepDepResult =
      computeDependence(DependenceMI, {InstsSeenSoFar.begin(), DependenceItr});

  if (!DepDepResult.CanReorder || DepDepResult.PotentialDependence)
    return false;

  Dependence = DependenceMI;
  return true;
}
384 
385 /// Analyze MBB to check if its terminating branch can be turned into an
386 /// implicit null check.  If yes, append a description of the said null check to
387 /// NullCheckList and return true, else return false.
388 bool ImplicitNullChecks::analyzeBlockForNullChecks(
389     MachineBasicBlock &MBB, SmallVectorImpl<NullCheck> &NullCheckList) {
390   typedef TargetInstrInfo::MachineBranchPredicate MachineBranchPredicate;
391 
392   MDNode *BranchMD = nullptr;
393   if (auto *BB = MBB.getBasicBlock())
394     BranchMD = BB->getTerminator()->getMetadata(LLVMContext::MD_make_implicit);
395 
396   if (!BranchMD)
397     return false;
398 
399   MachineBranchPredicate MBP;
400 
401   if (TII->analyzeBranchPredicate(MBB, MBP, true))
402     return false;
403 
404   // Is the predicate comparing an integer to zero?
405   if (!(MBP.LHS.isReg() && MBP.RHS.isImm() && MBP.RHS.getImm() == 0 &&
406         (MBP.Predicate == MachineBranchPredicate::PRED_NE ||
407          MBP.Predicate == MachineBranchPredicate::PRED_EQ)))
408     return false;
409 
410   // If we cannot erase the test instruction itself, then making the null check
411   // implicit does not buy us much.
412   if (!MBP.SingleUseCondition)
413     return false;
414 
415   MachineBasicBlock *NotNullSucc, *NullSucc;
416 
417   if (MBP.Predicate == MachineBranchPredicate::PRED_NE) {
418     NotNullSucc = MBP.TrueDest;
419     NullSucc = MBP.FalseDest;
420   } else {
421     NotNullSucc = MBP.FalseDest;
422     NullSucc = MBP.TrueDest;
423   }
424 
425   // We handle the simplest case for now.  We can potentially do better by using
426   // the machine dominator tree.
427   if (NotNullSucc->pred_size() != 1)
428     return false;
429 
430   // Starting with a code fragment like:
431   //
432   //   test %RAX, %RAX
433   //   jne LblNotNull
434   //
435   //  LblNull:
436   //   callq throw_NullPointerException
437   //
438   //  LblNotNull:
439   //   Inst0
440   //   Inst1
441   //   ...
442   //   Def = Load (%RAX + <offset>)
443   //   ...
444   //
445   //
446   // we want to end up with
447   //
448   //   Def = FaultingLoad (%RAX + <offset>), LblNull
449   //   jmp LblNotNull ;; explicit or fallthrough
450   //
451   //  LblNotNull:
452   //   Inst0
453   //   Inst1
454   //   ...
455   //
456   //  LblNull:
457   //   callq throw_NullPointerException
458   //
459   //
460   // To see why this is legal, consider the two possibilities:
461   //
462   //  1. %RAX is null: since we constrain <offset> to be less than PageSize, the
463   //     load instruction dereferences the null page, causing a segmentation
464   //     fault.
465   //
466   //  2. %RAX is not null: in this case we know that the load cannot fault, as
467   //     otherwise the load would've faulted in the original program too and the
468   //     original program would've been undefined.
469   //
470   // This reasoning cannot be extended to justify hoisting through arbitrary
471   // control flow.  For instance, in the example below (in pseudo-C)
472   //
473   //    if (ptr == null) { throw_npe(); unreachable; }
474   //    if (some_cond) { return 42; }
475   //    v = ptr->field;  // LD
476   //    ...
477   //
478   // we cannot (without code duplication) use the load marked "LD" to null check
479   // ptr -- clause (2) above does not apply in this case.  In the above program
480   // the safety of ptr->field can be dependent on some_cond; and, for instance,
481   // ptr could be some non-null invalid reference that never gets loaded from
482   // because some_cond is always true.
483 
484   const unsigned PointerReg = MBP.LHS.getReg();
485 
486   SmallVector<MachineInstr *, 8> InstsSeenSoFar;
487 
488   for (auto &MI : *NotNullSucc) {
489     if (!canHandle(&MI) || InstsSeenSoFar.size() >= MaxInstsToConsider)
490       return false;
491 
492     MachineInstr *Dependence;
493     SuitabilityResult SR = isSuitableMemoryOp(MI, PointerReg, InstsSeenSoFar);
494     if (SR == SR_Impossible)
495       return false;
496     if (SR == SR_Suitable && canHoistLoadInst(&MI, PointerReg, InstsSeenSoFar,
497                                               NullSucc, Dependence)) {
498       NullCheckList.emplace_back(&MI, MBP.ConditionDef, &MBB, NotNullSucc,
499                                  NullSucc, Dependence);
500       return true;
501     }
502 
503     InstsSeenSoFar.push_back(&MI);
504   }
505 
506   return false;
507 }
508 
509 /// Wrap a machine load instruction, LoadMI, into a FAULTING_LOAD_OP machine
510 /// instruction.  The FAULTING_LOAD_OP instruction does the same load as LoadMI
511 /// (defining the same register), and branches to HandlerMBB if the load
512 /// faults.  The FAULTING_LOAD_OP instruction is inserted at the end of MBB.
513 MachineInstr *
514 ImplicitNullChecks::insertFaultingLoad(MachineInstr *LoadMI,
515                                        MachineBasicBlock *MBB,
516                                        MachineBasicBlock *HandlerMBB) {
517   const unsigned NoRegister = 0; // Guaranteed to be the NoRegister value for
518                                  // all targets.
519 
520   DebugLoc DL;
521   unsigned NumDefs = LoadMI->getDesc().getNumDefs();
522   assert(NumDefs <= 1 && "other cases unhandled!");
523 
524   unsigned DefReg = NoRegister;
525   if (NumDefs != 0) {
526     DefReg = LoadMI->defs().begin()->getReg();
527     assert(std::distance(LoadMI->defs().begin(), LoadMI->defs().end()) == 1 &&
528            "expected exactly one def!");
529   }
530 
531   auto MIB = BuildMI(MBB, DL, TII->get(TargetOpcode::FAULTING_LOAD_OP), DefReg)
532                  .addMBB(HandlerMBB)
533                  .addImm(LoadMI->getOpcode());
534 
535   for (auto &MO : LoadMI->uses())
536     MIB.add(MO);
537 
538   MIB.setMemRefs(LoadMI->memoperands_begin(), LoadMI->memoperands_end());
539 
540   return MIB;
541 }
542 
/// Rewrite the null checks in NullCheckList into implicit null checks.
void ImplicitNullChecks::rewriteNullChecks(
    ArrayRef<ImplicitNullChecks::NullCheck> NullCheckList) {
  DebugLoc DL;

  for (auto &NC : NullCheckList) {
    // Remove the conditional branch dependent on the null check.
    unsigned BranchesRemoved = TII->removeBranch(*NC.getCheckBlock());
    (void)BranchesRemoved;
    assert(BranchesRemoved > 0 && "expected at least one branch!");

    // If the memory operation had a single dependency, hoist it into the
    // check block first so it executes before the faulting load.
    if (auto *DepMI = NC.getOnlyDependency()) {
      DepMI->removeFromParent();
      NC.getCheckBlock()->insert(NC.getCheckBlock()->end(), DepMI);
    }

    // Insert a faulting load where the conditional branch was originally.
    // The check we performed earlier ensures this bit of code motion is
    // legal.  We do not touch the successors list for any basic block since
    // we haven't changed control flow, we've just made it implicit.
    MachineInstr *FaultingLoad = insertFaultingLoad(
        NC.getMemOperation(), NC.getCheckBlock(), NC.getNullSucc());
    // Now the values defined by MemOperation, if any, are live-in of
    // the block of MemOperation.
    // The original load operation may define implicit-defs alongside
    // the loaded value.
    MachineBasicBlock *MBB = NC.getMemOperation()->getParent();
    for (const MachineOperand &MO : FaultingLoad->operands()) {
      if (!MO.isReg() || !MO.isDef())
        continue;
      unsigned Reg = MO.getReg();
      if (!Reg || MBB->isLiveIn(Reg))
        continue;
      MBB->addLiveIn(Reg);
    }

    // Registers defined by the hoisted dependency become live-in to the
    // not-null successor, since the dependency now executes before it.
    if (auto *DepMI = NC.getOnlyDependency()) {
      for (auto &MO : DepMI->operands()) {
        if (!MO.isReg() || !MO.getReg() || !MO.isDef())
          continue;
        if (!NC.getNotNullSucc()->isLiveIn(MO.getReg()))
          NC.getNotNullSucc()->addLiveIn(MO.getReg());
      }
    }

    // The original load and the (now-redundant) compare can go away.
    NC.getMemOperation()->eraseFromParent();
    NC.getCheckOperation()->eraseFromParent();

    // Insert an *unconditional* branch to not-null successor.
    TII->insertBranch(*NC.getCheckBlock(), NC.getNotNullSucc(), nullptr,
                      /*Cond=*/None, DL);

    NumImplicitNullChecks++;
  }
}
598 
599 
// Register the pass and its analysis dependency with the PassRegistry.
char ImplicitNullChecks::ID = 0;
char &llvm::ImplicitNullChecksID = ImplicitNullChecks::ID;
INITIALIZE_PASS_BEGIN(ImplicitNullChecks, "implicit-null-checks",
                      "Implicit null checks", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(ImplicitNullChecks, "implicit-null-checks",
                    "Implicit null checks", false, false)
607