//===- Inliner.cpp - Pass to inline function calls ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a basic inlining algorithm that operates bottom up over
// the Strongly Connected Components (SCCs) of the CallGraph. This enables a
// more incremental propagation of inlining decisions from the leaves to the
// roots of the callgraph.
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Analysis/CallGraph.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"
#include "mlir/Transforms/InliningUtils.h"
#include "mlir/Transforms/Passes.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Parallel.h"

#define DEBUG_TYPE "inlining"

using namespace mlir;

//===----------------------------------------------------------------------===//
// Symbol Use Tracking
//===----------------------------------------------------------------------===//

/// Walk all of the symbol callgraph nodes referenced by the given operation.
static void walkReferencedSymbolNodes(
    Operation *op, CallGraph &cg,
    DenseMap<Attribute, CallGraphNode *> &resolvedRefs,
    function_ref<void(CallGraphNode *, Operation *)> callback) {
  auto symbolUses = SymbolTable::getSymbolUses(op);
  assert(symbolUses && "expected uses to be valid");

  Operation *symbolTableOp = op->getParentOp();
  for (const SymbolTable::SymbolUse &use : *symbolUses) {
    auto refIt = resolvedRefs.insert({use.getSymbolRef(), nullptr});
    CallGraphNode *&node = refIt.first->second;

    // If this is the first instance of this reference, try to resolve a
    // callgraph node for it.
    if (refIt.second) {
      auto *symbolOp = SymbolTable::lookupNearestSymbolFrom(symbolTableOp,
                                                            use.getSymbolRef());
      auto callableOp = dyn_cast_or_null<CallableOpInterface>(symbolOp);
      if (!callableOp)
        continue;
      node = cg.lookupNode(callableOp.getCallableRegion());
    }
    if (node)
      callback(node, use.getUser());
  }
}

//===----------------------------------------------------------------------===//
// CGUseList
//===----------------------------------------------------------------------===//

namespace {
/// This struct tracks the uses of callgraph nodes so that nodes which become
/// use-empty can be dropped. It directly tracks and manages a use-list for all
/// of the callgraph nodes. This is necessary because many callgraph nodes are
/// referenced by SymbolRefAttr, which has no mechanism akin to the SSA `Use`
/// class.
struct CGUseList {
  /// This struct tracks the uses of callgraph nodes within a specific
  /// operation.
  struct CGUser {
    /// Any nodes referenced in the top-level attribute list of this user. We
    /// use a set here because the number of references does not matter.
    DenseSet<CallGraphNode *> topLevelUses;

    /// Uses of nodes referenced by nested operations.
    DenseMap<CallGraphNode *, int> innerUses;
  };

  CGUseList(Operation *op, CallGraph &cg);

  /// Drop uses of nodes referred to by the given call operation that resides
  /// within 'userNode'.
  void dropCallUses(CallGraphNode *userNode, Operation *callOp, CallGraph &cg);

  /// Remove the given node from the use list.
  void eraseNode(CallGraphNode *node);

  /// Returns true if the given callgraph node has no uses and can be pruned.
  bool isDead(CallGraphNode *node) const;

  /// Returns true if the given callgraph node has a single use and can be
  /// discarded.
  bool hasOneUseAndDiscardable(CallGraphNode *node) const;

  /// Recompute the uses held by the given callgraph node.
  void recomputeUses(CallGraphNode *node, CallGraph &cg);

  /// Merge the uses of 'lhs' with the uses of 'rhs' after inlining a copy of
  /// 'lhs' into 'rhs'.
  void mergeUsesAfterInlining(CallGraphNode *lhs, CallGraphNode *rhs);

private:
  /// Decrement the uses of discardable nodes referenced by the given user.
  void decrementDiscardableUses(CGUser &uses);

  /// A mapping between a discardable callgraph node (that is a symbol) and the
  /// number of uses for this node.
  DenseMap<CallGraphNode *, int> discardableSymNodeUses;
  /// A mapping between a callgraph node and the symbol callgraph nodes that it
  /// uses.
  DenseMap<CallGraphNode *, CGUser> nodeUses;
};
} // end anonymous namespace

CGUseList::CGUseList(Operation *op, CallGraph &cg) {
  // A mapping of symbol references to callgraph nodes that are always known to
  // be live during inlining.
  DenseMap<Attribute, CallGraphNode *> alwaysLiveNodes;

  // Walk each of the symbol tables looking for discardable callgraph nodes.
  auto walkFn = [&](Operation *symbolTableOp, bool allUsesVisible) {
    for (Operation &op : symbolTableOp->getRegion(0).getOps()) {
      // If this is a callgraph operation, check to see if it is discardable.
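      // A node is only tracked as discardable if its symbol may be dropped
      // when it becomes use-empty, and either the symbol is private or all of
      // its uses are visible within the walked operation.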
      if (auto callable = dyn_cast<CallableOpInterface>(&op)) {
        if (auto *node = cg.lookupNode(callable.getCallableRegion())) {
          SymbolOpInterface symbol = dyn_cast<SymbolOpInterface>(&op);
          if (symbol && (allUsesVisible || symbol.isPrivate()) &&
              symbol.canDiscardOnUseEmpty()) {
            discardableSymNodeUses.try_emplace(node, 0);
          }
          continue;
        }
      }
      // Otherwise, check for any referenced nodes. These will be always-live.
      walkReferencedSymbolNodes(&op, cg, alwaysLiveNodes,
                                [](CallGraphNode *, Operation *) {});
    }
  };
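  // If the operation being processed has no parent block, it is the top-level
  // operation, so all uses of its symbols are necessarily visible.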
  SymbolTable::walkSymbolTables(op, /*allSymUsesVisible=*/!op->getBlock(),
                                walkFn);

  // Drop the use information for any discardable nodes that are always live.
  for (auto &it : alwaysLiveNodes)
    discardableSymNodeUses.erase(it.second);

  // Compute the uses for each of the callable nodes in the graph.
  for (CallGraphNode *node : cg)
    recomputeUses(node, cg);
}

void CGUseList::dropCallUses(CallGraphNode *userNode, Operation *callOp,
                             CallGraph &cg) {
  auto &userRefs = nodeUses[userNode].innerUses;
  auto walkFn = [&](CallGraphNode *node, Operation *user) {
    auto parentIt = userRefs.find(node);
    if (parentIt == userRefs.end())
      return;
    --parentIt->second;
    --discardableSymNodeUses[node];
  };
  DenseMap<Attribute, CallGraphNode *> resolvedRefs;
  walkReferencedSymbolNodes(callOp, cg, resolvedRefs, walkFn);
}

void CGUseList::eraseNode(CallGraphNode *node) {
  // Drop all child nodes.
  for (auto &edge : *node)
    if (edge.isChild())
      eraseNode(edge.getTarget());

  // Drop the uses held by this node and erase it.
  auto useIt = nodeUses.find(node);
  assert(useIt != nodeUses.end() && "expected node to be valid");
  decrementDiscardableUses(useIt->getSecond());
  nodeUses.erase(useIt);
  discardableSymNodeUses.erase(node);
}

bool CGUseList::isDead(CallGraphNode *node) const {
  // If the parent operation isn't a symbol, simply check normal SSA deadness.
  Operation *nodeOp = node->getCallableRegion()->getParentOp();
  if (!isa<SymbolOpInterface>(nodeOp))
    return MemoryEffectOpInterface::hasNoEffect(nodeOp) && nodeOp->use_empty();

  // Otherwise, check the number of symbol uses.
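  // Nodes that are not tracked as discardable (e.g. public symbols, or nodes
  // referenced by always-live operations) are conservatively treated as live.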
  auto symbolIt = discardableSymNodeUses.find(node);
  return symbolIt != discardableSymNodeUses.end() && symbolIt->second == 0;
}

bool CGUseList::hasOneUseAndDiscardable(CallGraphNode *node) const {
  // If this isn't a symbol node, check for side-effects and SSA use count.
  Operation *nodeOp = node->getCallableRegion()->getParentOp();
  if (!isa<SymbolOpInterface>(nodeOp))
    return MemoryEffectOpInterface::hasNoEffect(nodeOp) && nodeOp->hasOneUse();

  // Otherwise, check the number of symbol uses.
  auto symbolIt = discardableSymNodeUses.find(node);
  return symbolIt != discardableSymNodeUses.end() && symbolIt->second == 1;
}

void CGUseList::recomputeUses(CallGraphNode *node, CallGraph &cg) {
  Operation *parentOp = node->getCallableRegion()->getParentOp();
  CGUser &uses = nodeUses[node];
  decrementDiscardableUses(uses);

  // Collect the new discardable uses within this node.
  uses = CGUser();
  DenseMap<Attribute, CallGraphNode *> resolvedRefs;
  auto walkFn = [&](CallGraphNode *refNode, Operation *user) {
    auto discardSymIt = discardableSymNodeUses.find(refNode);
    if (discardSymIt == discardableSymNodeUses.end())
      return;

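    // References held directly in the attribute list of the parent operation
    // are counted at most once per referenced node, whereas references from
    // nested operations are counted per occurrence.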
    if (user != parentOp)
      ++uses.innerUses[refNode];
    else if (!uses.topLevelUses.insert(refNode).second)
      return;
    ++discardSymIt->second;
  };
  walkReferencedSymbolNodes(parentOp, cg, resolvedRefs, walkFn);
}

void CGUseList::mergeUsesAfterInlining(CallGraphNode *lhs, CallGraphNode *rhs) {
  auto &lhsUses = nodeUses[lhs], &rhsUses = nodeUses[rhs];
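  // Only the inner uses are merged; top-level uses are attached to the 'lhs'
  // callable operation itself, which is not cloned into 'rhs' by inlining.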
  for (auto &useIt : lhsUses.innerUses) {
    rhsUses.innerUses[useIt.first] += useIt.second;
    discardableSymNodeUses[useIt.first] += useIt.second;
  }
}

void CGUseList::decrementDiscardableUses(CGUser &uses) {
  for (CallGraphNode *node : uses.topLevelUses)
    --discardableSymNodeUses[node];
  for (auto &it : uses.innerUses)
    discardableSymNodeUses[it.first] -= it.second;
}

//===----------------------------------------------------------------------===//
// CallGraph traversal
//===----------------------------------------------------------------------===//

/// Run a given transformation over the SCCs of the callgraph in a bottom up
/// traversal.
static void runTransformOnCGSCCs(
    const CallGraph &cg,
    function_ref<void(MutableArrayRef<CallGraphNode *>)> sccTransformer) {
  std::vector<CallGraphNode *> currentSCCVec;
  auto cgi = llvm::scc_begin(&cg);
  while (!cgi.isAtEnd()) {
    // Copy the current SCC and increment so that the transformer can modify the
    // SCC without invalidating our iterator.
    currentSCCVec = *cgi;
    ++cgi;
    sccTransformer(currentSCCVec);
  }
}

namespace {
/// This struct represents a resolved call to a given callgraph node. Given
/// that the call does not actually contain a direct reference to the
/// Region (CallGraphNode) that it is dispatching to, we need to resolve it
/// explicitly.
struct ResolvedCall {
  ResolvedCall(CallOpInterface call, CallGraphNode *sourceNode,
               CallGraphNode *targetNode)
      : call(call), sourceNode(sourceNode), targetNode(targetNode) {}
  CallOpInterface call;
  CallGraphNode *sourceNode, *targetNode;
};
} // end anonymous namespace

/// Collect all of the call operations within the given range of blocks. If
/// `traverseNestedCGNodes` is true, this will also collect call operations
/// inside of nested callgraph nodes.
static void collectCallOps(iterator_range<Region::iterator> blocks,
                           CallGraphNode *sourceNode, CallGraph &cg,
                           SmallVectorImpl<ResolvedCall> &calls,
                           bool traverseNestedCGNodes) {
  SmallVector<std::pair<Block *, CallGraphNode *>, 8> worklist;
  auto addToWorklist = [&](CallGraphNode *node,
                           iterator_range<Region::iterator> blocks) {
    for (Block &block : blocks)
      worklist.emplace_back(&block, node);
  };
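  // Use an explicit worklist of (block, owning node) pairs so that nested
  // regions can be traversed iteratively while tracking which callgraph node
  // each discovered call belongs to.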

  addToWorklist(sourceNode, blocks);
  while (!worklist.empty()) {
    Block *block;
    std::tie(block, sourceNode) = worklist.pop_back_val();

    for (Operation &op : *block) {
      if (auto call = dyn_cast<CallOpInterface>(op)) {
        // TODO(riverriddle) Support inlining nested call references.
        CallInterfaceCallable callable = call.getCallableForCallee();
        if (SymbolRefAttr symRef = callable.dyn_cast<SymbolRefAttr>()) {
          if (!symRef.isa<FlatSymbolRefAttr>())
            continue;
        }

        CallGraphNode *targetNode = cg.resolveCallable(call);
        if (!targetNode->isExternal())
          calls.emplace_back(call, sourceNode, targetNode);
        continue;
      }

      // If this is not a call, traverse the nested regions. If
      // `traverseNestedCGNodes` is false, then don't traverse nested call graph
      // regions.
      for (auto &nestedRegion : op.getRegions()) {
        CallGraphNode *nestedNode = cg.lookupNode(&nestedRegion);
        if (traverseNestedCGNodes || !nestedNode)
          addToWorklist(nestedNode ? nestedNode : sourceNode, nestedRegion);
      }
    }
  }
}

//===----------------------------------------------------------------------===//
// Inliner
//===----------------------------------------------------------------------===//
namespace {
/// This class provides a specialization of the main inlining interface.
struct Inliner : public InlinerInterface {
  Inliner(MLIRContext *context, CallGraph &cg)
      : InlinerInterface(context), cg(cg) {}

  /// Process a set of blocks that have been inlined. This callback is invoked
  /// *before* inlined terminator operations have been processed.
  void
  processInlinedBlocks(iterator_range<Region::iterator> inlinedBlocks) final {
    // Find the closest callgraph node from the first block.
    CallGraphNode *node;
    Region *region = inlinedBlocks.begin()->getParent();
    while (!(node = cg.lookupNode(region))) {
      region = region->getParentRegion();
      assert(region && "expected valid parent node");
    }

    collectCallOps(inlinedBlocks, node, cg, calls,
                   /*traverseNestedCGNodes=*/true);
  }

  /// The current set of call operations to consider for inlining.
  SmallVector<ResolvedCall, 8> calls;

  /// The callgraph being operated on.
  CallGraph &cg;
};
} // namespace

/// Returns true if the given call should be inlined.
static bool shouldInline(ResolvedCall &resolvedCall) {
  // Don't allow inlining terminator calls. We currently don't support this
  // case.
  if (resolvedCall.call.getOperation()->isKnownTerminator())
    return false;

  // Don't allow inlining if the target is an ancestor of the call. This
  // prevents inlining recursively.
  if (resolvedCall.targetNode->getCallableRegion()->isAncestor(
          resolvedCall.call.getParentRegion()))
    return false;

  // Otherwise, inline.
  return true;
}

/// Delete the given node and remove it from the current SCC and the callgraph.
static void deleteNode(CallGraphNode *node, CGUseList &useList, CallGraph &cg,
                       MutableArrayRef<CallGraphNode *> currentSCC) {
  // Erase the parent operation and remove it from the various lists.
  node->getCallableRegion()->getParentOp()->erase();
  cg.eraseNode(node);

  // Replace this node in the currentSCC with the external node.
  auto it = llvm::find(currentSCC, node);
  if (it != currentSCC.end())
    *it = cg.getExternalNode();
}

/// Attempt to inline calls within the given SCC. This function returns
/// success if any calls were inlined, failure otherwise.
static LogicalResult
inlineCallsInSCC(Inliner &inliner, CGUseList &useList,
                 MutableArrayRef<CallGraphNode *> currentSCC) {
  CallGraph &cg = inliner.cg;
  auto &calls = inliner.calls;

  // Collect all of the direct calls within the nodes of the current SCC. We
  // don't traverse nested callgraph nodes, because they are handled
  // separately, likely within a different SCC.
  for (CallGraphNode *node : currentSCC) {
    if (node->isExternal())
      continue;

    // If this node is dead, just delete it now.
    if (useList.isDead(node))
      deleteNode(node, useList, cg, currentSCC);
    else
      collectCallOps(*node->getCallableRegion(), node, cg, calls,
                     /*traverseNestedCGNodes=*/false);
  }
  if (calls.empty())
    return failure();

  // A set of dead nodes to remove after inlining.
  SmallVector<CallGraphNode *, 1> deadNodes;

  // Try to inline each of the call operations. Don't cache the end iterator
  // here as more calls may be added during inlining.
  bool inlinedAnyCalls = false;
  for (unsigned i = 0; i != calls.size(); ++i) {
    ResolvedCall it = calls[i];
    bool doInline = shouldInline(it);
    LLVM_DEBUG({
      if (doInline)
        llvm::dbgs() << "* Inlining call: ";
      else
        llvm::dbgs() << "* Not inlining call: ";
      it.call.dump();
    });
    if (!doInline)
      continue;
    CallOpInterface call = it.call;
    Region *targetRegion = it.targetNode->getCallableRegion();

    // If this is the last call to the target node and the node is discardable,
    // then inline it in-place and delete the node if successful.
    bool inlineInPlace = useList.hasOneUseAndDiscardable(it.targetNode);

    LogicalResult inlineResult = inlineCall(
        inliner, call, cast<CallableOpInterface>(targetRegion->getParentOp()),
        targetRegion, /*shouldCloneInlinedRegion=*/!inlineInPlace);
    if (failed(inlineResult))
      continue;
    inlinedAnyCalls = true;

    // If the inlining was successful, merge the new uses into the source node.
    useList.dropCallUses(it.sourceNode, call.getOperation(), cg);
    useList.mergeUsesAfterInlining(it.targetNode, it.sourceNode);

    // Then erase the call.
    call.erase();

    // If we inlined in place, mark the node for deletion.
    if (inlineInPlace) {
      useList.eraseNode(it.targetNode);
      deadNodes.push_back(it.targetNode);
    }
  }

  for (CallGraphNode *node : deadNodes)
    deleteNode(node, useList, cg, currentSCC);
  calls.clear();
  return success(inlinedAnyCalls);
}

/// Canonicalize the nodes within the given SCC with the given set of
/// canonicalization patterns.
static void canonicalizeSCC(CallGraph &cg, CGUseList &useList,
                            MutableArrayRef<CallGraphNode *> currentSCC,
                            MLIRContext *context,
                            const OwningRewritePatternList &canonPatterns) {
  // Collect the sets of nodes to canonicalize.
  SmallVector<CallGraphNode *, 4> nodesToCanonicalize;
  for (auto *node : currentSCC) {
    // Don't canonicalize the external node; it has no valid callable region.
    if (node->isExternal())
      continue;

    // Don't canonicalize nodes with children. Nodes with children
    // require special handling as we may remove the node during
    // canonicalization. In the future, we should be able to handle this
    // case with proper node deletion tracking.
    if (node->hasChildren())
      continue;

    // We also won't apply canonicalizations for nodes that are not
    // isolated. This avoids potentially mutating the regions of nodes defined
    // above; this is also a stipulation of the 'applyPatternsAndFoldGreedily'
    // driver.
    auto *region = node->getCallableRegion();
    if (!region->getParentOp()->isKnownIsolatedFromAbove())
      continue;
    nodesToCanonicalize.push_back(node);
  }
  if (nodesToCanonicalize.empty())
    return;

  // Canonicalize each of the nodes within the SCC in parallel.
  // NOTE: This is simple now, because we don't enable canonicalizing nodes
  // within children. When we remove this restriction, this logic will need to
  // be reworked.
  if (context->isMultithreadingEnabled()) {
    ParallelDiagnosticHandler canonicalizationHandler(context);
    llvm::parallelForEachN(
        /*Begin=*/0, /*End=*/nodesToCanonicalize.size(), [&](size_t index) {
          // Set the order for this thread so that diagnostics will be properly
          // ordered.
          canonicalizationHandler.setOrderIDForThread(index);

          // Apply the canonicalization patterns to this region.
          auto *node = nodesToCanonicalize[index];
          applyPatternsAndFoldGreedily(*node->getCallableRegion(),
                                       canonPatterns);

          // Make sure to reset the order ID for the diagnostic handler, as this
          // thread may be used in a different context.
          canonicalizationHandler.eraseOrderIDForThread();
        });
  } else {
    for (CallGraphNode *node : nodesToCanonicalize)
      applyPatternsAndFoldGreedily(*node->getCallableRegion(), canonPatterns);
  }

  // Recompute the uses held by each of the nodes.
  for (CallGraphNode *node : nodesToCanonicalize)
    useList.recomputeUses(node, cg);
}

//===----------------------------------------------------------------------===//
// InlinerPass
//===----------------------------------------------------------------------===//

namespace {
struct InlinerPass : public InlinerBase<InlinerPass> {
  void runOnOperation() override;

  /// Attempt to inline calls within the given SCC, and run canonicalizations
  /// with the given patterns, until a fixed point is reached. This allows for
  /// the inlining of newly devirtualized calls.
  void inlineSCC(Inliner &inliner, CGUseList &useList,
                 MutableArrayRef<CallGraphNode *> currentSCC,
                 MLIRContext *context,
                 const OwningRewritePatternList &canonPatterns);
};
} // end anonymous namespace

void InlinerPass::runOnOperation() {
  CallGraph &cg = getAnalysis<CallGraph>();
  auto *context = &getContext();

  // The inliner should only be run on operations that define a symbol table,
  // as the callgraph will need to resolve references.
  Operation *op = getOperation();
  if (!op->hasTrait<OpTrait::SymbolTable>()) {
    op->emitOpError() << " was scheduled to run under the inliner, but does "
                         "not define a symbol table";
    return signalPassFailure();
  }

  // Collect a set of canonicalization patterns to use when simplifying
  // callable regions within an SCC.
  OwningRewritePatternList canonPatterns;
  for (auto *op : context->getRegisteredOperations())
    op->getCanonicalizationPatterns(canonPatterns, context);

  // Run the inline transform in post-order over the SCCs in the callgraph.
  Inliner inliner(context, cg);
  CGUseList useList(getOperation(), cg);
  runTransformOnCGSCCs(cg, [&](MutableArrayRef<CallGraphNode *> scc) {
    inlineSCC(inliner, useList, scc, context, canonPatterns);
  });
}

void InlinerPass::inlineSCC(Inliner &inliner, CGUseList &useList,
                            MutableArrayRef<CallGraphNode *> currentSCC,
                            MLIRContext *context,
                            const OwningRewritePatternList &canonPatterns) {
  // If we successfully inlined any calls, run some simplifications on the
  // nodes of the SCC. Continue attempting to inline until we reach a fixed
  // point, or a maximum iteration count. We canonicalize here as it may
  // devirtualize new calls, as well as give us a better cost model.
  unsigned iterationCount = 0;
  while (succeeded(inlineCallsInSCC(inliner, useList, currentSCC))) {
    // If we aren't allowing simplifications or the max iteration count was
    // reached, then bail out early.
    if (disableCanonicalization || ++iterationCount >= maxInliningIterations)
      break;
    canonicalizeSCC(inliner.cg, useList, currentSCC, context, canonPatterns);
  }
}

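// Note: a typical way to schedule this pass is through a pass manager, e.g.
//   pm.addPass(createInlinerPass());
// where `pm` is an assumed PassManager rooted at an operation that defines a
// symbol table (such as a module).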
std::unique_ptr<Pass> mlir::createInlinerPass() {
  return std::make_unique<InlinerPass>();
}