//===- SparseTensorPipelines.cpp - Pipelines for sparse tensor code -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/SparseTensor/Pipelines/Passes.h"

#include "mlir/Conversion/Passes.h"
#include "mlir/Dialect/Bufferization/Transforms/Bufferize.h"
#include "mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h"
#include "mlir/Dialect/Bufferization/Transforms/Passes.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/Pass/PassManager.h"

using namespace mlir;
using namespace mlir::sparse_tensor;

/// Return configuration options for One-Shot Bufferize.
static bufferization::OneShotBufferizationOptions
getBufferizationOptions(bool analysisOnly) {
  using namespace bufferization;
  OneShotBufferizationOptions options;
  options.bufferizeFunctionBoundaries = true;
  // TODO(springerm): To spot memory leaks more easily, returning dense allocs
  // should be disallowed.
  options.allowReturnAllocs = true;
  options.functionBoundaryTypeConversion =
      BufferizationOptions::LayoutMapOption::IdentityLayoutMap;
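  // For tensors produced by ops unknown to the bufferization, use memref types
  // with a static identity layout instead of fully dynamic layout maps.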
  options.unknownTypeConverterFn = [](Value value, unsigned memorySpace,
                                      const BufferizationOptions &options) {
    return getMemRefTypeWithStaticIdentityLayout(
        value.getType().cast<TensorType>(), memorySpace);
  };
  if (analysisOnly) {
    options.testAnalysisOnly = true;
    options.printConflicts = true;
  }
  return options;
}

//===----------------------------------------------------------------------===//
// Pipeline implementation.
//===----------------------------------------------------------------------===//

void mlir::sparse_tensor::buildSparseCompiler(
    OpPassManager &pm, const SparseCompilerOptions &options) {
  // TODO(wrengr): ensure the original `pm` is for ModuleOp
  pm.addNestedPass<func::FuncOp>(createLinalgGeneralizationPass());
  // TODO(springerm): Reactivate element-wise op fusion pass. This pass does not
  // fit well with bufferization because it replaces unused "out" operands of
  // LinalgOps with InitTensorOps. This would result in additional buffer
  // allocations during bufferization.
  // pm.addPass(createLinalgElementwiseOpFusionPass());
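  // Run One-Shot analysis and insert explicit tensor copies where a value
  // would otherwise be overwritten while still being read, so that the actual
  // bufferization further below can proceed without conflicts.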
  pm.addPass(
      bufferization::createTensorCopyInsertionPass(getBufferizationOptions(
          /*analysisOnly=*/options.testBufferizationAnalysisOnly)));
  if (options.testBufferizationAnalysisOnly)
    return;
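  // Lower the sparse parts: rewrite annotated linalg ops into loops over the
  // selected sparse storage formats, then convert sparse tensor types and
  // primitives into calls to the sparse runtime support library.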
  pm.addPass(createSparsificationPass(options.sparsificationOptions()));
  pm.addPass(createSparseTensorConversionPass(
      options.sparseTensorConversionOptions()));
  pm.addPass(createDenseBufferizationPass(
      getBufferizationOptions(/*analysisOnly=*/false)));
  pm.addNestedPass<func::FuncOp>(
      mlir::bufferization::createFinalizingBufferizePass());
  // TODO(springerm): Add sparse support to the BufferDeallocation pass and add
  // it to this pipeline.
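  // Progressively lower the remaining dense code: linalg to loops, vector and
  // control flow to their LLVM counterparts, and finally reconcile any
  // leftover unrealized casts.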
  pm.addNestedPass<func::FuncOp>(createConvertLinalgToLoopsPass());
  pm.addNestedPass<func::FuncOp>(createConvertVectorToSCFPass());
  pm.addNestedPass<func::FuncOp>(createConvertSCFToCFPass());
  pm.addPass(createLowerAffinePass());
  pm.addPass(createConvertVectorToLLVMPass(options.lowerVectorToLLVMOptions()));
  pm.addPass(createMemRefToLLVMPass());
  pm.addNestedPass<func::FuncOp>(createConvertComplexToStandardPass());
  pm.addNestedPass<func::FuncOp>(createConvertMathToLLVMPass());
  pm.addPass(createConvertMathToLibmPass());
  pm.addPass(createConvertComplexToLibmPass());
  pm.addPass(createConvertComplexToLLVMPass());
  pm.addPass(createConvertFuncToLLVMPass());
  pm.addPass(createReconcileUnrealizedCastsPass());
}

//===----------------------------------------------------------------------===//
// Pipeline registration.
//===----------------------------------------------------------------------===//
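// Once registered, the pipeline can be invoked from any tool that links it in
// (e.g. `mlir-opt --sparse-compiler input.mlir`).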

void mlir::sparse_tensor::registerSparseTensorPipelines() {
  PassPipelineRegistration<SparseCompilerOptions>(
      "sparse-compiler",
      "The standard pipeline for taking sparsity-agnostic IR using the"
      " sparse-tensor type, and lowering it to LLVM IR with concrete"
      " representations and algorithms for sparse tensors.",
      buildSparseCompiler);
}