//===- SparseTensorPasses.cpp - Pass for autogen sparse tensor code -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/Complex/IR/Complex.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Func/Transforms/FuncConversions.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"

using namespace mlir;
using namespace mlir::sparse_tensor;

namespace {

//===----------------------------------------------------------------------===//
// Passes declaration.
//===----------------------------------------------------------------------===//

#define GEN_PASS_CLASSES
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h.inc"

//===----------------------------------------------------------------------===//
// Passes implementation.
//===----------------------------------------------------------------------===//

struct SparsificationPass : public SparsificationBase<SparsificationPass> {

  SparsificationPass() = default;
  SparsificationPass(const SparsificationPass &pass) = default;
  SparsificationPass(const SparsificationOptions &options) {
    parallelization = static_cast<int32_t>(options.parallelizationStrategy);
    vectorization = static_cast<int32_t>(options.vectorizationStrategy);
    vectorLength = options.vectorLength;
    enableSIMDIndex32 = options.enableSIMDIndex32;
    enableVLAVectorization = options.enableVLAVectorization;
  }

  void runOnOperation() override {
    auto *ctx = &getContext();
    RewritePatternSet patterns(ctx);
    // Translate strategy flags to strategy options.
    SparsificationOptions options(
        sparseParallelizationStrategy(parallelization),
        sparseVectorizationStrategy(vectorization), vectorLength,
        enableSIMDIndex32, enableVLAVectorization);
    // Apply rewriting.
    populateSparsificationPatterns(patterns, options);
    vector::populateVectorToVectorCanonicalizationPatterns(patterns);
    (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
  }
};

class SparseTensorTypeConverter : public TypeConverter {
public:
  SparseTensorTypeConverter() {
    addConversion([](Type type) { return type; });
    addConversion(convertSparseTensorTypes);
  }
  // Maps each sparse tensor type to an opaque pointer.
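  // The opaque pointer (viz. !llvm.ptr<i8>) serves as a handle into the
  // sparse runtime support library, which owns the actual storage scheme;
  // all other types pass through the identity conversion above unchanged.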
  static Optional<Type> convertSparseTensorTypes(Type type) {
    if (getSparseTensorEncoding(type) != nullptr)
      return LLVM::LLVMPointerType::get(IntegerType::get(type.getContext(), 8));
    return llvm::None;
  }
};

struct SparseTensorConversionPass
    : public SparseTensorConversionBase<SparseTensorConversionPass> {

  SparseTensorConversionPass() = default;
  SparseTensorConversionPass(const SparseTensorConversionPass &pass) = default;
  SparseTensorConversionPass(const SparseTensorConversionOptions &options) {
    sparseToSparse = static_cast<int32_t>(options.sparseToSparseStrategy);
  }

  void runOnOperation() override {
    auto *ctx = &getContext();
    RewritePatternSet patterns(ctx);
    SparseTensorTypeConverter converter;
    ConversionTarget target(*ctx);
    // Everything in the sparse dialect must go!
    target.addIllegalDialect<SparseTensorDialect>();
    // All dynamic rules below accept new function, call, return, and tensor
    // dim and cast operations as legal output of the rewriting provided that
    // all sparse tensor types have been fully rewritten.
    target.addDynamicallyLegalOp<func::FuncOp>([&](func::FuncOp op) {
      return converter.isSignatureLegal(op.getFunctionType());
    });
    target.addDynamicallyLegalOp<func::CallOp>([&](func::CallOp op) {
      return converter.isSignatureLegal(op.getCalleeType());
    });
    target.addDynamicallyLegalOp<func::ReturnOp>([&](func::ReturnOp op) {
      return converter.isLegal(op.getOperandTypes());
    });
    target.addDynamicallyLegalOp<tensor::DimOp>([&](tensor::DimOp op) {
      return converter.isLegal(op.getOperandTypes());
    });
    target.addDynamicallyLegalOp<tensor::CastOp>([&](tensor::CastOp op) {
      return converter.isLegal(op.getOperand().getType());
    });
    // The following operations and dialects may be introduced by the
    // rewriting rules, and are therefore marked as legal.
    target.addLegalOp<arith::CmpFOp, arith::CmpIOp, arith::ConstantOp,
                      arith::IndexCastOp, complex::ConstantOp,
                      complex::NotEqualOp, linalg::FillOp, linalg::YieldOp,
                      tensor::ExtractOp>();
    target
        .addLegalDialect<bufferization::BufferizationDialect, LLVM::LLVMDialect,
                         memref::MemRefDialect, scf::SCFDialect>();
    // Translate strategy flags to strategy options.
    SparseTensorConversionOptions options(
        sparseToSparseConversionStrategy(sparseToSparse));
    // Populate with rules and apply rewriting rules.
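    // The partial conversion below leaves operations that are neither
    // explicitly illegal nor dynamically legalized untouched; any sparse
    // tensor op that survives the rewriting causes the pass to fail.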
    populateFunctionOpInterfaceTypeConversionPattern<func::FuncOp>(patterns,
                                                                   converter);
    populateCallOpTypeConversionPattern(patterns, converter);
    populateSparseTensorConversionPatterns(converter, patterns, options);
    if (failed(applyPartialConversion(getOperation(), target,
                                      std::move(patterns))))
      signalPassFailure();
  }
};

} // namespace

SparseParallelizationStrategy
mlir::sparseParallelizationStrategy(int32_t flag) {
  switch (flag) {
  default:
    return SparseParallelizationStrategy::kNone;
  case 1:
    return SparseParallelizationStrategy::kDenseOuterLoop;
  case 2:
    return SparseParallelizationStrategy::kAnyStorageOuterLoop;
  case 3:
    return SparseParallelizationStrategy::kDenseAnyLoop;
  case 4:
    return SparseParallelizationStrategy::kAnyStorageAnyLoop;
  }
}

SparseVectorizationStrategy mlir::sparseVectorizationStrategy(int32_t flag) {
  switch (flag) {
  default:
    return SparseVectorizationStrategy::kNone;
  case 1:
    return SparseVectorizationStrategy::kDenseInnerLoop;
  case 2:
    return SparseVectorizationStrategy::kAnyStorageInnerLoop;
  }
}

SparseToSparseConversionStrategy
mlir::sparseToSparseConversionStrategy(int32_t flag) {
  switch (flag) {
  default:
    return SparseToSparseConversionStrategy::kAuto;
  case 1:
    return SparseToSparseConversionStrategy::kViaCOO;
  case 2:
    return SparseToSparseConversionStrategy::kDirect;
  }
}

std::unique_ptr<Pass> mlir::createSparsificationPass() {
  return std::make_unique<SparsificationPass>();
}

std::unique_ptr<Pass>
mlir::createSparsificationPass(const SparsificationOptions &options) {
  return std::make_unique<SparsificationPass>(options);
}

std::unique_ptr<Pass> mlir::createSparseTensorConversionPass() {
  return std::make_unique<SparseTensorConversionPass>();
}

std::unique_ptr<Pass> mlir::createSparseTensorConversionPass(
    const SparseTensorConversionOptions &options) {
  return std::make_unique<SparseTensorConversionPass>(options);
}
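
// An illustrative sketch (not part of the upstream file) of how the factory
// functions above might be combined by a client; the exact pipeline, option
// values, and surrounding passes are assumptions and depend on the driver:
//
//   PassManager pm(module.getContext());
//   pm.addPass(createSparsificationPass());          // linalg -> sparse loops
//   pm.addPass(createSparseTensorConversionPass());  // sparse types -> runtime calls
//   if (failed(pm.run(module)))
//     ... // report the failure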