//===- SparseTensorPasses.cpp - Pass for autogen sparse tensor code -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/Complex/IR/Complex.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Func/Transforms/FuncConversions.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"

using namespace mlir;
using namespace mlir::sparse_tensor;

namespace {

//===----------------------------------------------------------------------===//
// Passes declaration.
//===----------------------------------------------------------------------===//

#define GEN_PASS_CLASSES
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h.inc"

//===----------------------------------------------------------------------===//
// Passes implementation.
//===----------------------------------------------------------------------===//

struct SparsificationPass : public SparsificationBase<SparsificationPass> {

  SparsificationPass() = default;
  SparsificationPass(const SparsificationPass &pass) = default;
  SparsificationPass(const SparsificationOptions &options) {
    parallelization = static_cast<int32_t>(options.parallelizationStrategy);
    vectorization = static_cast<int32_t>(options.vectorizationStrategy);
    vectorLength = options.vectorLength;
    enableSIMDIndex32 = options.enableSIMDIndex32;
    enableVLAVectorization = options.enableVLAVectorization;
  }

  void runOnOperation() override {
    auto *ctx = &getContext();
    RewritePatternSet patterns(ctx);
    // Translate strategy flags to strategy options.
    SparsificationOptions options(
        sparseParallelizationStrategy(parallelization),
        sparseVectorizationStrategy(vectorization), vectorLength,
        enableSIMDIndex32, enableVLAVectorization);
    // Apply rewriting.
    populateSparsificationPatterns(patterns, options);
    vector::populateVectorToVectorCanonicalizationPatterns(patterns);
    (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
  }
};

class SparseTensorTypeConverter : public TypeConverter {
public:
  SparseTensorTypeConverter() {
    addConversion([](Type type) { return type; });
    addConversion(convertSparseTensorTypes);
  }
  // Maps each sparse tensor type to an opaque pointer.
  static Optional<Type> convertSparseTensorTypes(Type type) {
    if (getSparseTensorEncoding(type) != nullptr)
      return LLVM::LLVMPointerType::get(
          IntegerType::get(type.getContext(), 8));
    return llvm::None;
  }
};

struct SparseTensorConversionPass
    : public SparseTensorConversionBase<SparseTensorConversionPass> {

  SparseTensorConversionPass() = default;
  SparseTensorConversionPass(const SparseTensorConversionPass &pass) = default;
  SparseTensorConversionPass(const SparseTensorConversionOptions &options) {
    sparseToSparse = static_cast<int32_t>(options.sparseToSparseStrategy);
  }

  void runOnOperation() override {
    auto *ctx = &getContext();
    RewritePatternSet patterns(ctx);
    SparseTensorTypeConverter converter;
    ConversionTarget target(*ctx);
    // Everything in the sparse dialect must go!
    target.addIllegalDialect<SparseTensorDialect>();
    // All dynamic rules below accept new function, call, return, and tensor
    // dim and cast operations as legal output of the rewriting provided that
    // all sparse tensor types have been fully rewritten.
    target.addDynamicallyLegalOp<func::FuncOp>([&](func::FuncOp op) {
      return converter.isSignatureLegal(op.getFunctionType());
    });
    target.addDynamicallyLegalOp<func::CallOp>([&](func::CallOp op) {
      return converter.isSignatureLegal(op.getCalleeType());
    });
    target.addDynamicallyLegalOp<func::ReturnOp>([&](func::ReturnOp op) {
      return converter.isLegal(op.getOperandTypes());
    });
    target.addDynamicallyLegalOp<tensor::DimOp>([&](tensor::DimOp op) {
      return converter.isLegal(op.getOperandTypes());
    });
    target.addDynamicallyLegalOp<tensor::CastOp>([&](tensor::CastOp op) {
      return converter.isLegal(op.getOperand().getType());
    });
    // The following operations and dialects may be introduced by the
    // rewriting rules, and are therefore marked as legal.
    target.addLegalOp<arith::CmpFOp, arith::CmpIOp, arith::ConstantOp,
                      arith::IndexCastOp, complex::ConstantOp,
                      complex::NotEqualOp, linalg::FillOp, linalg::YieldOp,
                      tensor::ExtractOp>();
    target
        .addLegalDialect<bufferization::BufferizationDialect, LLVM::LLVMDialect,
                         memref::MemRefDialect, scf::SCFDialect>();
    target.addIllegalOp<bufferization::AllocTensorOp>();
    // Translate strategy flags to strategy options.
    SparseTensorConversionOptions options(
        sparseToSparseConversionStrategy(sparseToSparse));
    // Populate with rules and apply rewriting rules.
    populateFunctionOpInterfaceTypeConversionPattern<func::FuncOp>(patterns,
                                                                   converter);
    populateCallOpTypeConversionPattern(patterns, converter);
    populateSparseTensorConversionPatterns(converter, patterns, options);
    if (failed(applyPartialConversion(getOperation(), target,
                                      std::move(patterns))))
      signalPassFailure();
  }
};

} // namespace

//===----------------------------------------------------------------------===//
// Strategy flag methods.
//===----------------------------------------------------------------------===//

SparseParallelizationStrategy
mlir::sparseParallelizationStrategy(int32_t flag) {
  switch (flag) {
  default:
    return SparseParallelizationStrategy::kNone;
  case 1:
    return SparseParallelizationStrategy::kDenseOuterLoop;
  case 2:
    return SparseParallelizationStrategy::kAnyStorageOuterLoop;
  case 3:
    return SparseParallelizationStrategy::kDenseAnyLoop;
  case 4:
    return SparseParallelizationStrategy::kAnyStorageAnyLoop;
  }
}

SparseVectorizationStrategy mlir::sparseVectorizationStrategy(int32_t flag) {
  switch (flag) {
  default:
    return SparseVectorizationStrategy::kNone;
  case 1:
    return SparseVectorizationStrategy::kDenseInnerLoop;
  case 2:
    return SparseVectorizationStrategy::kAnyStorageInnerLoop;
  }
}

SparseToSparseConversionStrategy
mlir::sparseToSparseConversionStrategy(int32_t flag) {
  switch (flag) {
  default:
    return SparseToSparseConversionStrategy::kAuto;
  case 1:
    return SparseToSparseConversionStrategy::kViaCOO;
  case 2:
    return SparseToSparseConversionStrategy::kDirect;
  }
}

//===----------------------------------------------------------------------===//
// Pass creation methods.
//===----------------------------------------------------------------------===//

std::unique_ptr<Pass> mlir::createSparsificationPass() {
  return std::make_unique<SparsificationPass>();
}

std::unique_ptr<Pass>
mlir::createSparsificationPass(const SparsificationOptions &options) {
  return std::make_unique<SparsificationPass>(options);
}

std::unique_ptr<Pass> mlir::createSparseTensorConversionPass() {
  return std::make_unique<SparseTensorConversionPass>();
}

std::unique_ptr<Pass> mlir::createSparseTensorConversionPass(
    const SparseTensorConversionOptions &options) {
  return std::make_unique<SparseTensorConversionPass>(options);
}