//===- FoldSubViewOps.cpp - Fold memref.subview ops -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This transformation pass folds loading/storing from/to subview ops into
// loading/storing from/to the original memref.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/MemRef/Transforms/Passes.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"

using namespace mlir;

//===----------------------------------------------------------------------===//
// Utility functions
//===----------------------------------------------------------------------===//

/// Given the 'indices' of a load/store operation where the memref is a result
/// of a subview op, returns the indices w.r.t. the source memref of the
/// subview op. For example:
///
///   %0 = ... : memref<12x42xf32>
///   %1 = subview %0[%arg0, %arg1] [4, 4] [%stride1, %stride2] :
///          memref<12x42xf32> to memref<4x4xf32, offset=?, strides=[?, ?]>
///   %2 = load %1[%i1, %i2] : memref<4x4xf32, offset=?, strides=[?, ?]>
///
/// could be folded into:
///
///   %2 = load %0[%arg0 + %i1 * %stride1, %arg1 + %i2 * %stride2] :
///          memref<12x42xf32>
static LogicalResult
resolveSourceIndices(Location loc, PatternRewriter &rewriter,
                     memref::SubViewOp subViewOp, ValueRange indices,
                     SmallVectorImpl<Value> &sourceIndices) {
  SmallVector<OpFoldResult> mixedOffsets = subViewOp.getMixedOffsets();
  SmallVector<OpFoldResult> mixedSizes = subViewOp.getMixedSizes();
  SmallVector<OpFoldResult> mixedStrides = subViewOp.getMixedStrides();

  SmallVector<Value> useIndices;
  // Check if this is a rank-reducing case; for every dropped unit dimension,
  // add a zero to the indices.
  unsigned resultDim = 0;
  llvm::SmallDenseSet<unsigned> unusedDims = subViewOp.getDroppedDims();
  for (auto dim : llvm::seq<unsigned>(0, subViewOp.getSourceType().getRank())) {
    if (unusedDims.count(dim))
      useIndices.push_back(rewriter.create<arith::ConstantIndexOp>(loc, 0));
    else
      useIndices.push_back(indices[resultDim++]);
  }
  if (useIndices.size() != mixedOffsets.size())
    return failure();
  sourceIndices.resize(useIndices.size());
  for (auto index : llvm::seq<size_t>(0, mixedOffsets.size())) {
    SmallVector<Value> dynamicOperands;
    AffineExpr expr = rewriter.getAffineDimExpr(0);
    unsigned numSymbols = 0;
    dynamicOperands.push_back(useIndices[index]);

    // Multiply by the stride.
    if (auto attr = mixedStrides[index].dyn_cast<Attribute>()) {
      expr = expr * attr.cast<IntegerAttr>().getInt();
    } else {
      dynamicOperands.push_back(mixedStrides[index].get<Value>());
      expr = expr * rewriter.getAffineSymbolExpr(numSymbols++);
    }

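    // At this point `expr` is `d0 * stride`, with the stride either folded in
    // as a constant or bound to a fresh symbol. Adding the offset below
    // completes the source index: offset + d0 * stride.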
    // Add the offset.
    if (auto attr = mixedOffsets[index].dyn_cast<Attribute>()) {
      expr = expr + attr.cast<IntegerAttr>().getInt();
    } else {
      dynamicOperands.push_back(mixedOffsets[index].get<Value>());
      expr = expr + rewriter.getAffineSymbolExpr(numSymbols++);
    }
    sourceIndices[index] = rewriter.create<AffineApplyOp>(
        loc, AffineMap::get(1, numSymbols, expr), dynamicOperands);
  }
  return success();
}

/// Helpers to access the memref operand for each op.
static Value getMemRefOperand(memref::LoadOp op) { return op.memref(); }

static Value getMemRefOperand(vector::TransferReadOp op) { return op.source(); }

static Value getMemRefOperand(memref::StoreOp op) { return op.memref(); }

static Value getMemRefOperand(vector::TransferWriteOp op) {
  return op.source();
}

/// Given the permutation map of the original
/// `vector.transfer_read`/`vector.transfer_write` operation, compute the
/// permutation map to use after the subview is folded into it.
static AffineMap getPermutationMap(MLIRContext *context,
                                   memref::SubViewOp subViewOp,
                                   AffineMap currPermutationMap) {
  llvm::SmallDenseSet<unsigned> unusedDims = subViewOp.getDroppedDims();
  SmallVector<AffineExpr> exprs;
  int64_t sourceRank = subViewOp.getSourceType().getRank();
  for (auto dim : llvm::seq<int64_t>(0, sourceRank)) {
    if (unusedDims.count(dim))
      continue;
    exprs.push_back(getAffineDimExpr(dim, context));
  }
  auto resultDimToSourceDimMap = AffineMap::get(sourceRank, 0, exprs, context);
  return currPermutationMap.compose(resultDimToSourceDimMap);
}
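// Worked example (illustrative only; the shapes and maps below are made up
// for this comment): take a rank-reducing subview of a memref<8x1x16xf32>
// that drops dimension 1. The surviving source dims are {0, 2}, so
// `resultDimToSourceDimMap` is (d0, d1, d2) -> (d0, d2). Composing an
// identity permutation map (d0, d1) -> (d0, d1) on the subview with this map
// yields (d0, d1, d2) -> (d0, d2), i.e. the transfer op indexes the source
// memref directly and skips the dropped dimension.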
//===----------------------------------------------------------------------===//
// Patterns
//===----------------------------------------------------------------------===//

namespace {
/// Merges a subview operation with a load/transfer_read operation.
template <typename OpTy>
class LoadOpOfSubViewFolder final : public OpRewritePattern<OpTy> {
public:
  using OpRewritePattern<OpTy>::OpRewritePattern;

  LogicalResult matchAndRewrite(OpTy loadOp,
                                PatternRewriter &rewriter) const override;

private:
  void replaceOp(OpTy loadOp, memref::SubViewOp subViewOp,
                 ArrayRef<Value> sourceIndices,
                 PatternRewriter &rewriter) const;
};

/// Merges a subview operation with a store/transfer_write operation.
template <typename OpTy>
class StoreOpOfSubViewFolder final : public OpRewritePattern<OpTy> {
public:
  using OpRewritePattern<OpTy>::OpRewritePattern;

  LogicalResult matchAndRewrite(OpTy storeOp,
                                PatternRewriter &rewriter) const override;

private:
  void replaceOp(OpTy storeOp, memref::SubViewOp subViewOp,
                 ArrayRef<Value> sourceIndices,
                 PatternRewriter &rewriter) const;
};

template <>
void LoadOpOfSubViewFolder<memref::LoadOp>::replaceOp(
    memref::LoadOp loadOp, memref::SubViewOp subViewOp,
    ArrayRef<Value> sourceIndices, PatternRewriter &rewriter) const {
  rewriter.replaceOpWithNewOp<memref::LoadOp>(loadOp, subViewOp.source(),
                                              sourceIndices);
}

template <>
void LoadOpOfSubViewFolder<vector::TransferReadOp>::replaceOp(
    vector::TransferReadOp loadOp, memref::SubViewOp subViewOp,
    ArrayRef<Value> sourceIndices, PatternRewriter &rewriter) const {
  rewriter.replaceOpWithNewOp<vector::TransferReadOp>(
      loadOp, loadOp.getVectorType(), subViewOp.source(), sourceIndices,
      getPermutationMap(rewriter.getContext(), subViewOp,
                        loadOp.permutation_map()),
      loadOp.padding(), loadOp.in_boundsAttr());
}

template <>
void StoreOpOfSubViewFolder<memref::StoreOp>::replaceOp(
    memref::StoreOp storeOp, memref::SubViewOp subViewOp,
    ArrayRef<Value> sourceIndices, PatternRewriter &rewriter) const {
  rewriter.replaceOpWithNewOp<memref::StoreOp>(
      storeOp, storeOp.value(), subViewOp.source(), sourceIndices);
}

template <>
void StoreOpOfSubViewFolder<vector::TransferWriteOp>::replaceOp(
    vector::TransferWriteOp transferWriteOp, memref::SubViewOp subViewOp,
    ArrayRef<Value> sourceIndices, PatternRewriter &rewriter) const {
  rewriter.replaceOpWithNewOp<vector::TransferWriteOp>(
      transferWriteOp, transferWriteOp.vector(), subViewOp.source(),
      sourceIndices,
      getPermutationMap(rewriter.getContext(), subViewOp,
                        transferWriteOp.permutation_map()),
      transferWriteOp.in_boundsAttr());
}
} // namespace

template <typename OpTy>
LogicalResult
LoadOpOfSubViewFolder<OpTy>::matchAndRewrite(OpTy loadOp,
                                             PatternRewriter &rewriter) const {
  auto subViewOp =
      getMemRefOperand(loadOp).template getDefiningOp<memref::SubViewOp>();
  if (!subViewOp)
    return failure();

  SmallVector<Value, 4> sourceIndices;
  if (failed(resolveSourceIndices(loadOp.getLoc(), rewriter, subViewOp,
                                  loadOp.indices(), sourceIndices)))
    return failure();

  replaceOp(loadOp, subViewOp, sourceIndices, rewriter);
  return success();
}

template <typename OpTy>
LogicalResult
StoreOpOfSubViewFolder<OpTy>::matchAndRewrite(OpTy storeOp,
                                              PatternRewriter &rewriter) const {
  auto subViewOp =
      getMemRefOperand(storeOp).template getDefiningOp<memref::SubViewOp>();
  if (!subViewOp)
    return failure();

  SmallVector<Value, 4> sourceIndices;
  if (failed(resolveSourceIndices(storeOp.getLoc(), rewriter, subViewOp,
                                  storeOp.indices(), sourceIndices)))
    return failure();

  replaceOp(storeOp, subViewOp, sourceIndices, rewriter);
  return success();
}

void memref::populateFoldSubViewOpPatterns(RewritePatternSet &patterns) {
  patterns.add<LoadOpOfSubViewFolder<memref::LoadOp>,
               LoadOpOfSubViewFolder<vector::TransferReadOp>,
               StoreOpOfSubViewFolder<memref::StoreOp>,
               StoreOpOfSubViewFolder<vector::TransferWriteOp>>(
      patterns.getContext());
}
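// The patterns above can also be reused outside of this pass, e.g. mixed into
// a larger rewrite pipeline. A minimal sketch (`MyPass` is a hypothetical
// pass class, not part of this file):
//
//   void MyPass::runOnOperation() {
//     RewritePatternSet patterns(&getContext());
//     memref::populateFoldSubViewOpPatterns(patterns);
//     (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
//   }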
//===----------------------------------------------------------------------===//
// Pass registration
//===----------------------------------------------------------------------===//

namespace {

#define GEN_PASS_CLASSES
#include "mlir/Dialect/MemRef/Transforms/Passes.h.inc"

struct FoldSubViewOpsPass final
    : public FoldSubViewOpsBase<FoldSubViewOpsPass> {
  void runOnOperation() override;
};

} // namespace

void FoldSubViewOpsPass::runOnOperation() {
  RewritePatternSet patterns(&getContext());
  memref::populateFoldSubViewOpPatterns(patterns);
  (void)applyPatternsAndFoldGreedily(getOperation()->getRegions(),
                                     std::move(patterns));
}

std::unique_ptr<Pass> memref::createFoldSubViewOpsPass() {
  return std::make_unique<FoldSubViewOpsPass>();
}
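// For reference, the rewrite materializes the index arithmetic through
// affine.apply ops. A sketch with dynamic offsets and unit strides (the exact
// IR spelling of the layout attribute varies across MLIR versions):
//
//   %1 = memref.subview %0[%o0, %o1] [4, 4] [1, 1]
//       : memref<12x42xf32> to memref<4x4xf32, offset: ?, strides: [42, 1]>
//   %2 = memref.load %1[%i, %j] : memref<4x4xf32, offset: ?, strides: [42, 1]>
//
// becomes
//
//   %idx0 = affine.apply affine_map<(d0)[s0] -> (d0 + s0)>(%i)[%o0]
//   %idx1 = affine.apply affine_map<(d0)[s0] -> (d0 + s0)>(%j)[%o1]
//   %2 = memref.load %0[%idx0, %idx1] : memref<12x42xf32>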