//===- FoldSubViewOps.cpp - Fold memref.subview ops -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This transformation pass folds loading/storing from/to subview ops into
// loading/storing from/to the original memref.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/MemRef/Transforms/Passes.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"

using namespace mlir;

//===----------------------------------------------------------------------===//
// Utility functions
//===----------------------------------------------------------------------===//

/// Given the 'indices' of a load/store operation where the memref is a result
/// of a subview op, returns the indices w.r.t. the source memref of the
/// subview op. For example
///
///   %0 = ... : memref<12x42xf32>
///   %1 = subview %0[%arg0, %arg1][][%stride1, %stride2] : memref<12x42xf32>
///        to memref<4x4xf32, offset=?, strides=[?, ?]>
///   %2 = load %1[%i1, %i2] : memref<4x4xf32, offset=?, strides=[?, ?]>
///
/// could be folded into
///
///   %2 = load %0[%arg0 + %i1 * %stride1][%arg1 + %i2 * %stride2] :
///        memref<12x42xf32>
static LogicalResult
resolveSourceIndices(Location loc, PatternRewriter &rewriter,
                     memref::SubViewOp subViewOp, ValueRange indices,
                     SmallVectorImpl<Value> &sourceIndices) {
  SmallVector<OpFoldResult> mixedOffsets = subViewOp.getMixedOffsets();
  SmallVector<OpFoldResult> mixedSizes = subViewOp.getMixedSizes();
  SmallVector<OpFoldResult> mixedStrides = subViewOp.getMixedStrides();

  SmallVector<Value> useIndices;
  // Check if this is a rank-reducing case. Then for every unit-dim size add a
  // zero to the indices.
  unsigned resultDim = 0;
  llvm::SmallDenseSet<unsigned> unusedDims = subViewOp.getDroppedDims();
  for (auto dim :
       llvm::seq<unsigned>(0, subViewOp.getSourceType().getRank())) {
    if (unusedDims.count(dim))
      useIndices.push_back(rewriter.create<arith::ConstantIndexOp>(loc, 0));
    else
      useIndices.push_back(indices[resultDim++]);
  }
  if (useIndices.size() != mixedOffsets.size())
    return failure();
  sourceIndices.resize(useIndices.size());
  for (auto index : llvm::seq<size_t>(0, mixedOffsets.size())) {
    SmallVector<Value> dynamicOperands;
    AffineExpr expr = rewriter.getAffineDimExpr(0);
    unsigned numSymbols = 0;
    dynamicOperands.push_back(useIndices[index]);

    // Multiply by the stride.
    if (auto attr = mixedStrides[index].dyn_cast<Attribute>()) {
      expr = expr * attr.cast<IntegerAttr>().getInt();
    } else {
      dynamicOperands.push_back(mixedStrides[index].get<Value>());
      expr = expr * rewriter.getAffineSymbolExpr(numSymbols++);
    }
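
    // Once the offset is folded in below, `expr` becomes
    // `d0 * stride + offset`, i.e. the applied map has the form
    //   (d0)[s0, ...] -> (d0 * stride + offset)
    // with one symbol per dynamic stride/offset, in the order the
    // corresponding values were pushed onto `dynamicOperands`.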

    // Add the offset.
    if (auto attr = mixedOffsets[index].dyn_cast<Attribute>()) {
      expr = expr + attr.cast<IntegerAttr>().getInt();
    } else {
      dynamicOperands.push_back(mixedOffsets[index].get<Value>());
      expr = expr + rewriter.getAffineSymbolExpr(numSymbols++);
    }
    sourceIndices[index] = rewriter.create<AffineApplyOp>(
        subViewOp.getLoc(), AffineMap::get(1, numSymbols, expr),
        dynamicOperands);
  }
  return success();
}

/// Helpers to access the memref operand for each op.
static Value getMemRefOperand(memref::LoadOp op) { return op.memref(); }

static Value getMemRefOperand(vector::TransferReadOp op) { return op.source(); }

static Value getMemRefOperand(memref::StoreOp op) { return op.memref(); }

static Value getMemRefOperand(vector::TransferWriteOp op) {
  return op.source();
}

/// Given the permutation map of the original
/// `vector.transfer_read`/`vector.transfer_write` operations, compute the
/// permutation map to use after the subview is folded with it.
static AffineMapAttr getPermutationMapAttr(MLIRContext *context,
                                           memref::SubViewOp subViewOp,
                                           AffineMap currPermutationMap) {
  llvm::SmallDenseSet<unsigned> unusedDims = subViewOp.getDroppedDims();
  SmallVector<AffineExpr> exprs;
  int64_t sourceRank = subViewOp.getSourceType().getRank();
  for (auto dim : llvm::seq<int64_t>(0, sourceRank)) {
    if (unusedDims.count(dim))
      continue;
    exprs.push_back(getAffineDimExpr(dim, context));
  }
  auto resultDimToSourceDimMap = AffineMap::get(sourceRank, 0, exprs, context);
  return AffineMapAttr::get(
      currPermutationMap.compose(resultDimToSourceDimMap));
}

//===----------------------------------------------------------------------===//
// Patterns
//===----------------------------------------------------------------------===//

namespace {
/// Merges a subview operation with a load/transfer_read operation.
template <typename OpTy>
class LoadOpOfSubViewFolder final : public OpRewritePattern<OpTy> {
public:
  using OpRewritePattern<OpTy>::OpRewritePattern;

  LogicalResult matchAndRewrite(OpTy loadOp,
                                PatternRewriter &rewriter) const override;

private:
  LogicalResult replaceOp(OpTy loadOp, memref::SubViewOp subViewOp,
                          ArrayRef<Value> sourceIndices,
                          PatternRewriter &rewriter) const;
};

/// Merges a subview operation with a store/transfer_write operation.
template <typename OpTy>
class StoreOpOfSubViewFolder final : public OpRewritePattern<OpTy> {
public:
  using OpRewritePattern<OpTy>::OpRewritePattern;

  LogicalResult matchAndRewrite(OpTy storeOp,
                                PatternRewriter &rewriter) const override;

private:
  LogicalResult replaceOp(OpTy storeOp, memref::SubViewOp subViewOp,
                          ArrayRef<Value> sourceIndices,
                          PatternRewriter &rewriter) const;
};

template <>
LogicalResult LoadOpOfSubViewFolder<memref::LoadOp>::replaceOp(
    memref::LoadOp loadOp, memref::SubViewOp subViewOp,
    ArrayRef<Value> sourceIndices, PatternRewriter &rewriter) const {
  rewriter.replaceOpWithNewOp<memref::LoadOp>(loadOp, subViewOp.source(),
                                              sourceIndices);
  return success();
}

template <>
LogicalResult LoadOpOfSubViewFolder<vector::TransferReadOp>::replaceOp(
    vector::TransferReadOp transferReadOp, memref::SubViewOp subViewOp,
    ArrayRef<Value> sourceIndices, PatternRewriter &rewriter) const {
  // TODO: support 0-d corner case.
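  // A 0-d transfer has no indices to rewrite in terms of the source memref;
  // report failure so the pattern leaves the op untouched.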
  if (transferReadOp.getTransferRank() == 0)
    return failure();
  rewriter.replaceOpWithNewOp<vector::TransferReadOp>(
      transferReadOp, transferReadOp.getVectorType(), subViewOp.source(),
      sourceIndices,
      getPermutationMapAttr(rewriter.getContext(), subViewOp,
                            transferReadOp.permutation_map()),
      transferReadOp.padding(),
      /*mask=*/Value(), transferReadOp.in_boundsAttr());
  return success();
}

template <>
LogicalResult StoreOpOfSubViewFolder<memref::StoreOp>::replaceOp(
    memref::StoreOp storeOp, memref::SubViewOp subViewOp,
    ArrayRef<Value> sourceIndices, PatternRewriter &rewriter) const {
  rewriter.replaceOpWithNewOp<memref::StoreOp>(
      storeOp, storeOp.value(), subViewOp.source(), sourceIndices);
  return success();
}

template <>
LogicalResult StoreOpOfSubViewFolder<vector::TransferWriteOp>::replaceOp(
    vector::TransferWriteOp transferWriteOp, memref::SubViewOp subViewOp,
    ArrayRef<Value> sourceIndices, PatternRewriter &rewriter) const {
  // TODO: support 0-d corner case.
  if (transferWriteOp.getTransferRank() == 0)
    return failure();
  rewriter.replaceOpWithNewOp<vector::TransferWriteOp>(
      transferWriteOp, transferWriteOp.vector(), subViewOp.source(),
      sourceIndices,
      getPermutationMapAttr(rewriter.getContext(), subViewOp,
                            transferWriteOp.permutation_map()),
      transferWriteOp.in_boundsAttr());
  return success();
}
} // namespace

template <typename OpTy>
LogicalResult
LoadOpOfSubViewFolder<OpTy>::matchAndRewrite(OpTy loadOp,
                                             PatternRewriter &rewriter) const {
  auto subViewOp =
      getMemRefOperand(loadOp).template getDefiningOp<memref::SubViewOp>();
  if (!subViewOp)
    return failure();

  SmallVector<Value, 4> sourceIndices;
  if (failed(resolveSourceIndices(loadOp.getLoc(), rewriter, subViewOp,
                                  loadOp.indices(), sourceIndices)))
    return failure();

  return replaceOp(loadOp, subViewOp, sourceIndices, rewriter);
}

template <typename OpTy>
LogicalResult
StoreOpOfSubViewFolder<OpTy>::matchAndRewrite(OpTy storeOp,
                                              PatternRewriter &rewriter) const {
  auto subViewOp =
      getMemRefOperand(storeOp).template getDefiningOp<memref::SubViewOp>();
  if (!subViewOp)
    return failure();

  SmallVector<Value, 4> sourceIndices;
  if (failed(resolveSourceIndices(storeOp.getLoc(), rewriter, subViewOp,
                                  storeOp.indices(), sourceIndices)))
    return failure();

  return replaceOp(storeOp, subViewOp, sourceIndices, rewriter);
}

void memref::populateFoldSubViewOpPatterns(RewritePatternSet &patterns) {
  patterns.add<LoadOpOfSubViewFolder<memref::LoadOp>,
               LoadOpOfSubViewFolder<vector::TransferReadOp>,
               StoreOpOfSubViewFolder<memref::StoreOp>,
               StoreOpOfSubViewFolder<vector::TransferWriteOp>>(
      patterns.getContext());
}

//===----------------------------------------------------------------------===//
// Pass registration
//===----------------------------------------------------------------------===//

namespace {

#define GEN_PASS_CLASSES
#include "mlir/Dialect/MemRef/Transforms/Passes.h.inc"

struct FoldSubViewOpsPass final
    : public FoldSubViewOpsBase<FoldSubViewOpsPass> {
  void runOnOperation() override;
};

} // namespace

void FoldSubViewOpsPass::runOnOperation() {
  RewritePatternSet patterns(&getContext());
  memref::populateFoldSubViewOpPatterns(patterns);
  (void)applyPatternsAndFoldGreedily(getOperation()->getRegions(),
                                     std::move(patterns));
}

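/// Factory for the pass defined above, exposed through
/// MemRef/Transforms/Passes.h. The folding patterns can also be reused
/// without the pass wrapper; a minimal sketch from inside some other
/// (hypothetical) pass's runOnOperation(), mirroring the driver above:
///
///   RewritePatternSet patterns(&getContext());
///   memref::populateFoldSubViewOpPatterns(patterns);
///   (void)applyPatternsAndFoldGreedily(getOperation()->getRegions(),
///                                      std::move(patterns));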
std::unique_ptr<Pass> memref::createFoldSubViewOpsPass() {
  return std::make_unique<FoldSubViewOpsPass>();
}