//===- VectorToSCF.cpp - Conversion from Vector to mix of SCF and Std -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements target-dependent lowering of vector transfer operations.
//
//===----------------------------------------------------------------------===//

#include <type_traits>

#include "mlir/Conversion/VectorToSCF/VectorToSCF.h"
#include "mlir/Dialect/Affine/EDSC/Intrinsics.h"
#include "mlir/Dialect/SCF/EDSC/Builders.h"
#include "mlir/Dialect/SCF/EDSC/Intrinsics.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/Dialect/Vector/EDSC/Intrinsics.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Types.h"

using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
using vector::TransferReadOp;
using vector::TransferWriteOp;

/// Helper class that captures the common information needed to lower N>1-D
/// vector transfer operations (read and write).
/// On construction, this class opens an edsc::ScopedContext for simpler IR
/// manipulation.
/// In pseudo-IR, for an n-D vector_transfer_read such as:
///
/// ```
///    vector_transfer_read(%m, %offsets, identity_map, %fill) :
///      memref<(leading_dims) x (major_dims) x (minor_dims) x type>,
///      vector<(major_dims) x (minor_dims) x type>
/// ```
///
/// where rank(minor_dims) is the lower-level vector rank (e.g. 1 for LLVM or
/// higher).
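///
/// As a purely illustrative instance (the shapes below are hypothetical and
/// not taken from the surrounding code or tests), with minor rank 1:
///
/// ```
///    vector_transfer_read(%m, %offsets, identity_map, %fill) :
///      memref<7x42x5x13xf32>, vector<5x13xf32>
/// ```
///
/// decomposes as leading_dims = 7x42, major_dims = 5 and minor_dims = 13.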
///
/// This is the entry point to emitting pseudo-IR resembling:
///
/// ```
///    %tmp = alloc(): memref<(major_dims) x vector<minor_dim x type>>
///    for (%ivs_major, {0}, {vector_shape}, {1}) { // (N-1)-D loop nest
///      if (any_of(%ivs_major + %offsets, <, major_dims)) {
///        %v = vector_transfer_read(
///          {%offsets_leading, %ivs_major + %offsets_major, %offsets_minor},
///           %ivs_minor):
///          memref<(leading_dims) x (major_dims) x (minor_dims) x type>,
///          vector<(minor_dims) x type>;
///        store(%v, %tmp);
///      } else {
///        %v = splat(vector<(minor_dims) x type>, %fill)
///        store(%v, %tmp, %ivs_major);
///      }
///    }
///    %res = load(%tmp, %0): memref<(major_dims) x vector<minor_dim x type>>):
///      vector<(major_dims) x (minor_dims) x type>
/// ```
///
template <typename ConcreteOp>
class NDTransferOpHelper {
public:
  NDTransferOpHelper(PatternRewriter &rewriter, ConcreteOp xferOp)
      : rewriter(rewriter), loc(xferOp.getLoc()),
        scope(std::make_unique<ScopedContext>(rewriter, loc)), xferOp(xferOp),
        op(xferOp.getOperation()) {
    vectorType = xferOp.getVectorType();
    // TODO(ntv, ajcbik): when we go to k > 1-D vectors adapt minorRank.
    minorRank = 1;
    majorRank = vectorType.getRank() - minorRank;
    leadingRank = xferOp.getMemRefType().getRank() - (majorRank + minorRank);
    majorVectorType =
        VectorType::get(vectorType.getShape().take_front(majorRank),
                        vectorType.getElementType());
    minorVectorType =
        VectorType::get(vectorType.getShape().take_back(minorRank),
                        vectorType.getElementType());
    /// Memref of minor vector type is used for individual transfers.
    memRefMinorVectorType =
        MemRefType::get(majorVectorType.getShape(), minorVectorType, {},
                        xferOp.getMemRefType().getMemorySpace());
  }

  LogicalResult doReplace();

private:
  /// Creates the loop nest on the "major" dimensions and calls the
  /// `loopBodyBuilder` lambda in the context of the loop nest.
  template <typename Lambda>
  void emitLoops(Lambda loopBodyBuilder);

  /// Operate within the body of `emitLoops` to:
  ///   1. Compute the indexings `majorIvs + majorOffsets`.
  ///   2. Compute a boolean that determines whether the first `majorIvs.rank()`
  ///      dimensions `majorIvs + majorOffsets` are all within `memrefBounds`.
  ///   3. Create an IfOp conditioned on the boolean in step 2.
  ///   4. Call a `thenBlockBuilder` and an `elseBlockBuilder` to append
  ///      operations to the IfOp blocks as appropriate.
  template <typename LambdaThen, typename LambdaElse>
  void emitInBounds(ValueRange majorIvs, ValueRange majorOffsets,
                    MemRefBoundsCapture &memrefBounds,
                    LambdaThen thenBlockBuilder, LambdaElse elseBlockBuilder);

  /// Common state to lower vector transfer ops.
  PatternRewriter &rewriter;
  Location loc;
  std::unique_ptr<ScopedContext> scope;
  ConcreteOp xferOp;
  Operation *op;
  // A vector transfer copies data between:
  //   - memref<(leading_dims) x (major_dims) x (minor_dims) x type>
  //   - vector<(major_dims) x (minor_dims) x type>
  unsigned minorRank;         // for now always 1
  unsigned majorRank;         // vector rank - minorRank
  unsigned leadingRank;       // memref rank - vector rank
  VectorType vectorType;      // vector<(major_dims) x (minor_dims) x type>
  VectorType majorVectorType; // vector<(major_dims) x type>
  VectorType minorVectorType; // vector<(minor_dims) x type>
  MemRefType memRefMinorVectorType; // memref<vector<(minor_dims) x type>>
};

template <typename ConcreteOp>
template <typename Lambda>
void NDTransferOpHelper<ConcreteOp>::emitLoops(Lambda loopBodyBuilder) {
  /// Loop nest operates on the major dimensions
  MemRefBoundsCapture memrefBoundsCapture(xferOp.memref());
  VectorBoundsCapture vectorBoundsCapture(majorVectorType);
  auto majorLbs = vectorBoundsCapture.getLbs();
  auto majorUbs = vectorBoundsCapture.getUbs();
  auto majorSteps = vectorBoundsCapture.getSteps();
  SmallVector<Value, 8> majorIvs(vectorBoundsCapture.rank());
  AffineLoopNestBuilder(majorIvs, majorLbs, majorUbs, majorSteps)([&] {
    ValueRange indices(xferOp.indices());
    loopBodyBuilder(majorIvs, indices.take_front(leadingRank),
                    indices.drop_front(leadingRank).take_front(majorRank),
                    indices.take_back(minorRank), memrefBoundsCapture);
  });
}

template <typename ConcreteOp>
template <typename LambdaThen, typename LambdaElse>
void NDTransferOpHelper<ConcreteOp>::emitInBounds(
    ValueRange majorIvs, ValueRange majorOffsets,
    MemRefBoundsCapture &memrefBounds, LambdaThen thenBlockBuilder,
    LambdaElse elseBlockBuilder) {
  Value inBounds = std_constant_int(/*value=*/1, /*width=*/1);
  SmallVector<Value, 4> majorIvsPlusOffsets;
  majorIvsPlusOffsets.reserve(majorIvs.size());
  for (auto it : llvm::zip(majorIvs, majorOffsets, memrefBounds.getUbs())) {
    Value iv = std::get<0>(it), off = std::get<1>(it), ub = std::get<2>(it);
    using namespace mlir::edsc::op;
    majorIvsPlusOffsets.push_back(iv + off);
    Value inBounds2 = majorIvsPlusOffsets.back() < ub;
    inBounds = inBounds && inBounds2;
  }

  auto ifOp = ScopedContext::getBuilderRef().create<scf::IfOp>(
      ScopedContext::getLocation(), TypeRange{}, inBounds,
      /*withElseRegion=*/std::is_same<ConcreteOp, TransferReadOp>());
  BlockBuilder(&ifOp.thenRegion().front(),
               Append())([&] { thenBlockBuilder(majorIvsPlusOffsets); });
  if (std::is_same<ConcreteOp, TransferReadOp>())
    BlockBuilder(&ifOp.elseRegion().front(),
                 Append())([&] { elseBlockBuilder(majorIvsPlusOffsets); });
}

template <>
LogicalResult NDTransferOpHelper<TransferReadOp>::doReplace() {
  Value alloc = std_alloc(memRefMinorVectorType);

  emitLoops([&](ValueRange majorIvs, ValueRange leadingOffsets,
                ValueRange majorOffsets, ValueRange minorOffsets,
                MemRefBoundsCapture &memrefBounds) {
    // If in-bounds, index into memref and lower to 1-D transfer read.
    auto thenBlockBuilder = [&](ValueRange majorIvsPlusOffsets) {
      SmallVector<Value, 8> indexing;
      indexing.reserve(leadingRank + majorRank + minorRank);
      indexing.append(leadingOffsets.begin(), leadingOffsets.end());
      indexing.append(majorIvsPlusOffsets.begin(), majorIvsPlusOffsets.end());
      indexing.append(minorOffsets.begin(), minorOffsets.end());
      // Lower to 1-D vector_transfer_read and let recursion handle it.
      Value memref = xferOp.memref();
      auto map = TransferReadOp::getTransferMinorIdentityMap(
          xferOp.getMemRefType(), minorVectorType);
      auto loaded1D =
          vector_transfer_read(minorVectorType, memref, indexing,
                               AffineMapAttr::get(map), xferOp.padding());
      // Store the 1-D vector.
      std_store(loaded1D, alloc, majorIvs);
    };
    // If out-of-bounds, just store a splatted vector.
    auto elseBlockBuilder = [&](ValueRange majorIvsPlusOffsets) {
      auto vector = std_splat(minorVectorType, xferOp.padding());
      std_store(vector, alloc, majorIvs);
    };
    emitInBounds(majorIvs, majorOffsets, memrefBounds, thenBlockBuilder,
                 elseBlockBuilder);
  });

  Value loaded =
      std_load(vector_type_cast(MemRefType::get({}, vectorType), alloc));
  rewriter.replaceOp(op, loaded);

  return success();
}

template <>
LogicalResult NDTransferOpHelper<TransferWriteOp>::doReplace() {
  Value alloc = std_alloc(memRefMinorVectorType);

  std_store(xferOp.vector(),
            vector_type_cast(MemRefType::get({}, vectorType), alloc));

  emitLoops([&](ValueRange majorIvs, ValueRange leadingOffsets,
                ValueRange majorOffsets, ValueRange minorOffsets,
                MemRefBoundsCapture &memrefBounds) {
    // Lower to 1-D vector_transfer_write and let recursion handle it.
    auto thenBlockBuilder = [&](ValueRange majorIvsPlusOffsets) {
      SmallVector<Value, 8> indexing;
      indexing.reserve(leadingRank + majorRank + minorRank);
      indexing.append(leadingOffsets.begin(), leadingOffsets.end());
      indexing.append(majorIvsPlusOffsets.begin(), majorIvsPlusOffsets.end());
      indexing.append(minorOffsets.begin(), minorOffsets.end());
      // Load the 1-D vector previously stored into the staging buffer.
      Value loaded1D = std_load(alloc, majorIvs);
      auto map = TransferWriteOp::getTransferMinorIdentityMap(
          xferOp.getMemRefType(), minorVectorType);
      vector_transfer_write(loaded1D, xferOp.memref(), indexing,
                            AffineMapAttr::get(map));
    };
    // Don't write anything when out of bounds.
    auto elseBlockBuilder = [&](ValueRange majorIvsPlusOffsets) {};
    emitInBounds(majorIvs, majorOffsets, memrefBounds, thenBlockBuilder,
                 elseBlockBuilder);
  });

  rewriter.eraseOp(op);

  return success();
}

/// Analyzes the `transfer` to find an access dimension along the fastest
/// remote MemRef dimension. If such a dimension with coalescing properties is
/// found, the caller swaps the loop bounds and induction variables so that the
/// invocation of LoopNestBuilder captures it in the innermost loop.
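///
/// For example (an illustrative permutation map, not taken from the code
/// below): with `(d0, d1, d2) -> (d2, d1)` on a rank-3 memref, result 0
/// accesses memref dimension 2, i.e. the innermost one, so the returned
/// coalesced index is 0.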
template <typename TransferOpTy>
static int computeCoalescedIndex(TransferOpTy transfer) {
  // Rank of the remote memory access; coalescing behavior occurs on the
  // innermost memory dimension.
  auto remoteRank = transfer.getMemRefType().getRank();
  // Iterate over the result expressions of the permutation map to determine
  // the loop order for creating pointwise copies between remote and local
  // memories.
  int coalescedIdx = -1;
  auto exprs = transfer.permutation_map().getResults();
  for (auto en : llvm::enumerate(exprs)) {
    auto dim = en.value().template dyn_cast<AffineDimExpr>();
    if (!dim) {
      continue;
    }
    auto memRefDim = dim.getPosition();
    if (memRefDim == remoteRank - 1) {
      // memRefDim has coalescing properties; it should be swapped into the
      // last position.
      assert(coalescedIdx == -1 && "Unexpected > 1 coalesced indices");
      coalescedIdx = en.index();
    }
  }
  return coalescedIdx;
}

/// Emits index expressions for remote memory accesses, clipped to the
/// boundaries of the MemRef.
template <typename TransferOpTy>
static SmallVector<Value, 8>
clip(TransferOpTy transfer, MemRefBoundsCapture &bounds, ArrayRef<Value> ivs) {
  using namespace mlir::edsc;

  Value zero(std_constant_index(0)), one(std_constant_index(1));
  SmallVector<Value, 8> memRefAccess(transfer.indices());
  SmallVector<Value, 8> clippedScalarAccessExprs(memRefAccess.size());
  // Indices accessing remote memory are clipped and their expressions are
  // returned in clippedScalarAccessExprs.
  for (unsigned memRefDim = 0; memRefDim < clippedScalarAccessExprs.size();
       ++memRefDim) {
    // Linear search on a small number of entries.
    int loopIndex = -1;
    auto exprs = transfer.permutation_map().getResults();
    for (auto en : llvm::enumerate(exprs)) {
      auto expr = en.value();
      auto dim = expr.template dyn_cast<AffineDimExpr>();
      // Sanity check.
      assert(
          (dim || expr.template cast<AffineConstantExpr>().getValue() == 0) &&
          "Expected dim or 0 in permutationMap");
      if (dim && memRefDim == dim.getPosition()) {
        loopIndex = en.index();
        break;
      }
    }

    // We cannot distinguish, at the moment, between unrolled dimensions that
    // implement the "always full" tile abstraction and the other ones that
    // need clipping. So we conservatively clip everything.
    using namespace edsc::op;
    auto N = bounds.ub(memRefDim);
    auto i = memRefAccess[memRefDim];
    if (loopIndex < 0) {
      auto N_minus_1 = N - one;
      auto select_1 = std_select(i < N, i, N_minus_1);
      clippedScalarAccessExprs[memRefDim] =
          std_select(i < zero, zero, select_1);
    } else {
      auto ii = ivs[loopIndex];
      auto i_plus_ii = i + ii;
      auto N_minus_1 = N - one;
      auto select_1 = std_select(i_plus_ii < N, i_plus_ii, N_minus_1);
      clippedScalarAccessExprs[memRefDim] =
          std_select(i_plus_ii < zero, zero, select_1);
    }
  }

  return clippedScalarAccessExprs;
}

namespace {

/// Implements lowering of TransferReadOp and TransferWriteOp to a
/// proper abstraction for the hardware.
///
/// For now, we only emit a simple loop nest that performs clipped pointwise
/// copies from a remote to a locally allocated memory.
///
/// Consider the case:
///
/// ```mlir
///    // Read the slice `%A[%i0, %i1:%i1+256, %i2:%i2+32]` into
///    // vector<32x256xf32> and pad with %f0 to handle the boundary case:
///    %f0 = constant 0.0f : f32
///    scf.for %i0 = 0 to %0 {
///      scf.for %i1 = 0 to %1 step %c256 {
///        scf.for %i2 = 0 to %2 step %c32 {
///          %v = vector.transfer_read %A[%i0, %i1, %i2], %f0
///               {permutation_map: (d0, d1, d2) -> (d2, d1)} :
///               memref<?x?x?xf32>, vector<32x256xf32>
///    }}}
/// ```
///
/// The rewriters construct loops and indices that access MemRef A in a pattern
/// resembling the following (while guaranteeing an always full-tile
/// abstraction):
///
/// ```mlir
///    scf.for %d2 = 0 to %c256 {
///      scf.for %d1 = 0 to %c32 {
///        %s = %A[%i0, %i1 + %d1, %i2 + %d2] : f32
///        %tmp[%d2, %d1] = %s
///      }
///    }
/// ```
///
/// In the current state, only a clipping transfer is implemented by `clip`,
/// which creates individual indexing expressions of the form:
///
/// ```mlir-dsc
///    auto condMax = i + ii < N;
///    auto max = std_select(condMax, i + ii, N - one)
///    auto cond = i + ii < zero;
///    std_select(cond, zero, max);
/// ```
///
/// In the future, clipping should not be the only way and instead we should
/// load vectors + mask them. Similarly on the write side, load/mask/store for
/// implementing RMW behavior.
///
/// Lowers TransferOp into a combination of:
///   1. local memory allocation;
///   2. perfect loop nest over:
///      a. scalar load/stores from local buffers (viewed as a scalar memref);
///      b. scalar store/load to original memref (with clipping);
///   3. vector_load/store;
///   4. local memory deallocation.
/// Minor variations occur depending on whether a TransferReadOp or
/// a TransferWriteOp is rewritten.
template <typename TransferOpTy>
struct VectorTransferRewriter : public RewritePattern {
  explicit VectorTransferRewriter(MLIRContext *context)
      : RewritePattern(TransferOpTy::getOperationName(), 1, context) {}

  /// Used for staging the transfer in a local scalar buffer.
  MemRefType tmpMemRefType(TransferOpTy transfer) const {
    auto vectorType = transfer.getVectorType();
    return MemRefType::get(vectorType.getShape(), vectorType.getElementType(),
                           {}, 0);
  }

  /// Performs the rewrite.
  LogicalResult matchAndRewrite(Operation *op,
                                PatternRewriter &rewriter) const override;
};

/// Lowers TransferReadOp into a combination of:
///   1. local memory allocation;
///   2. perfect loop nest over:
///      a. scalar load from local buffers (viewed as a scalar memref);
///      b. scalar store to original memref (with clipping);
///   3. vector_load from local buffer (viewed as a memref<1 x vector>);
///   4. local memory deallocation.
///
/// Lowers the data transfer part of a TransferReadOp while ensuring no
/// out-of-bounds accesses are possible. Out-of-bounds behavior is handled by
/// clipping. This means that a given value in memory can be read multiple
/// times and concurrently.
///
/// Important notes about clipping and "full-tiles only" abstraction:
/// =================================================================
/// When using clipping for dealing with boundary conditions, the same edge
/// value will appear multiple times (a.k.a. edge padding). This is fine if the
/// subsequent vector operations are all data-parallel but **is generally
/// incorrect** in the presence of reductions or extract operations.
///
/// More generally, clipping is a scalar abstraction that is expected to work
/// fine as a baseline for CPUs and GPUs but not for vector_load and DMAs.
/// To deal with real vector_load and DMAs, a "padded allocation + view"
/// abstraction with the ability to read out-of-memref-bounds (but still within
/// the allocated region) is necessary.
///
/// Whether using scalar loops or vector_load/DMAs to perform the transfer,
/// junk values will be materialized in the vectors and generally need to be
/// filtered out and replaced by the "neutral element". This neutral element is
/// op-dependent so, in the future, we expect to create a vector filter and
/// apply it to a splatted constant vector with the proper neutral element at
/// each ssa-use. This filtering is not necessary for pure data-parallel
/// operations.
///
/// In the case of vector_store/DMAs, Read-Modify-Write will be required, which
/// also has concurrency implications.
/// Note that by using clipped scalar stores
/// in the presence of data-parallel only operations, we generate code that
/// writes the same value multiple times on the edge locations.
///
/// TODO(ntv): implement alternatives to clipping.
/// TODO(ntv): support non-data-parallel operations.

/// Performs the rewrite.
template <>
LogicalResult VectorTransferRewriter<TransferReadOp>::matchAndRewrite(
    Operation *op, PatternRewriter &rewriter) const {
  using namespace mlir::edsc::op;

  TransferReadOp transfer = cast<TransferReadOp>(op);
  if (AffineMap::isMinorIdentity(transfer.permutation_map())) {
    // If > 1D, emit a bunch of loops around 1-D vector transfers.
    if (transfer.getVectorType().getRank() > 1)
      return NDTransferOpHelper<TransferReadOp>(rewriter, transfer)
          .doReplace();
    // If 1-D this is now handled by the target-specific lowering.
    if (transfer.getVectorType().getRank() == 1)
      return failure();
  }

  // Conservative lowering to scalar load / stores.
  // 1. Setup all the captures.
  ScopedContext scope(rewriter, transfer.getLoc());
  StdIndexedValue remote(transfer.memref());
  MemRefBoundsCapture memRefBoundsCapture(transfer.memref());
  VectorBoundsCapture vectorBoundsCapture(transfer.vector());
  int coalescedIdx = computeCoalescedIndex(transfer);
  // Swap the vectorBoundsCapture which will reorder loop bounds.
  if (coalescedIdx >= 0)
    vectorBoundsCapture.swapRanges(vectorBoundsCapture.rank() - 1,
                                   coalescedIdx);

  auto lbs = vectorBoundsCapture.getLbs();
  auto ubs = vectorBoundsCapture.getUbs();
  SmallVector<Value, 8> steps;
  steps.reserve(vectorBoundsCapture.getSteps().size());
  for (auto step : vectorBoundsCapture.getSteps())
    steps.push_back(std_constant_index(step));

  // 2. Emit alloc-copy-load-dealloc.
  Value tmp = std_alloc(tmpMemRefType(transfer));
  StdIndexedValue local(tmp);
  Value vec = vector_type_cast(tmp);
  SmallVector<Value, 8> ivs(lbs.size());
  LoopNestBuilder(ivs, lbs, ubs, steps)([&] {
    // Swap the ivs which will reorder memory accesses.
    if (coalescedIdx >= 0)
      std::swap(ivs.back(), ivs[coalescedIdx]);
    // Computes clippedScalarAccessExprs in the loop nest scope (ivs exist).
    local(ivs) = remote(clip(transfer, memRefBoundsCapture, ivs));
  });
  Value vectorValue = std_load(vec);
  (std_dealloc(tmp)); // vexing parse

  // 3. Propagate.
  rewriter.replaceOp(op, vectorValue);
  return success();
}

/// Lowers TransferWriteOp into a combination of:
///   1. local memory allocation;
///   2. vector_store to local buffer (viewed as a memref<1 x vector>);
///   3. perfect loop nest over:
///      a. scalar load from local buffers (viewed as a scalar memref);
///      b. scalar store to original memref (with clipping);
///   4. local memory deallocation.
///
/// More specifically, lowers the data transfer part while ensuring no
/// out-of-bounds accesses are possible. Out-of-bounds behavior is handled by
/// clipping. This means that a given value in memory can be written to
/// multiple times and concurrently.
///
/// See `Important notes about clipping and full-tiles only abstraction` in the
/// description of the TransferReadOp lowering above.
///
/// TODO(ntv): implement alternatives to clipping.
/// TODO(ntv): support non-data-parallel operations.
template <>
LogicalResult VectorTransferRewriter<TransferWriteOp>::matchAndRewrite(
    Operation *op, PatternRewriter &rewriter) const {
  using namespace edsc::op;

  TransferWriteOp transfer = cast<TransferWriteOp>(op);
  if (AffineMap::isMinorIdentity(transfer.permutation_map())) {
    // If > 1D, emit a bunch of loops around 1-D vector transfers.
    if (transfer.getVectorType().getRank() > 1)
      return NDTransferOpHelper<TransferWriteOp>(rewriter, transfer)
          .doReplace();
    // If 1-D this is now handled by the target-specific lowering.
    if (transfer.getVectorType().getRank() == 1)
      return failure();
  }

  // 1. Setup all the captures.
  ScopedContext scope(rewriter, transfer.getLoc());
  StdIndexedValue remote(transfer.memref());
  MemRefBoundsCapture memRefBoundsCapture(transfer.memref());
  Value vectorValue(transfer.vector());
  VectorBoundsCapture vectorBoundsCapture(transfer.vector());
  int coalescedIdx = computeCoalescedIndex(transfer);
  // Swap the vectorBoundsCapture which will reorder loop bounds.
  if (coalescedIdx >= 0)
    vectorBoundsCapture.swapRanges(vectorBoundsCapture.rank() - 1,
                                   coalescedIdx);

  auto lbs = vectorBoundsCapture.getLbs();
  auto ubs = vectorBoundsCapture.getUbs();
  SmallVector<Value, 8> steps;
  steps.reserve(vectorBoundsCapture.getSteps().size());
  for (auto step : vectorBoundsCapture.getSteps())
    steps.push_back(std_constant_index(step));

  // 2. Emit alloc-store-copy-dealloc.
  Value tmp = std_alloc(tmpMemRefType(transfer));
  StdIndexedValue local(tmp);
  Value vec = vector_type_cast(tmp);
  std_store(vectorValue, vec);
  SmallVector<Value, 8> ivs(lbs.size());
  LoopNestBuilder(ivs, lbs, ubs, steps)([&] {
    // Swap the ivs which will reorder memory accesses.
    if (coalescedIdx >= 0)
      std::swap(ivs.back(), ivs[coalescedIdx]);
    // Computes clippedScalarAccessExprs in the loop nest scope (ivs exist).
    remote(clip(transfer, memRefBoundsCapture, ivs)) = local(ivs);
  });
  (std_dealloc(tmp)); // vexing parse...

  rewriter.eraseOp(op);
  return success();
}

} // namespace

void mlir::populateVectorToSCFConversionPatterns(
    OwningRewritePatternList &patterns, MLIRContext *context) {
  patterns.insert<VectorTransferRewriter<vector::TransferReadOp>,
                  VectorTransferRewriter<vector::TransferWriteOp>>(context);
}
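
// A minimal usage sketch (illustrative only): the pass wrapper below is
// hypothetical, is not part of this file, and assumes the greedy pattern
// rewrite driver from "mlir/Transforms/GreedyPatternRewriteDriver.h" and the
// pass infrastructure from "mlir/Pass/Pass.h" are available.
//
//   struct HypotheticalVectorToSCFPass
//       : public PassWrapper<HypotheticalVectorToSCFPass, FunctionPass> {
//     void runOnFunction() override {
//       OwningRewritePatternList patterns;
//       populateVectorToSCFConversionPatterns(patterns, &getContext());
//       applyPatternsAndFoldGreedily(getFunction(), patterns);
//     }
//   };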