//===- VectorToSCF.cpp - Conversion from Vector to mix of SCF and Std ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements target-dependent lowering of vector transfer
// operations.
//
//===----------------------------------------------------------------------===//

#include <type_traits>

#include "mlir/Conversion/VectorToSCF/VectorToSCF.h"

#include "../PassDetail.h"
#include "mlir/Dialect/Affine/EDSC/Intrinsics.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/SCF/EDSC/Builders.h"
#include "mlir/Dialect/SCF/EDSC/Intrinsics.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/Dialect/Vector/EDSC/Intrinsics.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/Dialect/Vector/VectorUtils.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Types.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/Passes.h"

using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
using vector::TransferReadOp;
using vector::TransferWriteOp;

namespace {
/// Helper class that captures the common information needed to lower N>1-D
/// vector transfer operations (read and write).
/// On construction, this class opens an edsc::ScopedContext for simpler IR
/// manipulation.
/// In pseudo-IR, for an n-D vector_transfer_read such as:
///
/// ```
///   vector_transfer_read(%m, %offsets, identity_map, %fill) :
///     memref<(leading_dims) x (major_dims) x (minor_dims) x type>,
///     vector<(major_dims) x (minor_dims) x type>
/// ```
///
/// where rank(minor_dims) is the lower-level vector rank (e.g. 1 for LLVM or
/// higher).
///
/// This is the entry point to emitting pseudo-IR resembling:
///
/// ```
///   %tmp = alloc(): memref<(major_dims) x vector<(minor_dims) x type>>
///   for (%ivs_major, {0}, {vector_shape}, {1}) { // (N-1)-D loop nest
///     if (any_of(%ivs_major + %offsets, <, major_dims)) {
///       %v = vector_transfer_read(
///         {%offsets_leading, %ivs_major + %offsets_major, %offsets_minor},
///         %ivs_minor) :
///           memref<(leading_dims) x (major_dims) x (minor_dims) x type>,
///           vector<(minor_dims) x type>;
///       store(%v, %tmp, %ivs_major);
///     } else {
///       %v = splat(vector<(minor_dims) x type>, %fill)
///       store(%v, %tmp, %ivs_major);
///     }
///   }
///   %res = load(%tmp, %0) : memref<(major_dims) x vector<(minor_dims) x type>>
///     : vector<(major_dims) x (minor_dims) x type>
/// ```
///
template <typename ConcreteOp>
class NDTransferOpHelper {
public:
  NDTransferOpHelper(PatternRewriter &rewriter, ConcreteOp xferOp,
                     const VectorTransferToSCFOptions &options)
      : rewriter(rewriter), options(options), loc(xferOp.getLoc()),
        scope(std::make_unique<ScopedContext>(rewriter, loc)), xferOp(xferOp),
        op(xferOp.getOperation()) {
    vectorType = xferOp.getVectorType();
    // TODO: adapt minorRank when we support k > 1-D minor vectors.
    minorRank = 1;
    majorRank = vectorType.getRank() - minorRank;
    leadingRank = xferOp.getLeadingMemRefRank();
    majorVectorType =
        VectorType::get(vectorType.getShape().take_front(majorRank),
                        vectorType.getElementType());
    minorVectorType =
        VectorType::get(vectorType.getShape().take_back(minorRank),
                        vectorType.getElementType());
    /// A memref of the minor vector type is used for the individual transfers.
    memRefMinorVectorType =
        MemRefType::get(majorVectorType.getShape(), minorVectorType, {},
                        xferOp.getMemRefType().getMemorySpace());
  }

  LogicalResult doReplace();

private:
  /// Creates the loop nest on the "major" dimensions and calls the
  /// `loopBodyBuilder` lambda in the context of the loop nest.
  template <typename Lambda>
  void emitLoops(Lambda loopBodyBuilder);

  /// Operates within the body of `emitLoops` to:
  /// 1. Compute the indexings `majorIvs + majorOffsets` and save them in
  ///    `majorIvsPlusOffsets`.
  /// 2. Return a boolean that determines whether the first `majorIvs.size()`
  ///    dimensions of `majorIvs + majorOffsets` are all within
  ///    `memrefBounds`.
  Value emitInBoundsCondition(ValueRange majorIvs, ValueRange majorOffsets,
                              MemRefBoundsCapture &memrefBounds,
                              SmallVectorImpl<Value> &majorIvsPlusOffsets);

  /// Common state to lower vector transfer ops.
  PatternRewriter &rewriter;
  const VectorTransferToSCFOptions &options;
  Location loc;
  std::unique_ptr<ScopedContext> scope;
  ConcreteOp xferOp;
  Operation *op;
  // A vector transfer copies data between:
  //   - memref<(leading_dims) x (major_dims) x (minor_dims) x type>
  //   - vector<(major_dims) x (minor_dims) x type>
  unsigned minorRank;         // for now always 1
  unsigned majorRank;         // vector rank - minorRank
  unsigned leadingRank;       // memref rank - vector rank
  VectorType vectorType;      // vector<(major_dims) x (minor_dims) x type>
  VectorType majorVectorType; // vector<(major_dims) x type>
  VectorType minorVectorType; // vector<(minor_dims) x type>
  MemRefType memRefMinorVectorType; // memref<vector<(minor_dims) x type>>
};

template <typename ConcreteOp>
template <typename Lambda>
void NDTransferOpHelper<ConcreteOp>::emitLoops(Lambda loopBodyBuilder) {
  /// The loop nest operates on the major dimensions.
  MemRefBoundsCapture memrefBoundsCapture(xferOp.memref());

  if (options.unroll) {
    auto shape = majorVectorType.getShape();
    auto strides = computeStrides(shape);
    unsigned numUnrolledInstances = computeMaxLinearIndex(shape);
    ValueRange indices(xferOp.indices());
    for (unsigned idx = 0; idx < numUnrolledInstances; ++idx) {
      SmallVector<int64_t, 4> offsets = delinearize(strides, idx);
      SmallVector<Value, 4> offsetValues = llvm::to_vector<4>(
          llvm::map_range(offsets, [](int64_t off) -> Value {
            return std_constant_index(off);
          }));
      loopBodyBuilder(offsetValues, indices.take_front(leadingRank),
                      indices.drop_front(leadingRank).take_front(majorRank),
                      indices.take_back(minorRank), memrefBoundsCapture);
    }
  } else {
    VectorBoundsCapture vectorBoundsCapture(majorVectorType);
    auto majorLbs = vectorBoundsCapture.getLbs();
    auto majorUbs = vectorBoundsCapture.getUbs();
    auto majorSteps = vectorBoundsCapture.getSteps();
    affineLoopNestBuilder(
        majorLbs, majorUbs, majorSteps, [&](ValueRange majorIvs) {
          ValueRange indices(xferOp.indices());
          loopBodyBuilder(majorIvs, indices.take_front(leadingRank),
                          indices.drop_front(leadingRank).take_front(majorRank),
                          indices.take_back(minorRank), memrefBoundsCapture);
        });
  }
}

static Optional<int64_t> extractConstantIndex(Value v) {
  if (auto cstOp = v.getDefiningOp<ConstantIndexOp>())
    return cstOp.getValue();
  if (auto affineApplyOp = v.getDefiningOp<AffineApplyOp>())
    if (affineApplyOp.getAffineMap().isSingleConstant())
      return affineApplyOp.getAffineMap().getSingleConstantResult();
  return None;
}

// Missing foldings of scf.if make it necessary to perform poor man's folding
// eagerly, especially in the case of unrolling. In the future, this should go
// away once scf.if folds properly.
static Value onTheFlyFoldSLT(Value v, Value ub) {
  using namespace mlir::edsc::op;
  auto maybeCstV = extractConstantIndex(v);
  auto maybeCstUb = extractConstantIndex(ub);
  if (maybeCstV && maybeCstUb && *maybeCstV < *maybeCstUb)
    return Value();
  return slt(v, ub);
}

template <typename ConcreteOp>
Value NDTransferOpHelper<ConcreteOp>::emitInBoundsCondition(
    ValueRange majorIvs, ValueRange majorOffsets,
    MemRefBoundsCapture &memrefBounds,
    SmallVectorImpl<Value> &majorIvsPlusOffsets) {
  Value inBoundsCondition;
  majorIvsPlusOffsets.reserve(majorIvs.size());
  unsigned idx = 0;
  SmallVector<Value, 4> bounds =
      linalg::applyMapToValues(rewriter, xferOp.getLoc(),
                               xferOp.permutation_map(), memrefBounds.getUbs());
  for (auto it : llvm::zip(majorIvs, majorOffsets, bounds)) {
    Value iv = std::get<0>(it), off = std::get<1>(it), ub = std::get<2>(it);
    using namespace mlir::edsc::op;
    majorIvsPlusOffsets.push_back(iv + off);
    if (xferOp.isMaskedDim(leadingRank + idx)) {
      Value inBoundsCond = onTheFlyFoldSLT(majorIvsPlusOffsets.back(), ub);
      if (inBoundsCond)
        inBoundsCondition = inBoundsCondition
                                ? inBoundsCondition && inBoundsCond
                                : inBoundsCond;
    }
    ++idx;
  }
  return inBoundsCondition;
}

// TODO: Parallelism and threadlocal considerations.
static Value setAllocAtFunctionEntry(MemRefType memRefMinorVectorType,
                                     Operation *op) {
  auto &b = ScopedContext::getBuilderRef();
  OpBuilder::InsertionGuard guard(b);
  Operation *scope =
      op->getParentWithTrait<OpTrait::AutomaticAllocationScope>();
  assert(scope && "Expected op to be inside automatic allocation scope");
  b.setInsertionPointToStart(&scope->getRegion(0).front());
  Value res = std_alloca(memRefMinorVectorType);
  return res;
}

template <>
LogicalResult NDTransferOpHelper<TransferReadOp>::doReplace() {
  Value alloc, result;
  if (options.unroll)
    result = std_splat(vectorType, xferOp.padding());
  else
    alloc = setAllocAtFunctionEntry(memRefMinorVectorType, op);

  emitLoops([&](ValueRange majorIvs, ValueRange leadingOffsets,
                ValueRange majorOffsets, ValueRange minorOffsets,
                MemRefBoundsCapture &memrefBounds) {
    /// Lambda to load a 1-D vector in the current loop ivs + offset context.
    auto load1DVector = [&](ValueRange majorIvsPlusOffsets) -> Value {
      SmallVector<Value, 8> indexing;
      indexing.reserve(leadingRank + majorRank + minorRank);
      indexing.append(leadingOffsets.begin(), leadingOffsets.end());
      indexing.append(majorIvsPlusOffsets.begin(), majorIvsPlusOffsets.end());
      indexing.append(minorOffsets.begin(), minorOffsets.end());
      Value memref = xferOp.memref();
      auto map =
          getTransferMinorIdentityMap(xferOp.getMemRefType(), minorVectorType);
      ArrayAttr masked;
      if (!xferOp.isMaskedDim(xferOp.getVectorType().getRank() - 1)) {
        OpBuilder &b = ScopedContext::getBuilderRef();
        masked = b.getBoolArrayAttr({false});
      }
      return vector_transfer_read(minorVectorType, memref, indexing,
                                  AffineMapAttr::get(map), xferOp.padding(),
                                  masked);
    };

    // 1. Compute the inBoundsCondition in the current loop ivs + offset
    // context.
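    // Note: emitInBoundsCondition returns a null Value when every masked
    // dimension is statically known to be in-bounds (see onTheFlyFoldSLT),
    // in which case the scf.if below is elided entirely.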
    SmallVector<Value, 4> majorIvsPlusOffsets;
    Value inBoundsCondition = emitInBoundsCondition(
        majorIvs, majorOffsets, memrefBounds, majorIvsPlusOffsets);

    if (inBoundsCondition) {
      // 2. If the condition is not null, we need an IfOp, which may yield
      //    if `options.unroll` is true.
      SmallVector<Type, 1> resultType;
      if (options.unroll)
        resultType.push_back(vectorType);

      // 3. If in-bounds, progressively lower to a 1-D transfer read,
      //    otherwise splat a 1-D vector.
      ValueRange ifResults = conditionBuilder(
          resultType, inBoundsCondition,
          [&]() -> scf::ValueVector {
            Value vector = load1DVector(majorIvsPlusOffsets);
            // 3.a. If `options.unroll` is true, insert the 1-D vector in the
            //      aggregate. We must yield and merge with the `else` branch.
            if (options.unroll) {
              vector = vector_insert(vector, result, majorIvs);
              return {vector};
            }
            // 3.b. Otherwise, just go through the temporary `alloc`.
            std_store(vector, alloc, majorIvs);
            return {};
          },
          [&]() -> scf::ValueVector {
            Value vector = std_splat(minorVectorType, xferOp.padding());
            // 3.c. If `options.unroll` is true, insert the 1-D vector in the
            //      aggregate. We must yield and merge with the `then` branch.
            if (options.unroll) {
              vector = vector_insert(vector, result, majorIvs);
              return {vector};
            }
            // 3.d. Otherwise, just go through the temporary `alloc`.
            std_store(vector, alloc, majorIvs);
            return {};
          });

      if (!resultType.empty())
        result = *ifResults.begin();
    } else {
      // 4. Guaranteed in-bounds, progressively lower to a 1-D transfer read.
      Value loaded1D = load1DVector(majorIvsPlusOffsets);
      // 5.a. If `options.unroll` is true, insert the 1-D vector in the
      //      aggregate.
      if (options.unroll)
        result = vector_insert(loaded1D, result, majorIvs);
      // 5.b. Otherwise, just go through the temporary `alloc`.
      else
        std_store(loaded1D, alloc, majorIvs);
    }
  });

  assert((!options.unroll ^ (bool)result) &&
         "Expected resulting Value iff unroll");
  if (!result)
    result =
        std_load(vector_type_cast(MemRefType::get({}, vectorType), alloc));
  rewriter.replaceOp(op, result);

  return success();
}

template <>
LogicalResult NDTransferOpHelper<TransferWriteOp>::doReplace() {
  Value alloc;
  if (!options.unroll) {
    alloc = setAllocAtFunctionEntry(memRefMinorVectorType, op);
    std_store(xferOp.vector(),
              vector_type_cast(MemRefType::get({}, vectorType), alloc));
  }

  emitLoops([&](ValueRange majorIvs, ValueRange leadingOffsets,
                ValueRange majorOffsets, ValueRange minorOffsets,
                MemRefBoundsCapture &memrefBounds) {
    // Lower to 1-D vector_transfer_write and let recursion handle it.
    auto emitTransferWrite = [&](ValueRange majorIvsPlusOffsets) {
      SmallVector<Value, 8> indexing;
      indexing.reserve(leadingRank + majorRank + minorRank);
      indexing.append(leadingOffsets.begin(), leadingOffsets.end());
      indexing.append(majorIvsPlusOffsets.begin(), majorIvsPlusOffsets.end());
      indexing.append(minorOffsets.begin(), minorOffsets.end());
      Value result;
      // If `options.unroll` is true, extract the 1-D vector from the
      // aggregate.
      if (options.unroll)
        result = vector_extract(xferOp.vector(), majorIvs);
      else
        result = std_load(alloc, majorIvs);
      auto map =
          getTransferMinorIdentityMap(xferOp.getMemRefType(), minorVectorType);
      ArrayAttr masked;
      if (!xferOp.isMaskedDim(xferOp.getVectorType().getRank() - 1)) {
        OpBuilder &b = ScopedContext::getBuilderRef();
        masked = b.getBoolArrayAttr({false});
      }
      vector_transfer_write(result, xferOp.memref(), indexing,
                            AffineMapAttr::get(map), masked);
    };

    // 1. Compute the inBoundsCondition in the current loop ivs + offset
    // context.
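    // As in the read case, a null condition means all masked accesses are
    // statically known to be in-bounds and no scf.if is needed.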
    SmallVector<Value, 4> majorIvsPlusOffsets;
    Value inBoundsCondition = emitInBoundsCondition(
        majorIvs, majorOffsets, memrefBounds, majorIvsPlusOffsets);

    if (inBoundsCondition) {
      // 2.a. If the condition is not null, we need an IfOp to write
      //      conditionally. Progressively lower to a 1-D transfer write.
      conditionBuilder(inBoundsCondition,
                       [&] { emitTransferWrite(majorIvsPlusOffsets); });
    } else {
      // 2.b. Guaranteed in-bounds. Progressively lower to a 1-D transfer
      //      write.
      emitTransferWrite(majorIvsPlusOffsets);
    }
  });

  rewriter.eraseOp(op);

  return success();
}

} // namespace

/// Analyzes the `transfer` to find an access dimension along the fastest
/// varying remote MemRef dimension. If such a dimension with coalescing
/// properties is found, the caller swaps the corresponding loop ranges in
/// `vectorBoundsCapture` so that the loop nest builder captures it in the
/// innermost loop.
template <typename TransferOpTy>
static int computeCoalescedIndex(TransferOpTy transfer) {
  // Rank of the remote memory access; coalescing behavior occurs on the
  // innermost memory dimension.
  auto remoteRank = transfer.getMemRefType().getRank();
  // Iterate over the result expressions of the permutation map to determine
  // the loop order for creating pointwise copies between remote and local
  // memories.
  int coalescedIdx = -1;
  auto exprs = transfer.permutation_map().getResults();
  for (auto en : llvm::enumerate(exprs)) {
    auto dim = en.value().template dyn_cast<AffineDimExpr>();
    if (!dim)
      continue;
    auto memRefDim = dim.getPosition();
    if (memRefDim == remoteRank - 1) {
      // memRefDim has coalescing properties, it should be swapped into the
      // last position.
      assert(coalescedIdx == -1 && "Unexpected > 1 coalesced indices");
      coalescedIdx = en.index();
    }
  }
  return coalescedIdx;
}

/// Emits remote memory access indices that are clipped to the boundaries of
/// the MemRef.
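///
/// For illustration (pseudo-IR; %i and %N are placeholder names), each
/// clipped index implements the clamp min(max(%i, 0), %N - 1) with two
/// selects:
///
/// ```
///   %s = select (%i slt %N), %i, %N - 1
///   %clipped = select (%i slt 0), 0, %s
/// ```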
template <typename TransferOpTy>
static SmallVector<Value, 8>
clip(TransferOpTy transfer, MemRefBoundsCapture &bounds, ArrayRef<Value> ivs) {
  using namespace mlir::edsc;

  Value zero(std_constant_index(0)), one(std_constant_index(1));
  SmallVector<Value, 8> memRefAccess(transfer.indices());
  SmallVector<Value, 8> clippedScalarAccessExprs(memRefAccess.size());
  // Indices accessing remote memory are clipped and their expressions are
  // returned in clippedScalarAccessExprs.
  for (unsigned memRefDim = 0; memRefDim < clippedScalarAccessExprs.size();
       ++memRefDim) {
    // Linear search on a small number of entries.
    int loopIndex = -1;
    auto exprs = transfer.permutation_map().getResults();
    for (auto en : llvm::enumerate(exprs)) {
      auto expr = en.value();
      auto dim = expr.template dyn_cast<AffineDimExpr>();
      // Sanity check.
      assert(
          (dim || expr.template cast<AffineConstantExpr>().getValue() == 0) &&
          "Expected dim or 0 in permutationMap");
      if (dim && memRefDim == dim.getPosition()) {
        loopIndex = en.index();
        break;
      }
    }

    // We cannot distinguish, at the moment, between unrolled dimensions that
    // implement the "always full" tile abstraction and need clipping and the
    // other ones. So we conservatively clip everything.
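    // The two std_select ops below materialize the clamp
    // min(max(idx, 0), N - 1), where idx is the base access index, plus the
    // loop iv when the dimension is indexed by a loop.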
    using namespace edsc::op;
    auto N = bounds.ub(memRefDim);
    auto i = memRefAccess[memRefDim];
    if (loopIndex < 0) {
      auto N_minus_1 = N - one;
      auto select_1 = std_select(slt(i, N), i, N_minus_1);
      clippedScalarAccessExprs[memRefDim] =
          std_select(slt(i, zero), zero, select_1);
    } else {
      auto ii = ivs[loopIndex];
      auto i_plus_ii = i + ii;
      auto N_minus_1 = N - one;
      auto select_1 = std_select(slt(i_plus_ii, N), i_plus_ii, N_minus_1);
      clippedScalarAccessExprs[memRefDim] =
          std_select(slt(i_plus_ii, zero), zero, select_1);
    }
  }

  return clippedScalarAccessExprs;
}

namespace mlir {

template <typename TransferOpTy>
VectorTransferRewriter<TransferOpTy>::VectorTransferRewriter(
    VectorTransferToSCFOptions options, MLIRContext *context)
    : RewritePattern(TransferOpTy::getOperationName(), 1, context),
      options(options) {}

/// Used for staging the transfer in a local buffer.
template <typename TransferOpTy>
MemRefType VectorTransferRewriter<TransferOpTy>::tmpMemRefType(
    TransferOpTy transfer) const {
  auto vectorType = transfer.getVectorType();
  return MemRefType::get(vectorType.getShape().drop_back(),
                         VectorType::get(vectorType.getShape().take_back(),
                                         vectorType.getElementType()),
                         {}, 0);
}

/// Lowers TransferReadOp into a combination of:
///   1. local memory allocation;
///   2. perfect loop nest over:
///      a. scalar load from the original memref (with clipping);
///      b. scalar store to the local buffer (viewed as a scalar memref);
///   3. vector_load from the local buffer (viewed as a memref<1 x vector>);
///   4. local memory deallocation.
///
/// Lowers the data transfer part of a TransferReadOp while ensuring no
/// out-of-bounds accesses are possible. Out-of-bounds behavior is handled by
/// clipping. This means that a given value in memory can be read multiple
/// times and concurrently.
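///
/// As a rough sketch of the steps above (pseudo-IR; the memref %A, offsets
/// and loop structure are illustrative), reading a vector<3x4xf32> resembles:
///
/// ```
///   %tmp = alloca() : memref<3xvector<4xf32>>
///   scf.for %i = 0 to 3 {
///     scf.for %j = 0 to 4 {
///       %idx = clip(%offsets + (%i, %j))  // clamped into the memref bounds
///       %s = load %A[%idx]
///       // insert %s at position %j of the 1-D vector staged at %tmp[%i]
///     }
///   }
///   %res = load (vector_type_cast %tmp) : vector<3x4xf32>
/// ```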
///
/// Important notes about clipping and "full-tiles only" abstraction:
/// =================================================================
/// When using clipping for dealing with boundary conditions, the same edge
/// value will appear multiple times (a.k.a. edge padding). This is fine if
/// the subsequent vector operations are all data-parallel but **is generally
/// incorrect** in the presence of reductions or extract operations.
///
/// More generally, clipping is a scalar abstraction that is expected to work
/// fine as a baseline for CPUs and GPUs but not for vector_load and DMAs.
/// To deal with real vector_load and DMAs, a "padded allocation + view"
/// abstraction with the ability to read out-of-memref-bounds (but still
/// within the allocated region) is necessary.
///
/// Whether using scalar loops or vector_load/DMAs to perform the transfer,
/// junk values will be materialized in the vectors and generally need to be
/// filtered out and replaced by the "neutral element". This neutral element
/// is op-dependent so, in the future, we expect to create a vector filter and
/// apply it to a splatted constant vector with the proper neutral element at
/// each ssa-use. This filtering is not necessary for pure data-parallel
/// operations.
///
/// In the case of vector_store/DMAs, Read-Modify-Write will be required,
/// which also has concurrency implications. Note that by using clipped scalar
/// stores in the presence of data-parallel only operations, we generate code
/// that writes the same value multiple times on the edge locations.
///
/// TODO: implement alternatives to clipping.
/// TODO: support non-data-parallel operations.

/// Performs the rewrite.
template <>
LogicalResult VectorTransferRewriter<TransferReadOp>::matchAndRewrite(
    Operation *op, PatternRewriter &rewriter) const {
  using namespace mlir::edsc::op;

  TransferReadOp transfer = cast<TransferReadOp>(op);

  // Fall back to a loop if the fastest varying stride is not 1 or it is
  // permuted.
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  auto successStrides =
      getStridesAndOffset(transfer.getMemRefType(), strides, offset);
  if (succeeded(successStrides) && strides.back() == 1 &&
      transfer.permutation_map().isMinorIdentity()) {
    // If > 1-D, emit a bunch of loops around 1-D vector transfers.
    if (transfer.getVectorType().getRank() > 1)
      return NDTransferOpHelper<TransferReadOp>(rewriter, transfer, options)
          .doReplace();
    // If 1-D, this is now handled by the target-specific lowering.
    if (transfer.getVectorType().getRank() == 1)
      return failure();
  }

  // Conservative lowering to scalar loads / stores.
  // 1. Setup all the captures.
  ScopedContext scope(rewriter, transfer.getLoc());
  StdIndexedValue remote(transfer.memref());
  MemRefBoundsCapture memRefBoundsCapture(transfer.memref());
  VectorBoundsCapture vectorBoundsCapture(transfer.vector());
  int coalescedIdx = computeCoalescedIndex(transfer);
  // Swap the vectorBoundsCapture which will reorder loop bounds.
  if (coalescedIdx >= 0)
    vectorBoundsCapture.swapRanges(vectorBoundsCapture.rank() - 1,
                                   coalescedIdx);

  auto lbs = vectorBoundsCapture.getLbs();
  auto ubs = vectorBoundsCapture.getUbs();
  SmallVector<Value, 8> steps;
  steps.reserve(vectorBoundsCapture.getSteps().size());
  for (auto step : vectorBoundsCapture.getSteps())
    steps.push_back(std_constant_index(step));

  // 2. Emit alloc-copy-load-dealloc.
  Value tmp = setAllocAtFunctionEntry(tmpMemRefType(transfer), transfer);
  StdIndexedValue local(tmp);
  Value vec = vector_type_cast(tmp);
  loopNestBuilder(lbs, ubs, steps, [&](ValueRange loopIvs) {
    auto ivs = llvm::to_vector<8>(loopIvs);
    // Swap the ivs which will reorder memory accesses.
    if (coalescedIdx >= 0)
      std::swap(ivs.back(), ivs[coalescedIdx]);
    // Compute clippedScalarAccessExprs in the loop nest scope (ivs exist).
    SmallVector<Value, 8> indices = clip(transfer, memRefBoundsCapture, ivs);
    ArrayRef<Value> indicesRef(indices), ivsRef(ivs);
    Value pos =
        std_index_cast(IntegerType::get(32, op->getContext()), ivsRef.back());
    Value vector = vector_insert_element(remote(indicesRef),
                                         local(ivsRef.drop_back()), pos);
    local(ivsRef.drop_back()) = vector;
  });
  Value vectorValue = std_load(vec);

  // 3. Propagate.
  rewriter.replaceOp(op, vectorValue);
  return success();
}

/// Lowers TransferWriteOp into a combination of:
///   1. local memory allocation;
///   2. vector_store to the local buffer (viewed as a memref<1 x vector>);
///   3. perfect loop nest over:
///      a. scalar load from the local buffer (viewed as a scalar memref);
///      b. scalar store to the original memref (with clipping);
///   4. local memory deallocation.
///
/// More specifically, lowers the data transfer part while ensuring no
/// out-of-bounds accesses are possible. Out-of-bounds behavior is handled by
/// clipping. This means that a given value in memory can be written to
/// multiple times and concurrently.
///
/// See `Important notes about clipping and "full-tiles only" abstraction` in
/// the description of the TransferReadOp lowering above.
///
/// TODO: implement alternatives to clipping.
/// TODO: support non-data-parallel operations.
template <>
LogicalResult VectorTransferRewriter<TransferWriteOp>::matchAndRewrite(
    Operation *op, PatternRewriter &rewriter) const {
  using namespace edsc::op;

  TransferWriteOp transfer = cast<TransferWriteOp>(op);

  // Fall back to a loop if the fastest varying stride is not 1 or it is
  // permuted.
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  auto successStrides =
      getStridesAndOffset(transfer.getMemRefType(), strides, offset);
  if (succeeded(successStrides) && strides.back() == 1 &&
      transfer.permutation_map().isMinorIdentity()) {
    // If > 1-D, emit a bunch of loops around 1-D vector transfers.
    if (transfer.getVectorType().getRank() > 1)
      return NDTransferOpHelper<TransferWriteOp>(rewriter, transfer, options)
          .doReplace();
    // If 1-D, this is now handled by the target-specific lowering.
    if (transfer.getVectorType().getRank() == 1)
      return failure();
  }

  // 1. Setup all the captures.
  ScopedContext scope(rewriter, transfer.getLoc());
  StdIndexedValue remote(transfer.memref());
  MemRefBoundsCapture memRefBoundsCapture(transfer.memref());
  Value vectorValue(transfer.vector());
  VectorBoundsCapture vectorBoundsCapture(transfer.vector());
  int coalescedIdx = computeCoalescedIndex(transfer);
  // Swap the vectorBoundsCapture which will reorder loop bounds.
  if (coalescedIdx >= 0)
    vectorBoundsCapture.swapRanges(vectorBoundsCapture.rank() - 1,
                                   coalescedIdx);

  auto lbs = vectorBoundsCapture.getLbs();
  auto ubs = vectorBoundsCapture.getUbs();
  SmallVector<Value, 8> steps;
  steps.reserve(vectorBoundsCapture.getSteps().size());
  for (auto step : vectorBoundsCapture.getSteps())
    steps.push_back(std_constant_index(step));

  // 2. Emit alloc-store-copy-dealloc.
  Value tmp = setAllocAtFunctionEntry(tmpMemRefType(transfer), transfer);
  StdIndexedValue local(tmp);
  Value vec = vector_type_cast(tmp);
  std_store(vectorValue, vec);
  loopNestBuilder(lbs, ubs, steps, [&](ValueRange loopIvs) {
    auto ivs = llvm::to_vector<8>(loopIvs);
    // Swap the ivs which will reorder memory accesses.
    if (coalescedIdx >= 0)
      std::swap(ivs.back(), ivs[coalescedIdx]);
    // Compute clippedScalarAccessExprs in the loop nest scope (ivs exist).
    SmallVector<Value, 8> indices = clip(transfer, memRefBoundsCapture, ivs);
    ArrayRef<Value> indicesRef(indices), ivsRef(ivs);
    Value pos =
        std_index_cast(IntegerType::get(32, op->getContext()), ivsRef.back());
    Value scalar = vector_extract_element(local(ivsRef.drop_back()), pos);
    remote(indices) = scalar;
  });

  // 3. Erase.
  rewriter.eraseOp(op);
  return success();
}

void populateVectorToSCFConversionPatterns(
    OwningRewritePatternList &patterns, MLIRContext *context,
    const VectorTransferToSCFOptions &options) {
  patterns.insert<VectorTransferRewriter<vector::TransferReadOp>,
                  VectorTransferRewriter<vector::TransferWriteOp>>(options,
                                                                   context);
}

} // namespace mlir

namespace {

struct ConvertVectorToSCFPass
    : public ConvertVectorToSCFBase<ConvertVectorToSCFPass> {
  ConvertVectorToSCFPass() = default;
  ConvertVectorToSCFPass(const VectorTransferToSCFOptions &options) {
    this->fullUnroll = options.unroll;
  }

  void runOnFunction() override {
    OwningRewritePatternList patterns;
    auto *context = getFunction().getContext();
    populateVectorToSCFConversionPatterns(
        patterns, context, VectorTransferToSCFOptions().setUnroll(fullUnroll));
    applyPatternsAndFoldGreedily(getFunction(), patterns);
  }
};

} // namespace

std::unique_ptr<Pass>
mlir::createConvertVectorToSCFPass(const VectorTransferToSCFOptions &options) {
  return std::make_unique<ConvertVectorToSCFPass>(options);
}
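
// For illustration, a hypothetical way to schedule this pass from C++
// (PassManager, FuncOp, and failed() are standard MLIR APIs; `module` and
// `context` are assumed to exist in the caller):
//
//   PassManager pm(context);
//   pm.addNestedPass<FuncOp>(createConvertVectorToSCFPass(
//       VectorTransferToSCFOptions().setUnroll(true)));
//   if (failed(pm.run(module)))
//     llvm::errs() << "vector-to-scf conversion failed\n";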