//===- VectorToSCF.cpp - Conversion from Vector to mix of SCF and Std -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements target-independent lowering of vector transfer
// operations; the 1-D case is left to the target-specific lowering.
//
//===----------------------------------------------------------------------===//

#include <type_traits>

#include "mlir/Conversion/VectorToSCF/VectorToSCF.h"
#include "mlir/Dialect/Affine/EDSC/Intrinsics.h"
#include "mlir/Dialect/SCF/EDSC/Builders.h"
#include "mlir/Dialect/SCF/EDSC/Intrinsics.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/Dialect/Vector/EDSC/Intrinsics.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Types.h"

using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
using vector::TransferReadOp;
using vector::TransferWriteOp;

/// Helper class that captures the common information needed to lower N>1-D
/// vector transfer operations (read and write).
/// On construction, this class opens an edsc::ScopedContext for simpler IR
/// manipulation.
/// In pseudo-IR, for an n-D vector_transfer_read such as:
///
/// ```
///   vector_transfer_read(%m, %offsets, identity_map, %fill) :
///     memref<(leading_dims) x (major_dims) x (minor_dims) x type>,
///     vector<(major_dims) x (minor_dims) x type>
/// ```
///
/// where rank(minor_dims) is the lower-level vector rank (e.g. 1 for LLVM,
/// or higher for targets that support higher-rank vectors).
///
/// This is the entry point to emitting pseudo-IR resembling:
///
/// ```
///   %tmp = alloc(): memref<(major_dims) x vector<minor_dim x type>>
///   for (%ivs_major, {0}, {vector_shape}, {1}) { // (N-1)-D loop nest
///     if (any_of(%ivs_major + %offsets, <, major_dims)) {
///       %v = vector_transfer_read(
///         {%offsets_leading, %ivs_major + %offsets_major, %offsets_minor},
///         %ivs_minor):
///         memref<(leading_dims) x (major_dims) x (minor_dims) x type>,
///         vector<(minor_dims) x type>;
///       store(%v, %tmp);
///     } else {
///       %v = splat(vector<(minor_dims) x type>, %fill)
///       store(%v, %tmp, %ivs_major);
///     }
///   }
///   %res = load(%tmp, %0): memref<(major_dims) x vector<minor_dim x type>>):
///     vector<(major_dims) x (minor_dims) x type>
/// ```
///
template <typename ConcreteOp>
class NDTransferOpHelper {
public:
  NDTransferOpHelper(PatternRewriter &rewriter, ConcreteOp xferOp)
      : rewriter(rewriter), loc(xferOp.getLoc()),
        scope(std::make_unique<ScopedContext>(rewriter, loc)), xferOp(xferOp),
        op(xferOp.getOperation()) {
    vectorType = xferOp.getVectorType();
    // TODO(ntv, ajcbik): when we go to k > 1-D vectors adapt minorRank.
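    // As an illustration, with hypothetical shapes: for a transfer of
    // vector<4x5x6xf32> on a memref<2x3x4x5x6xf32>, the decomposition below
    // yields minorRank = 1, majorRank = 2 and leadingRank = 2, i.e.
    // majorVectorType = vector<4x5xf32>, minorVectorType = vector<6xf32> and
    // memRefMinorVectorType = memref<4x5xvector<6xf32>>.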
    minorRank = 1;
    majorRank = vectorType.getRank() - minorRank;
    leadingRank = xferOp.getMemRefType().getRank() - (majorRank + minorRank);
    majorVectorType =
        VectorType::get(vectorType.getShape().take_front(majorRank),
                        vectorType.getElementType());
    minorVectorType =
        VectorType::get(vectorType.getShape().take_back(minorRank),
                        vectorType.getElementType());
    // A memref of the minor vector type is used for the individual transfers.
    memRefMinorVectorType =
        MemRefType::get(majorVectorType.getShape(), minorVectorType, {},
                        xferOp.getMemRefType().getMemorySpace());
  }

  LogicalResult doReplace();

private:
  /// Creates the loop nest on the "major" dimensions and calls the
  /// `loopBodyBuilder` lambda in the context of the loop nest.
  template <typename Lambda>
  void emitLoops(Lambda loopBodyBuilder);

  /// Operates within the body of `emitLoops` to:
  ///   1. Compute the indexings `majorIvs + majorOffsets`.
  ///   2. Compute a boolean that determines whether the first
  ///      `majorIvs.size()` dimensions of `majorIvs + majorOffsets` are all
  ///      within `memrefBounds`.
  ///   3. Create an IfOp conditioned on the boolean in step 2.
  ///   4. Call a `thenBlockBuilder` and an `elseBlockBuilder` to append
  ///      operations to the IfOp blocks as appropriate.
  template <typename LambdaThen, typename LambdaElse>
  void emitInBounds(ValueRange majorIvs, ValueRange majorOffsets,
                    MemRefBoundsCapture &memrefBounds,
                    LambdaThen thenBlockBuilder, LambdaElse elseBlockBuilder);

  /// Common state to lower vector transfer ops.
  PatternRewriter &rewriter;
  Location loc;
  std::unique_ptr<ScopedContext> scope;
  ConcreteOp xferOp;
  Operation *op;
  // A vector transfer copies data between:
  //   - memref<(leading_dims) x (major_dims) x (minor_dims) x type>
  //   - vector<(major_dims) x (minor_dims) x type>
  unsigned minorRank;         // for now always 1
  unsigned majorRank;         // vector rank - minorRank
  unsigned leadingRank;       // memref rank - vector rank
  VectorType vectorType;      // vector<(major_dims) x (minor_dims) x type>
  VectorType majorVectorType; // vector<(major_dims) x type>
  VectorType minorVectorType; // vector<(minor_dims) x type>
  MemRefType memRefMinorVectorType; // memref<vector<(minor_dims) x type>>
};

template <typename ConcreteOp>
template <typename Lambda>
void NDTransferOpHelper<ConcreteOp>::emitLoops(Lambda loopBodyBuilder) {
  // The loop nest operates on the major dimensions only.
  MemRefBoundsCapture memrefBoundsCapture(xferOp.memref());
  VectorBoundsCapture vectorBoundsCapture(majorVectorType);
  auto majorLbs = vectorBoundsCapture.getLbs();
  auto majorUbs = vectorBoundsCapture.getUbs();
  auto majorSteps = vectorBoundsCapture.getSteps();
  SmallVector<Value, 8> majorIvs(vectorBoundsCapture.rank());
  AffineLoopNestBuilder(majorIvs, majorLbs, majorUbs, majorSteps)([&] {
    ValueRange indices(xferOp.indices());
    loopBodyBuilder(majorIvs, indices.take_front(leadingRank),
                    indices.drop_front(leadingRank).take_front(majorRank),
                    indices.take_back(minorRank), memrefBoundsCapture);
  });
}

template <typename ConcreteOp>
template <typename LambdaThen, typename LambdaElse>
void NDTransferOpHelper<ConcreteOp>::emitInBounds(
    ValueRange majorIvs, ValueRange majorOffsets,
    MemRefBoundsCapture &memrefBounds, LambdaThen thenBlockBuilder,
    LambdaElse elseBlockBuilder) {
  Value inBounds;
  SmallVector<Value, 4> majorIvsPlusOffsets;
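  // The predicate accumulated below is a conjunction with one
  // `majorIvsPlusOffsets.back() < ub` comparison per *masked* major dimension;
  // unmasked dimensions contribute no condition. E.g., with two masked major
  // dimensions it amounts to (iv0 + off0 < ub0) && (iv1 + off1 < ub1).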
  majorIvsPlusOffsets.reserve(majorIvs.size());
  unsigned idx = 0;
  for (auto it : llvm::zip(majorIvs, majorOffsets, memrefBounds.getUbs())) {
    Value iv = std::get<0>(it), off = std::get<1>(it), ub = std::get<2>(it);
    using namespace mlir::edsc::op;
    majorIvsPlusOffsets.push_back(iv + off);
    if (xferOp.isMaskedDim(leadingRank + idx)) {
      Value inBounds2 = majorIvsPlusOffsets.back() < ub;
      inBounds = (inBounds) ? (inBounds && inBounds2) : inBounds2;
    }
    ++idx;
  }

  if (inBounds) {
    auto ifOp = ScopedContext::getBuilderRef().create<scf::IfOp>(
        ScopedContext::getLocation(), TypeRange{}, inBounds,
        /*withElseRegion=*/std::is_same<ConcreteOp, TransferReadOp>());
    BlockBuilder(&ifOp.thenRegion().front(),
                 Append())([&] { thenBlockBuilder(majorIvsPlusOffsets); });
    if (std::is_same<ConcreteOp, TransferReadOp>())
      BlockBuilder(&ifOp.elseRegion().front(),
                   Append())([&] { elseBlockBuilder(majorIvsPlusOffsets); });
  } else {
    // Just build the body of the then block right here.
    thenBlockBuilder(majorIvsPlusOffsets);
  }
}

template <>
LogicalResult NDTransferOpHelper<TransferReadOp>::doReplace() {
  Value alloc = std_alloc(memRefMinorVectorType);

  emitLoops([&](ValueRange majorIvs, ValueRange leadingOffsets,
                ValueRange majorOffsets, ValueRange minorOffsets,
                MemRefBoundsCapture &memrefBounds) {
    // If in-bounds, index into memref and lower to 1-D transfer read.
    auto thenBlockBuilder = [&](ValueRange majorIvsPlusOffsets) {
      SmallVector<Value, 8> indexing;
      indexing.reserve(leadingRank + majorRank + minorRank);
      indexing.append(leadingOffsets.begin(), leadingOffsets.end());
      indexing.append(majorIvsPlusOffsets.begin(), majorIvsPlusOffsets.end());
      indexing.append(minorOffsets.begin(), minorOffsets.end());

      Value memref = xferOp.memref();
      auto map = TransferReadOp::getTransferMinorIdentityMap(
          xferOp.getMemRefType(), minorVectorType);
      ArrayAttr masked;
      if (xferOp.isMaskedDim(xferOp.getVectorType().getRank() - 1)) {
        OpBuilder &b = ScopedContext::getBuilderRef();
        masked = b.getBoolArrayAttr({true});
      }
      auto loaded1D = vector_transfer_read(minorVectorType, memref, indexing,
                                           AffineMapAttr::get(map),
                                           xferOp.padding(), masked);
      // Store the 1-D vector.
      std_store(loaded1D, alloc, majorIvs);
    };
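    // Schematically, `thenBlockBuilder` above emits IR resembling (with the
    // hypothetical shapes from the constructor example):
    //   %v = vector.transfer_read %m[%indexing...], %pad
    //       : memref<2x3x4x5x6xf32>, vector<6xf32>
    //   store %v, %alloc[%majorIvs...] : memref<4x5xvector<6xf32>>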
    // If out-of-bounds, just store a splatted vector.
    auto elseBlockBuilder = [&](ValueRange majorIvsPlusOffsets) {
      auto vector = std_splat(minorVectorType, xferOp.padding());
      std_store(vector, alloc, majorIvs);
    };
    emitInBounds(majorIvs, majorOffsets, memrefBounds, thenBlockBuilder,
                 elseBlockBuilder);
  });

  Value loaded =
      std_load(vector_type_cast(MemRefType::get({}, vectorType), alloc));
  rewriter.replaceOp(op, loaded);

  return success();
}

template <>
LogicalResult NDTransferOpHelper<TransferWriteOp>::doReplace() {
  Value alloc = std_alloc(memRefMinorVectorType);

  std_store(xferOp.vector(),
            vector_type_cast(MemRefType::get({}, vectorType), alloc));

  emitLoops([&](ValueRange majorIvs, ValueRange leadingOffsets,
                ValueRange majorOffsets, ValueRange minorOffsets,
                MemRefBoundsCapture &memrefBounds) {
    auto thenBlockBuilder = [&](ValueRange majorIvsPlusOffsets) {
      SmallVector<Value, 8> indexing;
      indexing.reserve(leadingRank + majorRank + minorRank);
      indexing.append(leadingOffsets.begin(), leadingOffsets.end());
      indexing.append(majorIvsPlusOffsets.begin(), majorIvsPlusOffsets.end());
      indexing.append(minorOffsets.begin(), minorOffsets.end());
      // Lower to 1-D vector_transfer_write and let recursive pattern
      // application handle it.
      Value loaded1D = std_load(alloc, majorIvs);
      auto map = TransferWriteOp::getTransferMinorIdentityMap(
          xferOp.getMemRefType(), minorVectorType);
      ArrayAttr masked;
      if (xferOp.isMaskedDim(xferOp.getVectorType().getRank() - 1)) {
        OpBuilder &b = ScopedContext::getBuilderRef();
        masked = b.getBoolArrayAttr({true});
      }
      vector_transfer_write(loaded1D, xferOp.memref(), indexing,
                            AffineMapAttr::get(map), masked);
    };
    // Don't write anything when out of bounds.
    auto elseBlockBuilder = [&](ValueRange majorIvsPlusOffsets) {};
    emitInBounds(majorIvs, majorOffsets, memrefBounds, thenBlockBuilder,
                 elseBlockBuilder);
  });

  rewriter.eraseOp(op);

  return success();
}

/// Analyzes `transfer` to find an access dimension along the fastest-varying
/// (innermost) remote MemRef dimension. If such a dimension with coalescing
/// properties is found, its index in the permutation map is returned so that
/// callers can swap loop bounds and induction variables to capture it in the
/// innermost loop.
template <typename TransferOpTy>
static int computeCoalescedIndex(TransferOpTy transfer) {
  // Rank of the remote memory access; coalescing behavior occurs on the
  // innermost memory dimension.
  auto remoteRank = transfer.getMemRefType().getRank();
  // Iterate over the result expressions of the permutation map to determine
  // the loop order for creating pointwise copies between remote and local
  // memories.
  int coalescedIdx = -1;
  auto exprs = transfer.permutation_map().getResults();
  for (auto en : llvm::enumerate(exprs)) {
    auto dim = en.value().template dyn_cast<AffineDimExpr>();
    if (!dim)
      continue;
    auto memRefDim = dim.getPosition();
    if (memRefDim == remoteRank - 1) {
      // memRefDim has coalescing properties; it should be swapped into the
      // last position.
      assert(coalescedIdx == -1 && "Unexpected > 1 coalesced indices");
      coalescedIdx = en.index();
    }
  }
  return coalescedIdx;
}

/// Emits the clipped index expressions used to access the remote MemRef, so
/// that all scalar accesses stay within its boundaries.
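/// For each MemRef dimension with upper bound N, the access index i (plus the
/// loop induction variable ii when the dimension appears in the permutation
/// map) is clamped to [0, N-1] with two std_select ops, i.e. the returned
/// expression computes select(x < 0, 0, select(x < N, x, N - 1)).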
template <typename TransferOpTy>
static SmallVector<Value, 8>
clip(TransferOpTy transfer, MemRefBoundsCapture &bounds, ArrayRef<Value> ivs) {
  using namespace mlir::edsc;

  Value zero(std_constant_index(0)), one(std_constant_index(1));
  SmallVector<Value, 8> memRefAccess(transfer.indices());
  SmallVector<Value, 8> clippedScalarAccessExprs(memRefAccess.size());
  // Indices accessing remote memory are clipped and their expressions are
  // returned in clippedScalarAccessExprs.
  for (unsigned memRefDim = 0; memRefDim < clippedScalarAccessExprs.size();
       ++memRefDim) {
    // Linear search on a small number of entries.
    int loopIndex = -1;
    auto exprs = transfer.permutation_map().getResults();
    for (auto en : llvm::enumerate(exprs)) {
      auto expr = en.value();
      auto dim = expr.template dyn_cast<AffineDimExpr>();
      // Sanity check.
      assert(
          (dim || expr.template cast<AffineConstantExpr>().getValue() == 0) &&
          "Expected dim or 0 in permutationMap");
      if (dim && memRefDim == dim.getPosition()) {
        loopIndex = en.index();
        break;
      }
    }

    // We cannot, at the moment, distinguish between unrolled dimensions that
    // implement the "always full" tile abstraction and need clipping from the
    // other ones. So we conservatively clip everything.
    using namespace edsc::op;
    auto N = bounds.ub(memRefDim);
    auto i = memRefAccess[memRefDim];
    if (loopIndex < 0) {
      auto N_minus_1 = N - one;
      auto select_1 = std_select(i < N, i, N_minus_1);
      clippedScalarAccessExprs[memRefDim] =
          std_select(i < zero, zero, select_1);
    } else {
      auto ii = ivs[loopIndex];
      auto i_plus_ii = i + ii;
      auto N_minus_1 = N - one;
      auto select_1 = std_select(i_plus_ii < N, i_plus_ii, N_minus_1);
      clippedScalarAccessExprs[memRefDim] =
          std_select(i_plus_ii < zero, zero, select_1);
    }
  }

  return clippedScalarAccessExprs;
}

namespace {

/// Implements lowering of TransferReadOp and TransferWriteOp to a
/// proper abstraction for the hardware.
///
/// For now, we only emit a simple loop nest that performs clipped pointwise
/// copies from remote to locally allocated memory.
365 /// 366 /// Consider the case: 367 /// 368 /// ```mlir 369 /// // Read the slice `%A[%i0, %i1:%i1+256, %i2:%i2+32]` into 370 /// // vector<32x256xf32> and pad with %f0 to handle the boundary case: 371 /// %f0 = constant 0.0f : f32 372 /// scf.for %i0 = 0 to %0 { 373 /// scf.for %i1 = 0 to %1 step %c256 { 374 /// scf.for %i2 = 0 to %2 step %c32 { 375 /// %v = vector.transfer_read %A[%i0, %i1, %i2], %f0 376 /// {permutation_map: (d0, d1, d2) -> (d2, d1)} : 377 /// memref<?x?x?xf32>, vector<32x256xf32> 378 /// }}} 379 /// ``` 380 /// 381 /// The rewriters construct loop and indices that access MemRef A in a pattern 382 /// resembling the following (while guaranteeing an always full-tile 383 /// abstraction): 384 /// 385 /// ```mlir 386 /// scf.for %d2 = 0 to %c256 { 387 /// scf.for %d1 = 0 to %c32 { 388 /// %s = %A[%i0, %i1 + %d1, %i2 + %d2] : f32 389 /// %tmp[%d2, %d1] = %s 390 /// } 391 /// } 392 /// ``` 393 /// 394 /// In the current state, only a clipping transfer is implemented by `clip`, 395 /// which creates individual indexing expressions of the form: 396 /// 397 /// ```mlir-dsc 398 /// auto condMax = i + ii < N; 399 /// auto max = std_select(condMax, i + ii, N - one) 400 /// auto cond = i + ii < zero; 401 /// std_select(cond, zero, max); 402 /// ``` 403 /// 404 /// In the future, clipping should not be the only way and instead we should 405 /// load vectors + mask them. Similarly on the write side, load/mask/store for 406 /// implementing RMW behavior. 407 /// 408 /// Lowers TransferOp into a combination of: 409 /// 1. local memory allocation; 410 /// 2. perfect loop nest over: 411 /// a. scalar load/stores from local buffers (viewed as a scalar memref); 412 /// a. scalar store/load to original memref (with clipping). 413 /// 3. vector_load/store 414 /// 4. local memory deallocation. 415 /// Minor variations occur depending on whether a TransferReadOp or 416 /// a TransferWriteOp is rewritten. 417 template <typename TransferOpTy> 418 struct VectorTransferRewriter : public RewritePattern { 419 explicit VectorTransferRewriter(MLIRContext *context) 420 : RewritePattern(TransferOpTy::getOperationName(), 1, context) {} 421 422 /// Used for staging the transfer in a local scalar buffer. 423 MemRefType tmpMemRefType(TransferOpTy transfer) const { 424 auto vectorType = transfer.getVectorType(); 425 return MemRefType::get(vectorType.getShape(), vectorType.getElementType(), 426 {}, 0); 427 } 428 429 /// Performs the rewrite. 430 LogicalResult matchAndRewrite(Operation *op, 431 PatternRewriter &rewriter) const override; 432 }; 433 434 /// Lowers TransferReadOp into a combination of: 435 /// 1. local memory allocation; 436 /// 2. perfect loop nest over: 437 /// a. scalar load from local buffers (viewed as a scalar memref); 438 /// a. scalar store to original memref (with clipping). 439 /// 3. vector_load from local buffer (viewed as a memref<1 x vector>); 440 /// 4. local memory deallocation. 441 /// 442 /// Lowers the data transfer part of a TransferReadOp while ensuring no 443 /// out-of-bounds accesses are possible. Out-of-bounds behavior is handled by 444 /// clipping. This means that a given value in memory can be read multiple 445 /// times and concurrently. 446 /// 447 /// Important notes about clipping and "full-tiles only" abstraction: 448 /// ================================================================= 449 /// When using clipping for dealing with boundary conditions, the same edge 450 /// value will appear multiple times (a.k.a edge padding). 
/// This is fine if the subsequent vector operations are all data-parallel but
/// **is generally incorrect** in the presence of reductions or extract
/// operations.
///
/// More generally, clipping is a scalar abstraction that is expected to work
/// fine as a baseline for CPUs and GPUs but not for vector_load and DMAs.
/// To deal with real vector_load and DMAs, a "padded allocation + view"
/// abstraction with the ability to read out-of-memref-bounds (but still within
/// the allocated region) is necessary.
///
/// Whether using scalar loops or vector_load/DMAs to perform the transfer,
/// junk values will be materialized in the vectors and generally need to be
/// filtered out and replaced by the "neutral element". This neutral element is
/// op-dependent so, in the future, we expect to create a vector filter and
/// apply it to a splatted constant vector with the proper neutral element at
/// each ssa-use. This filtering is not necessary for pure data-parallel
/// operations.
///
/// In the case of vector_store/DMAs, Read-Modify-Write will be required, which
/// also has concurrency implications. Note that by using clipped scalar stores
/// in the presence of data-parallel-only operations, we generate code that
/// writes the same value multiple times to the edge locations.
///
/// TODO(ntv): implement alternatives to clipping.
/// TODO(ntv): support non-data-parallel operations.

/// Performs the rewrite.
template <>
LogicalResult VectorTransferRewriter<TransferReadOp>::matchAndRewrite(
    Operation *op, PatternRewriter &rewriter) const {
  using namespace mlir::edsc::op;

  TransferReadOp transfer = cast<TransferReadOp>(op);
  if (AffineMap::isMinorIdentity(transfer.permutation_map())) {
    // If > 1-D, emit a bunch of loops around 1-D vector transfers.
    if (transfer.getVectorType().getRank() > 1)
      return NDTransferOpHelper<TransferReadOp>(rewriter, transfer).doReplace();
    // If 1-D, this is now handled by the target-specific lowering.
    if (transfer.getVectorType().getRank() == 1)
      return failure();
  }

  // Conservative lowering to scalar load/stores.
  // 1. Setup all the captures.
  ScopedContext scope(rewriter, transfer.getLoc());
  StdIndexedValue remote(transfer.memref());
  MemRefBoundsCapture memRefBoundsCapture(transfer.memref());
  VectorBoundsCapture vectorBoundsCapture(transfer.vector());
  int coalescedIdx = computeCoalescedIndex(transfer);
  // Swap the vectorBoundsCapture which will reorder loop bounds.
  if (coalescedIdx >= 0)
    vectorBoundsCapture.swapRanges(vectorBoundsCapture.rank() - 1,
                                   coalescedIdx);

  auto lbs = vectorBoundsCapture.getLbs();
  auto ubs = vectorBoundsCapture.getUbs();
  SmallVector<Value, 8> steps;
  steps.reserve(vectorBoundsCapture.getSteps().size());
  for (auto step : vectorBoundsCapture.getSteps())
    steps.push_back(std_constant_index(step));

  // 2. Emit alloc-copy-load-dealloc.
  Value tmp = std_alloc(tmpMemRefType(transfer));
  StdIndexedValue local(tmp);
  Value vec = vector_type_cast(tmp);
  loopNestBuilder(lbs, ubs, steps, [&](ValueRange loopIvs) {
    auto ivs = llvm::to_vector<8>(loopIvs);
    // Swap the ivs which will reorder memory accesses.
    if (coalescedIdx >= 0)
      std::swap(ivs.back(), ivs[coalescedIdx]);
    // Compute clippedScalarAccessExprs in the loop nest scope (ivs exist).
    local(ivs) = remote(clip(transfer, memRefBoundsCapture, ivs));
  });
  Value vectorValue = std_load(vec);
  (std_dealloc(tmp)); // vexing parse

  // 3. Propagate.
  rewriter.replaceOp(op, vectorValue);
  return success();
}

/// Lowers TransferWriteOp into a combination of:
///   1. local memory allocation;
///   2. vector_store to local buffer (viewed as a memref<1 x vector>);
///   3. perfect loop nest over:
///      a. scalar load from local buffers (viewed as a scalar memref);
///      b. scalar store to original memref (with clipping);
///   4. local memory deallocation.
///
/// More specifically, lowers the data transfer part while ensuring no
/// out-of-bounds accesses are possible. Out-of-bounds behavior is handled by
/// clipping. This means that a given value in memory can be written to
/// multiple times and concurrently.
///
/// See the `Important notes about clipping and "full-tiles only" abstraction`
/// section in the description of the TransferReadOp lowering above.
///
/// TODO(ntv): implement alternatives to clipping.
/// TODO(ntv): support non-data-parallel operations.
template <>
LogicalResult VectorTransferRewriter<TransferWriteOp>::matchAndRewrite(
    Operation *op, PatternRewriter &rewriter) const {
  using namespace edsc::op;

  TransferWriteOp transfer = cast<TransferWriteOp>(op);
  if (AffineMap::isMinorIdentity(transfer.permutation_map())) {
    // If > 1-D, emit a bunch of loops around 1-D vector transfers.
    if (transfer.getVectorType().getRank() > 1)
      return NDTransferOpHelper<TransferWriteOp>(rewriter, transfer)
          .doReplace();
    // If 1-D, this is now handled by the target-specific lowering.
    if (transfer.getVectorType().getRank() == 1)
      return failure();
  }

  // 1. Setup all the captures.
  ScopedContext scope(rewriter, transfer.getLoc());
  StdIndexedValue remote(transfer.memref());
  MemRefBoundsCapture memRefBoundsCapture(transfer.memref());
  Value vectorValue(transfer.vector());
  VectorBoundsCapture vectorBoundsCapture(transfer.vector());
  int coalescedIdx = computeCoalescedIndex(transfer);
  // Swap the vectorBoundsCapture which will reorder loop bounds.
  if (coalescedIdx >= 0)
    vectorBoundsCapture.swapRanges(vectorBoundsCapture.rank() - 1,
                                   coalescedIdx);

  auto lbs = vectorBoundsCapture.getLbs();
  auto ubs = vectorBoundsCapture.getUbs();
  SmallVector<Value, 8> steps;
  steps.reserve(vectorBoundsCapture.getSteps().size());
  for (auto step : vectorBoundsCapture.getSteps())
    steps.push_back(std_constant_index(step));

  // 2. Emit alloc-store-copy-dealloc.
  Value tmp = std_alloc(tmpMemRefType(transfer));
  StdIndexedValue local(tmp);
  Value vec = vector_type_cast(tmp);
  std_store(vectorValue, vec);
  loopNestBuilder(lbs, ubs, steps, [&](ValueRange loopIvs) {
    auto ivs = llvm::to_vector<8>(loopIvs);
    // Swap the ivs which will reorder memory accesses.
    if (coalescedIdx >= 0)
      std::swap(ivs.back(), ivs[coalescedIdx]);
    // Compute clippedScalarAccessExprs in the loop nest scope (ivs exist).
    remote(clip(transfer, memRefBoundsCapture, ivs)) = local(ivs);
  });
  (std_dealloc(tmp)); // vexing parse...
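
  // 3. Erase the original op; the transfer has been fully materialized by the
  // clipped scalar stores above.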
  rewriter.eraseOp(op);
  return success();
}

} // namespace

void mlir::populateVectorToSCFConversionPatterns(
    OwningRewritePatternList &patterns, MLIRContext *context) {
  patterns.insert<VectorTransferRewriter<vector::TransferReadOp>,
                  VectorTransferRewriter<vector::TransferWriteOp>>(context);
}