//===- VectorToSCF.cpp - Conversion from Vector to mix of SCF and Std -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements target-dependent lowering of vector transfer
// operations.
//
//===----------------------------------------------------------------------===//

#include <type_traits>

#include "mlir/Conversion/VectorToSCF/VectorToSCF.h"
#include "mlir/Dialect/Affine/EDSC/Intrinsics.h"
#include "mlir/Dialect/SCF/EDSC/Builders.h"
#include "mlir/Dialect/SCF/EDSC/Intrinsics.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/Dialect/Vector/EDSC/Intrinsics.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/Dialect/Vector/VectorUtils.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Types.h"

using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
using vector::TransferReadOp;
using vector::TransferWriteOp;

namespace {
/// Helper class that captures the common information needed to lower N>1-D
/// vector transfer operations (read and write).
/// On construction, this class opens an edsc::ScopedContext for simpler IR
/// manipulation.
/// In pseudo-IR, for an n-D vector_transfer_read such as:
///
/// ```
///    vector_transfer_read(%m, %offsets, identity_map, %fill) :
///      memref<(leading_dims) x (major_dims) x (minor_dims) x type>,
///      vector<(major_dims) x (minor_dims) x type>
/// ```
///
/// where rank(minor_dims) is the lower-level vector rank (e.g. 1 for LLVM or
/// higher),
///
/// this is the entry point to emitting pseudo-IR resembling:
///
/// ```
///    %tmp = alloc(): memref<(major_dims) x vector<minor_dim x type>>
///    for (%ivs_major, {0}, {vector_shape}, {1}) { // (N-1)-D loop nest
///      if (any_of(%ivs_major + %offsets, <, major_dims)) {
///        %v = vector_transfer_read(
///          {%offsets_leading, %ivs_major + %offsets_major, %offsets_minor},
///          %ivs_minor):
///          memref<(leading_dims) x (major_dims) x (minor_dims) x type>,
///          vector<(minor_dims) x type>;
///        store(%v, %tmp);
///      } else {
///        %v = splat(vector<(minor_dims) x type>, %fill)
///        store(%v, %tmp, %ivs_major);
///      }
///    }
///    %res = load(%tmp): memref<(major_dims) x vector<minor_dim x type>>:
///      vector<(major_dims) x (minor_dims) x type>
/// ```
///
template <typename ConcreteOp>
class NDTransferOpHelper {
public:
  NDTransferOpHelper(PatternRewriter &rewriter, ConcreteOp xferOp,
                     const VectorTransferToSCFOptions &options)
      : rewriter(rewriter), options(options), loc(xferOp.getLoc()),
        scope(std::make_unique<ScopedContext>(rewriter, loc)), xferOp(xferOp),
        op(xferOp.getOperation()) {
    vectorType = xferOp.getVectorType();
    // TODO(ntv, ajcbik): when we go to k > 1-D vectors adapt minorRank.
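    // Illustrative example (hypothetical shapes): for a transfer between
    // memref<?x?x4x5xf32> and vector<4x5xf32>, this yields minorRank = 1,
    // majorRank = 1 and leadingRank = 2.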
    minorRank = 1;
    majorRank = vectorType.getRank() - minorRank;
    leadingRank = xferOp.getMemRefType().getRank() - (majorRank + minorRank);
    majorVectorType =
        VectorType::get(vectorType.getShape().take_front(majorRank),
                        vectorType.getElementType());
    minorVectorType =
        VectorType::get(vectorType.getShape().take_back(minorRank),
                        vectorType.getElementType());
    /// Memref of minor vector type is used for individual transfers.
    memRefMinorVectorType =
        MemRefType::get(majorVectorType.getShape(), minorVectorType, {},
                        xferOp.getMemRefType().getMemorySpace());
  }

  LogicalResult doReplace();

private:
  /// Creates the loop nest on the "major" dimensions and calls the
  /// `loopBodyBuilder` lambda in the context of the loop nest.
  template <typename Lambda>
  void emitLoops(Lambda loopBodyBuilder);

  /// Operate within the body of `emitLoops` to:
  ///   1. Compute the indexings `majorIvs + majorOffsets` and save them in
  ///      `majorIvsPlusOffsets`.
  ///   2. Return a boolean that determines whether the first `majorIvs.rank()`
  ///      dimensions `majorIvs + majorOffsets` are all within `memrefBounds`.
  Value emitInBoundsCondition(ValueRange majorIvs, ValueRange majorOffsets,
                              MemRefBoundsCapture &memrefBounds,
                              SmallVectorImpl<Value> &majorIvsPlusOffsets);

  /// Common state to lower vector transfer ops.
  PatternRewriter &rewriter;
  const VectorTransferToSCFOptions &options;
  Location loc;
  std::unique_ptr<ScopedContext> scope;
  ConcreteOp xferOp;
  Operation *op;
  // A vector transfer copies data between:
  //   - memref<(leading_dims) x (major_dims) x (minor_dims) x type>
  //   - vector<(major_dims) x (minor_dims) x type>
  unsigned minorRank;         // for now always 1
  unsigned majorRank;         // vector rank - minorRank
  unsigned leadingRank;       // memref rank - vector rank
  VectorType vectorType;      // vector<(major_dims) x (minor_dims) x type>
  VectorType majorVectorType; // vector<(major_dims) x type>
  VectorType minorVectorType; // vector<(minor_dims) x type>
  MemRefType memRefMinorVectorType; // memref<vector<(minor_dims) x type>>
};

template <typename ConcreteOp>
template <typename Lambda>
void NDTransferOpHelper<ConcreteOp>::emitLoops(Lambda loopBodyBuilder) {
  /// Loop nest operates on the major dimensions.
  MemRefBoundsCapture memrefBoundsCapture(xferOp.memref());

  if (options.unroll) {
    auto shape = majorVectorType.getShape();
    auto strides = computeStrides(shape);
    unsigned numUnrolledInstances = computeMaxLinearIndex(shape);
    ValueRange indices(xferOp.indices());
    for (unsigned idx = 0; idx < numUnrolledInstances; ++idx) {
      SmallVector<int64_t, 4> offsets = delinearize(strides, idx);
      SmallVector<Value, 4> offsetValues = llvm::to_vector<4>(
          llvm::map_range(offsets, [](int64_t off) -> Value {
            return std_constant_index(off);
          }));
      loopBodyBuilder(offsetValues, indices.take_front(leadingRank),
                      indices.drop_front(leadingRank).take_front(majorRank),
                      indices.take_back(minorRank), memrefBoundsCapture);
    }
  } else {
    VectorBoundsCapture vectorBoundsCapture(majorVectorType);
    auto majorLbs = vectorBoundsCapture.getLbs();
    auto majorUbs = vectorBoundsCapture.getUbs();
    auto majorSteps = vectorBoundsCapture.getSteps();
    SmallVector<Value, 8> majorIvs(vectorBoundsCapture.rank());
    AffineLoopNestBuilder(majorIvs, majorLbs, majorUbs, majorSteps)([&] {
      ValueRange indices(xferOp.indices());
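      // Forward the current loop ivs together with the transfer indices,
      // split into leading / major / minor groups, to the body builder.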
      loopBodyBuilder(majorIvs, indices.take_front(leadingRank),
                      indices.drop_front(leadingRank).take_front(majorRank),
                      indices.take_back(minorRank), memrefBoundsCapture);
    });
  }
}

template <typename ConcreteOp>
Value NDTransferOpHelper<ConcreteOp>::emitInBoundsCondition(
    ValueRange majorIvs, ValueRange majorOffsets,
    MemRefBoundsCapture &memrefBounds,
    SmallVectorImpl<Value> &majorIvsPlusOffsets) {
  Value inBoundsCondition;
  majorIvsPlusOffsets.reserve(majorIvs.size());
  unsigned idx = 0;
  for (auto it : llvm::zip(majorIvs, majorOffsets, memrefBounds.getUbs())) {
    Value iv = std::get<0>(it), off = std::get<1>(it), ub = std::get<2>(it);
    using namespace mlir::edsc::op;
    majorIvsPlusOffsets.push_back(iv + off);
    if (xferOp.isMaskedDim(leadingRank + idx)) {
      Value inBounds = majorIvsPlusOffsets.back() < ub;
      inBoundsCondition =
          (inBoundsCondition) ? (inBoundsCondition && inBounds) : inBounds;
    }
    ++idx;
  }
  return inBoundsCondition;
}

template <>
LogicalResult NDTransferOpHelper<TransferReadOp>::doReplace() {
  Value alloc, result;
  if (options.unroll)
    result = std_splat(vectorType, xferOp.padding());
  else
    alloc = std_alloc(memRefMinorVectorType);

  emitLoops([&](ValueRange majorIvs, ValueRange leadingOffsets,
                ValueRange majorOffsets, ValueRange minorOffsets,
                MemRefBoundsCapture &memrefBounds) {
    /// Lambda to load a 1-D vector in the current loop ivs + offset context.
    auto load1DVector = [&](ValueRange majorIvsPlusOffsets) -> Value {
      SmallVector<Value, 8> indexing;
      indexing.reserve(leadingRank + majorRank + minorRank);
      indexing.append(leadingOffsets.begin(), leadingOffsets.end());
      indexing.append(majorIvsPlusOffsets.begin(), majorIvsPlusOffsets.end());
      indexing.append(minorOffsets.begin(), minorOffsets.end());
      Value memref = xferOp.memref();
      auto map = TransferReadOp::getTransferMinorIdentityMap(
          xferOp.getMemRefType(), minorVectorType);
      ArrayAttr masked;
      if (xferOp.isMaskedDim(xferOp.getVectorType().getRank() - 1)) {
        OpBuilder &b = ScopedContext::getBuilderRef();
        masked = b.getBoolArrayAttr({true});
      }
      return vector_transfer_read(minorVectorType, memref, indexing,
                                  AffineMapAttr::get(map), xferOp.padding(),
                                  masked);
    };

    // 1. Compute the inBoundsCondition in the current loop ivs + offset
    // context.
    SmallVector<Value, 4> majorIvsPlusOffsets;
    Value inBoundsCondition = emitInBoundsCondition(
        majorIvs, majorOffsets, memrefBounds, majorIvsPlusOffsets);

    if (inBoundsCondition) {
      // 2. If the condition is not null, we need an IfOp, which may yield
      // if `options.unroll` is true.
      SmallVector<Type, 1> resultType;
      if (options.unroll)
        resultType.push_back(vectorType);
      auto ifOp = ScopedContext::getBuilderRef().create<scf::IfOp>(
          ScopedContext::getLocation(), resultType, inBoundsCondition,
          /*withElseRegion=*/true);

      // 3.a. If in-bounds, progressively lower to a 1-D transfer read.
      BlockBuilder(&ifOp.thenRegion().front(), Append())([&] {
        Value vector = load1DVector(majorIvsPlusOffsets);
        // 3.a.i. If `options.unroll` is true, insert the 1-D vector in the
        // aggregate. We must yield and merge with the `else` branch.
        if (options.unroll) {
          vector = vector_insert(vector, result, majorIvs);
          (loop_yield(vector));
          return;
        }
        // 3.a.ii. Otherwise, just go through the temporary `alloc`.
        std_store(vector, alloc, majorIvs);
      });

      // 3.b. If not in-bounds, splat a 1-D vector.
      BlockBuilder(&ifOp.elseRegion().front(), Append())([&] {
        Value vector = std_splat(minorVectorType, xferOp.padding());
        // 3.b.i. If `options.unroll` is true, insert the 1-D vector in the
        // aggregate. We must yield and merge with the `then` branch.
        if (options.unroll) {
          vector = vector_insert(vector, result, majorIvs);
          (loop_yield(vector));
          return;
        }
        // 3.b.ii. Otherwise, just go through the temporary `alloc`.
        std_store(vector, alloc, majorIvs);
      });
      if (!resultType.empty())
        result = *ifOp.results().begin();
    } else {
      // 4. Guaranteed in-bounds, progressively lower to a 1-D transfer read.
      Value loaded1D = load1DVector(majorIvsPlusOffsets);
      // 5.a. If `options.unroll` is true, insert the 1-D vector in the
      // aggregate.
      if (options.unroll)
        result = vector_insert(loaded1D, result, majorIvs);
      // 5.b. Otherwise, just go through the temporary `alloc`.
      else
        std_store(loaded1D, alloc, majorIvs);
    }
  });

  assert((!options.unroll ^ (bool)result) &&
         "Expected resulting Value iff unroll");
  if (!result)
    result =
        std_load(vector_type_cast(MemRefType::get({}, vectorType), alloc));
  rewriter.replaceOp(op, result);

  return success();
}

template <>
LogicalResult NDTransferOpHelper<TransferWriteOp>::doReplace() {
  Value alloc;
  if (!options.unroll) {
    alloc = std_alloc(memRefMinorVectorType);
    std_store(xferOp.vector(),
              vector_type_cast(MemRefType::get({}, vectorType), alloc));
  }

  emitLoops([&](ValueRange majorIvs, ValueRange leadingOffsets,
                ValueRange majorOffsets, ValueRange minorOffsets,
                MemRefBoundsCapture &memrefBounds) {
    // Lower to a 1-D vector_transfer_write and let recursion handle it.
    auto emitTransferWrite = [&](ValueRange majorIvsPlusOffsets) {
      SmallVector<Value, 8> indexing;
      indexing.reserve(leadingRank + majorRank + minorRank);
      indexing.append(leadingOffsets.begin(), leadingOffsets.end());
      indexing.append(majorIvsPlusOffsets.begin(), majorIvsPlusOffsets.end());
      indexing.append(minorOffsets.begin(), minorOffsets.end());
      Value result;
      // If `options.unroll` is true, extract the 1-D vector from the
      // aggregate.
      if (options.unroll)
        result = vector_extract(xferOp.vector(), majorIvs);
      else
        result = std_load(alloc, majorIvs);
      auto map = TransferWriteOp::getTransferMinorIdentityMap(
          xferOp.getMemRefType(), minorVectorType);
      ArrayAttr masked;
      if (xferOp.isMaskedDim(xferOp.getVectorType().getRank() - 1)) {
        OpBuilder &b = ScopedContext::getBuilderRef();
        masked = b.getBoolArrayAttr({true});
      }
      vector_transfer_write(result, xferOp.memref(), indexing,
                            AffineMapAttr::get(map), masked);
    };

    // 1. Compute the inBoundsCondition in the current loop ivs + offset
    // context.
    SmallVector<Value, 4> majorIvsPlusOffsets;
    Value inBoundsCondition = emitInBoundsCondition(
        majorIvs, majorOffsets, memrefBounds, majorIvsPlusOffsets);

    if (inBoundsCondition) {
      // 2.a. If the condition is not null, we need an IfOp to write
      // conditionally. Progressively lower to a 1-D transfer write.
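      // Unlike the read case, nothing needs to be yielded: the IfOp carries
      // no results and no else region is created.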
      auto ifOp = ScopedContext::getBuilderRef().create<scf::IfOp>(
          ScopedContext::getLocation(), TypeRange{}, inBoundsCondition,
          /*withElseRegion=*/false);
      BlockBuilder(&ifOp.thenRegion().front(),
                   Append())([&] { emitTransferWrite(majorIvsPlusOffsets); });
    } else {
      // 2.b. Guaranteed in-bounds. Progressively lower to a 1-D transfer
      // write.
      emitTransferWrite(majorIvsPlusOffsets);
    }
  });

  rewriter.eraseOp(op);

  return success();
}

} // namespace

/// Analyzes the `transfer` to find an access dimension along the fastest
/// remote MemRef dimension. If such a dimension with coalescing properties is
/// found, `pivs` and `vectorBoundsCapture` are swapped so that the invocation
/// of LoopNestBuilder captures it in the innermost loop.
template <typename TransferOpTy>
static int computeCoalescedIndex(TransferOpTy transfer) {
  // Rank of the remote memory access; coalescing behavior occurs on the
  // innermost memory dimension.
  auto remoteRank = transfer.getMemRefType().getRank();
  // Iterate over the results expressions of the permutation map to determine
  // the loop order for creating pointwise copies between remote and local
  // memories.
  int coalescedIdx = -1;
  auto exprs = transfer.permutation_map().getResults();
  for (auto en : llvm::enumerate(exprs)) {
    auto dim = en.value().template dyn_cast<AffineDimExpr>();
    if (!dim) {
      continue;
    }
    auto memRefDim = dim.getPosition();
    if (memRefDim == remoteRank - 1) {
      // memRefDim has coalescing properties; it should be swapped into the
      // last position.
      assert(coalescedIdx == -1 && "Unexpected > 1 coalesced indices");
      coalescedIdx = en.index();
    }
  }
  return coalescedIdx;
}

/// Emits remote memory accesses that are clipped to the boundaries of the
/// MemRef.
template <typename TransferOpTy>
static SmallVector<Value, 8>
clip(TransferOpTy transfer, MemRefBoundsCapture &bounds, ArrayRef<Value> ivs) {
  using namespace mlir::edsc;

  Value zero(std_constant_index(0)), one(std_constant_index(1));
  SmallVector<Value, 8> memRefAccess(transfer.indices());
  SmallVector<Value, 8> clippedScalarAccessExprs(memRefAccess.size());
  // Indices accessing remote memory are clipped and their expressions are
  // returned in clippedScalarAccessExprs.
  for (unsigned memRefDim = 0; memRefDim < clippedScalarAccessExprs.size();
       ++memRefDim) {
    // Linear search on a small number of entries.
    int loopIndex = -1;
    auto exprs = transfer.permutation_map().getResults();
    for (auto en : llvm::enumerate(exprs)) {
      auto expr = en.value();
      auto dim = expr.template dyn_cast<AffineDimExpr>();
      // Sanity check.
      assert(
          (dim || expr.template cast<AffineConstantExpr>().getValue() == 0) &&
          "Expected dim or 0 in permutationMap");
      if (dim && memRefDim == dim.getPosition()) {
        loopIndex = en.index();
        break;
      }
    }

    // We cannot distinguish at the moment between unrolled dimensions that
    // implement the "always full" tile abstraction and need clipping from the
    // other ones. So we conservatively clip everything.
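    // Concretely, each scalar access x (either `i` or `i + iv` below) is
    // clamped to the valid range [0, N - 1] as:
    //   select(x < 0, 0, select(x < N, x, N - 1))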
    using namespace edsc::op;
    auto N = bounds.ub(memRefDim);
    auto i = memRefAccess[memRefDim];
    if (loopIndex < 0) {
      auto N_minus_1 = N - one;
      auto select_1 = std_select(i < N, i, N_minus_1);
      clippedScalarAccessExprs[memRefDim] =
          std_select(i < zero, zero, select_1);
    } else {
      auto ii = ivs[loopIndex];
      auto i_plus_ii = i + ii;
      auto N_minus_1 = N - one;
      auto select_1 = std_select(i_plus_ii < N, i_plus_ii, N_minus_1);
      clippedScalarAccessExprs[memRefDim] =
          std_select(i_plus_ii < zero, zero, select_1);
    }
  }

  return clippedScalarAccessExprs;
}

namespace mlir {

template <typename TransferOpTy>
VectorTransferRewriter<TransferOpTy>::VectorTransferRewriter(
    VectorTransferToSCFOptions options, MLIRContext *context)
    : RewritePattern(TransferOpTy::getOperationName(), 1, context),
      options(options) {}

/// Used for staging the transfer in a local buffer.
template <typename TransferOpTy>
MemRefType VectorTransferRewriter<TransferOpTy>::tmpMemRefType(
    TransferOpTy transfer) const {
  auto vectorType = transfer.getVectorType();
  return MemRefType::get(vectorType.getShape(), vectorType.getElementType(),
                         {}, 0);
}

/// Lowers TransferReadOp into a combination of:
///   1. local memory allocation;
///   2. perfect loop nest over:
///      a. scalar load from the original memref (with clipping);
///      b. scalar store to the local buffer (viewed as a scalar memref);
///   3. vector load from the local buffer (viewed as a memref<1 x vector>);
///   4. local memory deallocation.
///
/// Lowers the data transfer part of a TransferReadOp while ensuring no
/// out-of-bounds accesses are possible. Out-of-bounds behavior is handled by
/// clipping. This means that a given value in memory can be read multiple
/// times and concurrently.
///
/// Important notes about clipping and "full-tiles only" abstraction:
/// =================================================================
/// When using clipping for dealing with boundary conditions, the same edge
/// value will appear multiple times (a.k.a. edge padding). This is fine if the
/// subsequent vector operations are all data-parallel but **is generally
/// incorrect** in the presence of reductions or extract operations.
///
/// More generally, clipping is a scalar abstraction that is expected to work
/// fine as a baseline for CPUs and GPUs but not for vector_load and DMAs.
/// To deal with real vector_load and DMAs, a "padded allocation + view"
/// abstraction with the ability to read out-of-memref-bounds (but still within
/// the allocated region) is necessary.
///
/// Whether using scalar loops or vector_load/DMAs to perform the transfer,
/// junk values will be materialized in the vectors and generally need to be
/// filtered out and replaced by the "neutral element". This neutral element is
/// op-dependent so, in the future, we expect to create a vector filter and
/// apply it to a splatted constant vector with the proper neutral element at
/// each ssa-use. This filtering is not necessary for pure data-parallel
/// operations.
///
/// In the case of vector_store/DMAs, Read-Modify-Write will be required, which
/// also has concurrency implications. Note that by using clipped scalar stores
/// in the presence of data-parallel only operations, we generate code that
/// writes the same value multiple times on the edge locations.
///
/// TODO(ntv): implement alternatives to clipping.
/// TODO(ntv): support non-data-parallel operations.

/// Performs the rewrite.
template <>
LogicalResult VectorTransferRewriter<TransferReadOp>::matchAndRewrite(
    Operation *op, PatternRewriter &rewriter) const {
  using namespace mlir::edsc::op;

  TransferReadOp transfer = cast<TransferReadOp>(op);
  if (AffineMap::isMinorIdentity(transfer.permutation_map())) {
    // If > 1-D, emit a bunch of loops around 1-D vector transfers.
    if (transfer.getVectorType().getRank() > 1)
      return NDTransferOpHelper<TransferReadOp>(rewriter, transfer, options)
          .doReplace();
    // If 1-D, this is now handled by the target-specific lowering.
    if (transfer.getVectorType().getRank() == 1)
      return failure();
  }

  // Conservative lowering to scalar load / stores.
  // 1. Setup all the captures.
  ScopedContext scope(rewriter, transfer.getLoc());
  StdIndexedValue remote(transfer.memref());
  MemRefBoundsCapture memRefBoundsCapture(transfer.memref());
  VectorBoundsCapture vectorBoundsCapture(transfer.vector());
  int coalescedIdx = computeCoalescedIndex(transfer);
  // Swap the vectorBoundsCapture which will reorder loop bounds.
  if (coalescedIdx >= 0)
    vectorBoundsCapture.swapRanges(vectorBoundsCapture.rank() - 1,
                                   coalescedIdx);

  auto lbs = vectorBoundsCapture.getLbs();
  auto ubs = vectorBoundsCapture.getUbs();
  SmallVector<Value, 8> steps;
  steps.reserve(vectorBoundsCapture.getSteps().size());
  for (auto step : vectorBoundsCapture.getSteps())
    steps.push_back(std_constant_index(step));

  // 2. Emit alloc-copy-load-dealloc.
  Value tmp = std_alloc(tmpMemRefType(transfer));
  StdIndexedValue local(tmp);
  Value vec = vector_type_cast(tmp);
  loopNestBuilder(lbs, ubs, steps, [&](ValueRange loopIvs) {
    auto ivs = llvm::to_vector<8>(loopIvs);
    // Swap the ivs which will reorder memory accesses.
    if (coalescedIdx >= 0)
      std::swap(ivs.back(), ivs[coalescedIdx]);
    // Computes clippedScalarAccessExprs in the loop nest scope (ivs exist).
    local(ivs) = remote(clip(transfer, memRefBoundsCapture, ivs));
  });
  Value vectorValue = std_load(vec);
  (std_dealloc(tmp)); // vexing parse

  // 3. Propagate.
  rewriter.replaceOp(op, vectorValue);
  return success();
}

/// Lowers TransferWriteOp into a combination of:
///   1. local memory allocation;
///   2. vector_store to the local buffer (viewed as a memref<1 x vector>);
///   3. perfect loop nest over:
///      a. scalar load from the local buffer (viewed as a scalar memref);
///      b. scalar store to the original memref (with clipping);
///   4. local memory deallocation.
///
/// More specifically, lowers the data transfer part while ensuring no
/// out-of-bounds accesses are possible. Out-of-bounds behavior is handled by
/// clipping. This means that a given value in memory can be written to
/// multiple times and concurrently.
///
/// See `Important notes about clipping and "full-tiles only" abstraction` in
/// the description of the TransferReadOp lowering above.
///
/// TODO(ntv): implement alternatives to clipping.
/// TODO(ntv): support non-data-parallel operations.
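///
/// In pseudo-IR, the generated pattern resembles (a sketch; names and shapes
/// are illustrative only):
///
/// ```
///    %tmp = alloc(): memref<(vector_shape) x type>
///    %view = vector_type_cast(%tmp): memref<vector<(vector_shape) x type>>
///    store(%vec, %view)
///    for %ivs in (vector_shape) {   // perfect scalar loop nest
///      %remote[clip(%indices + %ivs)] = %tmp[%ivs]
///    }
///    dealloc(%tmp)
/// ```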
template <>
LogicalResult VectorTransferRewriter<TransferWriteOp>::matchAndRewrite(
    Operation *op, PatternRewriter &rewriter) const {
  using namespace edsc::op;

  TransferWriteOp transfer = cast<TransferWriteOp>(op);
  if (AffineMap::isMinorIdentity(transfer.permutation_map())) {
    // If > 1-D, emit a bunch of loops around 1-D vector transfers.
    if (transfer.getVectorType().getRank() > 1)
      return NDTransferOpHelper<TransferWriteOp>(rewriter, transfer, options)
          .doReplace();
    // If 1-D, this is now handled by the target-specific lowering.
    if (transfer.getVectorType().getRank() == 1)
      return failure();
  }

  // 1. Setup all the captures.
  ScopedContext scope(rewriter, transfer.getLoc());
  StdIndexedValue remote(transfer.memref());
  MemRefBoundsCapture memRefBoundsCapture(transfer.memref());
  Value vectorValue(transfer.vector());
  VectorBoundsCapture vectorBoundsCapture(transfer.vector());
  int coalescedIdx = computeCoalescedIndex(transfer);
  // Swap the vectorBoundsCapture which will reorder loop bounds.
  if (coalescedIdx >= 0)
    vectorBoundsCapture.swapRanges(vectorBoundsCapture.rank() - 1,
                                   coalescedIdx);

  auto lbs = vectorBoundsCapture.getLbs();
  auto ubs = vectorBoundsCapture.getUbs();
  SmallVector<Value, 8> steps;
  steps.reserve(vectorBoundsCapture.getSteps().size());
  for (auto step : vectorBoundsCapture.getSteps())
    steps.push_back(std_constant_index(step));

  // 2. Emit alloc-store-copy-dealloc.
  Value tmp = std_alloc(tmpMemRefType(transfer));
  StdIndexedValue local(tmp);
  Value vec = vector_type_cast(tmp);
  std_store(vectorValue, vec);
  loopNestBuilder(lbs, ubs, steps, [&](ValueRange loopIvs) {
    auto ivs = llvm::to_vector<8>(loopIvs);
    // Swap the ivs which will reorder memory accesses.
    if (coalescedIdx >= 0)
      std::swap(ivs.back(), ivs[coalescedIdx]);
    // Computes clippedScalarAccessExprs in the loop nest scope (ivs exist).
    remote(clip(transfer, memRefBoundsCapture, ivs)) = local(ivs);
  });
  (std_dealloc(tmp)); // vexing parse...

  rewriter.eraseOp(op);
  return success();
}

void populateVectorToSCFConversionPatterns(
    OwningRewritePatternList &patterns, MLIRContext *context,
    const VectorTransferToSCFOptions &options) {
  patterns.insert<VectorTransferRewriter<vector::TransferReadOp>,
                  VectorTransferRewriter<vector::TransferWriteOp>>(options,
                                                                   context);
}

} // namespace mlir