//===- VectorToSCF.cpp - Conversion from Vector to mix of SCF and Std ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements target-independent lowering of vector transfer
// operations.
//
//===----------------------------------------------------------------------===//

#include <type_traits>

#include "mlir/Conversion/VectorToSCF/VectorToSCF.h"

#include "../PassDetail.h"
#include "mlir/Dialect/Affine/EDSC/Intrinsics.h"
#include "mlir/Dialect/SCF/EDSC/Builders.h"
#include "mlir/Dialect/SCF/EDSC/Intrinsics.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/Dialect/Vector/EDSC/Intrinsics.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/Dialect/Vector/VectorUtils.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Types.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/Passes.h"

using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
using vector::TransferReadOp;
using vector::TransferWriteOp;

namespace {
/// Helper class that captures the common information needed to lower N>1-D
/// vector transfer operations (read and write).
/// On construction, this class opens an edsc::ScopedContext for simpler IR
/// manipulation.
/// In pseudo-IR, for an n-D vector_transfer_read such as:
///
/// ```
///   vector_transfer_read(%m, %offsets, identity_map, %fill) :
///     memref<(leading_dims) x (major_dims) x (minor_dims) x type>,
///     vector<(major_dims) x (minor_dims) x type>
/// ```
///
/// where rank(minor_dims) is the rank of the lower-level vector abstraction
/// (e.g. 1 for LLVM; it may be higher for other targets).
///
/// This is the entry point to emitting pseudo-IR resembling:
///
/// ```
///   %tmp = alloc(): memref<(major_dims) x vector<minor_dims x type>>
///   for (%ivs_major, {0}, {vector_shape}, {1}) { // (N-1)-D loop nest
///     if (any_of(%ivs_major + %offsets, <, major_dims)) {
///       %v = vector_transfer_read(
///         {%offsets_leading, %ivs_major + %offsets_major, %offsets_minor},
///          %ivs_minor):
///         memref<(leading_dims) x (major_dims) x (minor_dims) x type>,
///         vector<(minor_dims) x type>;
///       store(%v, %tmp);
///     } else {
///       %v = splat(vector<(minor_dims) x type>, %fill)
///       store(%v, %tmp, %ivs_major);
///     }
///   }
///   %res = load(%tmp, %0) : memref<(major_dims) x vector<minor_dims x type>> :
///     vector<(major_dims) x (minor_dims) x type>
/// ```
///
template <typename ConcreteOp>
class NDTransferOpHelper {
public:
  NDTransferOpHelper(PatternRewriter &rewriter, ConcreteOp xferOp,
                     const VectorTransferToSCFOptions &options)
      : rewriter(rewriter), options(options), loc(xferOp.getLoc()),
        scope(std::make_unique<ScopedContext>(rewriter, loc)), xferOp(xferOp),
        op(xferOp.getOperation()) {
    vectorType = xferOp.getVectorType();
    // TODO(ntv, ajcbik): when we go to k > 1-D vectors adapt minorRank.
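    // Rank decomposition, e.g. (a hypothetical shape for illustration):
    // transferring vector<5 x 8 x f32> from memref<2 x 3 x 5 x 8 x f32>
    // yields minorRank = 1 (the 8), majorRank = 1 (the 5) and
    // leadingRank = 2 (the 2 x 3).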
    minorRank = 1;
    majorRank = vectorType.getRank() - minorRank;
    leadingRank = xferOp.getMemRefType().getRank() - (majorRank + minorRank);
    majorVectorType =
        VectorType::get(vectorType.getShape().take_front(majorRank),
                        vectorType.getElementType());
    minorVectorType =
        VectorType::get(vectorType.getShape().take_back(minorRank),
                        vectorType.getElementType());
    // A memref of the minor vector type is used for the individual transfers.
    memRefMinorVectorType =
        MemRefType::get(majorVectorType.getShape(), minorVectorType, {},
                        xferOp.getMemRefType().getMemorySpace());
  }

  LogicalResult doReplace();

private:
  /// Creates the loop nest on the "major" dimensions and calls the
  /// `loopBodyBuilder` lambda in the context of the loop nest.
  template <typename Lambda>
  void emitLoops(Lambda loopBodyBuilder);

  /// Operates within the body of `emitLoops` to:
  ///   1. Compute the indexings `majorIvs + majorOffsets` and save them in
  ///      `majorIvsPlusOffsets`.
  ///   2. Return a boolean that determines whether the first `majorIvs.rank()`
  ///      dimensions of `majorIvs + majorOffsets` are all within
  ///      `memrefBounds`.
  Value emitInBoundsCondition(ValueRange majorIvs, ValueRange majorOffsets,
                              MemRefBoundsCapture &memrefBounds,
                              SmallVectorImpl<Value> &majorIvsPlusOffsets);

  /// Common state to lower vector transfer ops.
  PatternRewriter &rewriter;
  const VectorTransferToSCFOptions &options;
  Location loc;
  std::unique_ptr<ScopedContext> scope;
  ConcreteOp xferOp;
  Operation *op;
  // A vector transfer copies data between:
  //   - memref<(leading_dims) x (major_dims) x (minor_dims) x type>
  //   - vector<(major_dims) x (minor_dims) x type>
  unsigned minorRank;         // for now always 1
  unsigned majorRank;         // vector rank - minorRank
  unsigned leadingRank;       // memref rank - vector rank
  VectorType vectorType;      // vector<(major_dims) x (minor_dims) x type>
  VectorType majorVectorType; // vector<(major_dims) x type>
  VectorType minorVectorType; // vector<(minor_dims) x type>
  MemRefType memRefMinorVectorType; // memref<vector<(minor_dims) x type>>
};

template <typename ConcreteOp>
template <typename Lambda>
void NDTransferOpHelper<ConcreteOp>::emitLoops(Lambda loopBodyBuilder) {
  // The loop nest operates on the major dimensions.
  MemRefBoundsCapture memrefBoundsCapture(xferOp.memref());

  if (options.unroll) {
    auto shape = majorVectorType.getShape();
    auto strides = computeStrides(shape);
    unsigned numUnrolledInstances = computeMaxLinearIndex(shape);
    ValueRange indices(xferOp.indices());
    for (unsigned idx = 0; idx < numUnrolledInstances; ++idx) {
      SmallVector<int64_t, 4> offsets = delinearize(strides, idx);
      SmallVector<Value, 4> offsetValues = llvm::to_vector<4>(
          llvm::map_range(offsets, [](int64_t off) -> Value {
            return std_constant_index(off);
          }));
      loopBodyBuilder(offsetValues, indices.take_front(leadingRank),
                      indices.drop_front(leadingRank).take_front(majorRank),
                      indices.take_back(minorRank), memrefBoundsCapture);
    }
  } else {
    VectorBoundsCapture vectorBoundsCapture(majorVectorType);
    auto majorLbs = vectorBoundsCapture.getLbs();
    auto majorUbs = vectorBoundsCapture.getUbs();
    auto majorSteps = vectorBoundsCapture.getSteps();
    SmallVector<Value, 8> majorIvs(vectorBoundsCapture.rank());
    AffineLoopNestBuilder(majorIvs, majorLbs, majorUbs, majorSteps)([&] {
      ValueRange indices(xferOp.indices());
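      // The transfer indices are partitioned as (leading | major | minor);
      // only the major group gets offset by the loop induction variables.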
      loopBodyBuilder(majorIvs, indices.take_front(leadingRank),
                      indices.drop_front(leadingRank).take_front(majorRank),
                      indices.take_back(minorRank), memrefBoundsCapture);
    });
  }
}

template <typename ConcreteOp>
Value NDTransferOpHelper<ConcreteOp>::emitInBoundsCondition(
    ValueRange majorIvs, ValueRange majorOffsets,
    MemRefBoundsCapture &memrefBounds,
    SmallVectorImpl<Value> &majorIvsPlusOffsets) {
  Value inBoundsCondition;
  majorIvsPlusOffsets.reserve(majorIvs.size());
  unsigned idx = 0;
  for (auto it : llvm::zip(majorIvs, majorOffsets, memrefBounds.getUbs())) {
    Value iv = std::get<0>(it), off = std::get<1>(it), ub = std::get<2>(it);
    using namespace mlir::edsc::op;
    majorIvsPlusOffsets.push_back(iv + off);
    if (xferOp.isMaskedDim(leadingRank + idx)) {
      Value inBounds = majorIvsPlusOffsets.back() < ub;
      inBoundsCondition =
          inBoundsCondition ? inBoundsCondition && inBounds : inBounds;
    }
    ++idx;
  }
  return inBoundsCondition;
}

template <>
LogicalResult NDTransferOpHelper<TransferReadOp>::doReplace() {
  Value alloc, result;
  if (options.unroll)
    result = std_splat(vectorType, xferOp.padding());
  else
    alloc = std_alloc(memRefMinorVectorType);

  emitLoops([&](ValueRange majorIvs, ValueRange leadingOffsets,
                ValueRange majorOffsets, ValueRange minorOffsets,
                MemRefBoundsCapture &memrefBounds) {
    // Lambda to load a 1-D vector in the current loop ivs + offset context.
    auto load1DVector = [&](ValueRange majorIvsPlusOffsets) -> Value {
      SmallVector<Value, 8> indexing;
      indexing.reserve(leadingRank + majorRank + minorRank);
      indexing.append(leadingOffsets.begin(), leadingOffsets.end());
      indexing.append(majorIvsPlusOffsets.begin(), majorIvsPlusOffsets.end());
      indexing.append(minorOffsets.begin(), minorOffsets.end());
      Value memref = xferOp.memref();
      auto map = TransferReadOp::getTransferMinorIdentityMap(
          xferOp.getMemRefType(), minorVectorType);
      ArrayAttr masked;
      if (xferOp.isMaskedDim(xferOp.getVectorType().getRank() - 1)) {
        OpBuilder &b = ScopedContext::getBuilderRef();
        masked = b.getBoolArrayAttr({true});
      }
      return vector_transfer_read(minorVectorType, memref, indexing,
                                  AffineMapAttr::get(map), xferOp.padding(),
                                  masked);
    };

    // 1. Compute the inBoundsCondition in the current loop ivs + offset
    //    context.
    SmallVector<Value, 4> majorIvsPlusOffsets;
    Value inBoundsCondition = emitInBoundsCondition(
        majorIvs, majorOffsets, memrefBounds, majorIvsPlusOffsets);

    if (inBoundsCondition) {
      // 2. If the condition is not null, we need an IfOp, which may yield
      //    if `options.unroll` is true.
      SmallVector<Type, 1> resultType;
      if (options.unroll)
        resultType.push_back(vectorType);

      // 3. If in-bounds, progressively lower to a 1-D transfer read,
      //    otherwise splat a 1-D vector.
      ValueRange ifResults = conditionBuilder(
          resultType, inBoundsCondition,
          [&]() -> scf::ValueVector {
            Value vector = load1DVector(majorIvsPlusOffsets);
            // 3.a. If `options.unroll` is true, insert the 1-D vector in the
            //      aggregate. We must yield and merge with the `else` branch.
            if (options.unroll) {
              vector = vector_insert(vector, result, majorIvs);
              return {vector};
            }
            // 3.b. Otherwise, just go through the temporary `alloc`.
            std_store(vector, alloc, majorIvs);
            return {};
          },
          [&]() -> scf::ValueVector {
            Value vector = std_splat(minorVectorType, xferOp.padding());
            // 3.c. If `options.unroll` is true, insert the 1-D vector in the
            //      aggregate. We must yield and merge with the `then` branch.
            if (options.unroll) {
              vector = vector_insert(vector, result, majorIvs);
              return {vector};
            }
            // 3.d. Otherwise, just go through the temporary `alloc`.
            std_store(vector, alloc, majorIvs);
            return {};
          });

      if (!resultType.empty())
        result = *ifResults.begin();
    } else {
      // 4. Guaranteed in-bounds, progressively lower to a 1-D transfer read.
      Value loaded1D = load1DVector(majorIvsPlusOffsets);
      // 4.a. If `options.unroll` is true, insert the 1-D vector in the
      //      aggregate.
      if (options.unroll)
        result = vector_insert(loaded1D, result, majorIvs);
      // 4.b. Otherwise, just go through the temporary `alloc`.
      else
        std_store(loaded1D, alloc, majorIvs);
    }
  });

  assert((!options.unroll ^ (bool)result) &&
         "Expected resulting Value iff unroll");
  if (!result)
    result = std_load(vector_type_cast(MemRefType::get({}, vectorType), alloc));
  rewriter.replaceOp(op, result);

  return success();
}

template <>
LogicalResult NDTransferOpHelper<TransferWriteOp>::doReplace() {
  Value alloc;
  if (!options.unroll) {
    alloc = std_alloc(memRefMinorVectorType);
    std_store(xferOp.vector(),
              vector_type_cast(MemRefType::get({}, vectorType), alloc));
  }

  emitLoops([&](ValueRange majorIvs, ValueRange leadingOffsets,
                ValueRange majorOffsets, ValueRange minorOffsets,
                MemRefBoundsCapture &memrefBounds) {
    // Lower to a 1-D vector_transfer_write and let recursion handle it.
    auto emitTransferWrite = [&](ValueRange majorIvsPlusOffsets) {
      SmallVector<Value, 8> indexing;
      indexing.reserve(leadingRank + majorRank + minorRank);
      indexing.append(leadingOffsets.begin(), leadingOffsets.end());
      indexing.append(majorIvsPlusOffsets.begin(), majorIvsPlusOffsets.end());
      indexing.append(minorOffsets.begin(), minorOffsets.end());
      Value result;
      // If `options.unroll` is true, extract the 1-D vector from the
      // aggregate.
      if (options.unroll)
        result = vector_extract(xferOp.vector(), majorIvs);
      else
        result = std_load(alloc, majorIvs);
      auto map = TransferWriteOp::getTransferMinorIdentityMap(
          xferOp.getMemRefType(), minorVectorType);
      ArrayAttr masked;
      if (xferOp.isMaskedDim(xferOp.getVectorType().getRank() - 1)) {
        OpBuilder &b = ScopedContext::getBuilderRef();
        masked = b.getBoolArrayAttr({true});
      }
      vector_transfer_write(result, xferOp.memref(), indexing,
                            AffineMapAttr::get(map), masked);
    };

    // 1. Compute the inBoundsCondition in the current loop ivs + offset
    //    context.
    SmallVector<Value, 4> majorIvsPlusOffsets;
    Value inBoundsCondition = emitInBoundsCondition(
        majorIvs, majorOffsets, memrefBounds, majorIvsPlusOffsets);

    if (inBoundsCondition) {
      // 2.a. If the condition is not null, we need an IfOp to write
      //      conditionally. Progressively lower to a 1-D transfer write.
      conditionBuilder(inBoundsCondition,
                       [&] { emitTransferWrite(majorIvsPlusOffsets); });
    } else {
      // 2.b. Guaranteed in-bounds, progressively lower to a 1-D transfer
      //      write.
      emitTransferWrite(majorIvsPlusOffsets);
    }
  });

  rewriter.eraseOp(op);

  return success();
}

} // namespace

/// Analyzes the `transfer` to find an access dimension along the fastest
/// varying (innermost) MemRef dimension. If such a dimension with coalescing
/// properties is found, the callers swap the loop ivs and the
/// `vectorBoundsCapture` ranges so that the loop nest they build iterates over
/// it in the innermost loop.
template <typename TransferOpTy>
static int computeCoalescedIndex(TransferOpTy transfer) {
  // Rank of the remote memory access; coalescing behavior occurs on the
  // innermost memory dimension.
  auto remoteRank = transfer.getMemRefType().getRank();
  // Iterate over the result expressions of the permutation map to determine
  // the loop order for creating pointwise copies between remote and local
  // memories.
  int coalescedIdx = -1;
  auto exprs = transfer.permutation_map().getResults();
  for (auto en : llvm::enumerate(exprs)) {
    auto dim = en.value().template dyn_cast<AffineDimExpr>();
    if (!dim)
      continue;
    auto memRefDim = dim.getPosition();
    if (memRefDim == remoteRank - 1) {
      // memRefDim has coalescing properties; it should be swapped into the
      // last position.
      assert(coalescedIdx == -1 && "Unexpected > 1 coalesced indices");
      coalescedIdx = en.index();
    }
  }
  return coalescedIdx;
}

/// Emits remote memory accesses that are clipped to the boundaries of the
/// MemRef.
template <typename TransferOpTy>
static SmallVector<Value, 8>
clip(TransferOpTy transfer, MemRefBoundsCapture &bounds, ArrayRef<Value> ivs) {
  using namespace mlir::edsc;

  Value zero(std_constant_index(0)), one(std_constant_index(1));
  SmallVector<Value, 8> memRefAccess(transfer.indices());
  SmallVector<Value, 8> clippedScalarAccessExprs(memRefAccess.size());
  // Indices accessing remote memory are clipped and their expressions are
  // returned in clippedScalarAccessExprs.
  for (unsigned memRefDim = 0; memRefDim < clippedScalarAccessExprs.size();
       ++memRefDim) {
    // Linear search on a small number of entries.
    int loopIndex = -1;
    auto exprs = transfer.permutation_map().getResults();
    for (auto en : llvm::enumerate(exprs)) {
      auto expr = en.value();
      auto dim = expr.template dyn_cast<AffineDimExpr>();
      // Sanity check.
      assert(
          (dim || expr.template cast<AffineConstantExpr>().getValue() == 0) &&
          "Expected dim or 0 in permutationMap");
      if (dim && memRefDim == dim.getPosition()) {
        loopIndex = en.index();
        break;
      }
    }

    // We cannot distinguish at the moment between unrolled dimensions that
    // implement the "always full" tile abstraction and need clipping from the
    // other ones. So we conservatively clip everything.
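    // The clipped index for a dimension of size N is equivalent to
    // max(0, min(idx, N - 1)), emitted below as two selects:
    //   select(idx < 0, 0, select(idx < N, idx, N - 1))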
    using namespace edsc::op;
    auto N = bounds.ub(memRefDim);
    auto i = memRefAccess[memRefDim];
    if (loopIndex < 0) {
      auto N_minus_1 = N - one;
      auto select_1 = std_select(i < N, i, N_minus_1);
      clippedScalarAccessExprs[memRefDim] =
          std_select(i < zero, zero, select_1);
    } else {
      auto ii = ivs[loopIndex];
      auto i_plus_ii = i + ii;
      auto N_minus_1 = N - one;
      auto select_1 = std_select(i_plus_ii < N, i_plus_ii, N_minus_1);
      clippedScalarAccessExprs[memRefDim] =
          std_select(i_plus_ii < zero, zero, select_1);
    }
  }

  return clippedScalarAccessExprs;
}

namespace mlir {

template <typename TransferOpTy>
VectorTransferRewriter<TransferOpTy>::VectorTransferRewriter(
    VectorTransferToSCFOptions options, MLIRContext *context)
    : RewritePattern(TransferOpTy::getOperationName(), 1, context),
      options(options) {}

/// Used for staging the transfer in a local buffer.
template <typename TransferOpTy>
MemRefType VectorTransferRewriter<TransferOpTy>::tmpMemRefType(
    TransferOpTy transfer) const {
  auto vectorType = transfer.getVectorType();
  return MemRefType::get(vectorType.getShape(), vectorType.getElementType(), {},
                         0);
}

/// Lowers TransferReadOp into a combination of:
///   1. local memory allocation;
///   2. perfect loop nest over:
///      a. scalar load from original memref (with clipping);
///      b. scalar store to local buffer (viewed as a scalar memref);
///   3. vector_load from local buffer (viewed as a memref<1 x vector>);
///   4. local memory deallocation.
///
/// Lowers the data transfer part of a TransferReadOp while ensuring no
/// out-of-bounds accesses are possible. Out-of-bounds behavior is handled by
/// clipping. This means that a given value in memory can be read multiple
/// times and concurrently.
///
/// Important notes about clipping and the "full-tiles only" abstraction:
/// =====================================================================
/// When using clipping for dealing with boundary conditions, the same edge
/// value will appear multiple times (a.k.a. edge padding). This is fine if the
/// subsequent vector operations are all data-parallel but **is generally
/// incorrect** in the presence of reductions or extract operations.
///
/// More generally, clipping is a scalar abstraction that is expected to work
/// fine as a baseline for CPUs and GPUs but not for vector_load and DMAs.
/// To deal with real vector_load and DMAs, a "padded allocation + view"
/// abstraction with the ability to read out-of-memref-bounds (but still within
/// the allocated region) is necessary.
///
/// Whether using scalar loops or vector_load/DMAs to perform the transfer,
/// junk values will be materialized in the vectors and generally need to be
/// filtered out and replaced by the "neutral element". This neutral element is
/// op-dependent so, in the future, we expect to create a vector filter and
/// apply it to a splatted constant vector with the proper neutral element at
/// each ssa-use. This filtering is not necessary for pure data-parallel
/// operations.
///
/// In the case of vector_store/DMAs, Read-Modify-Write will be required, which
/// also has concurrency implications. Note that by using clipped scalar
/// stores in the presence of data-parallel only operations, we generate code
/// that writes the same value multiple times on the edge locations.
///
/// TODO(ntv): implement alternatives to clipping.
/// TODO(ntv): support non-data-parallel operations.

/// Performs the rewrite.
template <>
LogicalResult VectorTransferRewriter<TransferReadOp>::matchAndRewrite(
    Operation *op, PatternRewriter &rewriter) const {
  using namespace mlir::edsc::op;

  TransferReadOp transfer = cast<TransferReadOp>(op);
  if (AffineMap::isMinorIdentity(transfer.permutation_map())) {
    // If > 1-D, emit a bunch of loops around 1-D vector transfers.
    if (transfer.getVectorType().getRank() > 1)
      return NDTransferOpHelper<TransferReadOp>(rewriter, transfer, options)
          .doReplace();
    // If 1-D, this is now handled by the target-specific lowering.
    if (transfer.getVectorType().getRank() == 1)
      return failure();
  }

  // Conservative lowering to scalar loads / stores.
  // 1. Setup all the captures.
  ScopedContext scope(rewriter, transfer.getLoc());
  StdIndexedValue remote(transfer.memref());
  MemRefBoundsCapture memRefBoundsCapture(transfer.memref());
  VectorBoundsCapture vectorBoundsCapture(transfer.vector());
  int coalescedIdx = computeCoalescedIndex(transfer);
  // Swap the vectorBoundsCapture which will reorder loop bounds.
  if (coalescedIdx >= 0)
    vectorBoundsCapture.swapRanges(vectorBoundsCapture.rank() - 1,
                                   coalescedIdx);

  auto lbs = vectorBoundsCapture.getLbs();
  auto ubs = vectorBoundsCapture.getUbs();
  SmallVector<Value, 8> steps;
  steps.reserve(vectorBoundsCapture.getSteps().size());
  for (auto step : vectorBoundsCapture.getSteps())
    steps.push_back(std_constant_index(step));

  // 2. Emit alloc-copy-load-dealloc.
  Value tmp = std_alloc(tmpMemRefType(transfer));
  StdIndexedValue local(tmp);
  Value vec = vector_type_cast(tmp);
  loopNestBuilder(lbs, ubs, steps, [&](ValueRange loopIvs) {
    auto ivs = llvm::to_vector<8>(loopIvs);
    // Swap the ivs which will reorder memory accesses.
    if (coalescedIdx >= 0)
      std::swap(ivs.back(), ivs[coalescedIdx]);
    // Compute clippedScalarAccessExprs in the loop nest scope (ivs exist).
    local(ivs) = remote(clip(transfer, memRefBoundsCapture, ivs));
  });
  Value vectorValue = std_load(vec);
  (std_dealloc(tmp)); // vexing parse

  // 3. Propagate.
  rewriter.replaceOp(op, vectorValue);
  return success();
}

/// Lowers TransferWriteOp into a combination of:
///   1. local memory allocation;
///   2. vector_store to local buffer (viewed as a memref<1 x vector>);
///   3. perfect loop nest over:
///      a. scalar load from local buffer (viewed as a scalar memref);
///      b. scalar store to original memref (with clipping);
///   4. local memory deallocation.
///
/// More specifically, lowers the data transfer part while ensuring no
/// out-of-bounds accesses are possible. Out-of-bounds behavior is handled by
/// clipping. This means that a given value in memory can be written to
/// multiple times and concurrently.
///
/// See the `Important notes about clipping and the "full-tiles only"
/// abstraction` section in the TransferReadOp lowering description above.
///
/// TODO(ntv): implement alternatives to clipping.
/// TODO(ntv): support non-data-parallel operations.
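///
/// For example, for a 2-D vector<M x N> write with a minor identity
/// permutation map and offsets (%o0, %o1), the emitted pseudo-IR resembles
/// (a sketch, eliding types and casts):
///
/// ```
///   %tmp = alloc() : memref<M x N x type>
///   %vec_view = vector_type_cast %tmp : memref<vector<M x N x type>>
///   store %vector, %vec_view
///   for %i = 0 to M {
///     for %j = 0 to N {
///       %s = load %tmp[%i, %j]
///       store %s, %remote[clip(%o0 + %i), clip(%o1 + %j)]
///     }
///   }
///   dealloc %tmp
/// ```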
template <>
LogicalResult VectorTransferRewriter<TransferWriteOp>::matchAndRewrite(
    Operation *op, PatternRewriter &rewriter) const {
  using namespace edsc::op;

  TransferWriteOp transfer = cast<TransferWriteOp>(op);
  if (AffineMap::isMinorIdentity(transfer.permutation_map())) {
    // If > 1-D, emit a bunch of loops around 1-D vector transfers.
    if (transfer.getVectorType().getRank() > 1)
      return NDTransferOpHelper<TransferWriteOp>(rewriter, transfer, options)
          .doReplace();
    // If 1-D, this is now handled by the target-specific lowering.
    if (transfer.getVectorType().getRank() == 1)
      return failure();
  }

  // 1. Setup all the captures.
  ScopedContext scope(rewriter, transfer.getLoc());
  StdIndexedValue remote(transfer.memref());
  MemRefBoundsCapture memRefBoundsCapture(transfer.memref());
  Value vectorValue(transfer.vector());
  VectorBoundsCapture vectorBoundsCapture(transfer.vector());
  int coalescedIdx = computeCoalescedIndex(transfer);
  // Swap the vectorBoundsCapture which will reorder loop bounds.
  if (coalescedIdx >= 0)
    vectorBoundsCapture.swapRanges(vectorBoundsCapture.rank() - 1,
                                   coalescedIdx);

  auto lbs = vectorBoundsCapture.getLbs();
  auto ubs = vectorBoundsCapture.getUbs();
  SmallVector<Value, 8> steps;
  steps.reserve(vectorBoundsCapture.getSteps().size());
  for (auto step : vectorBoundsCapture.getSteps())
    steps.push_back(std_constant_index(step));

  // 2. Emit alloc-store-copy-dealloc.
  Value tmp = std_alloc(tmpMemRefType(transfer));
  StdIndexedValue local(tmp);
  Value vec = vector_type_cast(tmp);
  std_store(vectorValue, vec);
  loopNestBuilder(lbs, ubs, steps, [&](ValueRange loopIvs) {
    auto ivs = llvm::to_vector<8>(loopIvs);
    // Swap the ivs which will reorder memory accesses.
    if (coalescedIdx >= 0)
      std::swap(ivs.back(), ivs[coalescedIdx]);
    // Compute clippedScalarAccessExprs in the loop nest scope (ivs exist).
    remote(clip(transfer, memRefBoundsCapture, ivs)) = local(ivs);
  });
  (std_dealloc(tmp)); // vexing parse...

  rewriter.eraseOp(op);
  return success();
}

void populateVectorToSCFConversionPatterns(
    OwningRewritePatternList &patterns, MLIRContext *context,
    const VectorTransferToSCFOptions &options) {
  patterns.insert<VectorTransferRewriter<vector::TransferReadOp>,
                  VectorTransferRewriter<vector::TransferWriteOp>>(options,
                                                                   context);
}

} // namespace mlir

namespace {

struct ConvertVectorToSCFPass
    : public ConvertVectorToSCFBase<ConvertVectorToSCFPass> {
  ConvertVectorToSCFPass() = default;
  ConvertVectorToSCFPass(const ConvertVectorToSCFPass &pass) {}
  ConvertVectorToSCFPass(const VectorTransferToSCFOptions &options) {
    this->fullUnroll = options.unroll;
  }

  void runOnFunction() override {
    OwningRewritePatternList patterns;
    auto *context = getFunction().getContext();
    populateVectorToSCFConversionPatterns(
        patterns, context, VectorTransferToSCFOptions().setUnroll(fullUnroll));
    applyPatternsAndFoldGreedily(getFunction(), patterns);
  }
};

} // namespace

std::unique_ptr<Pass>
mlir::createConvertVectorToSCFPass(const VectorTransferToSCFOptions &options) {
  return std::make_unique<ConvertVectorToSCFPass>(options);
}
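// Example usage (a sketch; the textual pass name and `full-unroll` flag are
// assumed to match the ConvertVectorToSCF tablegen pass definition):
//
//   mlir-opt -convert-vector-to-scf="full-unroll=true" input.mlir
//
// or, when building a pass pipeline programmatically:
//
//   pm.addPass(createConvertVectorToSCFPass(
//       VectorTransferToSCFOptions().setUnroll(true)));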