//===- VectorToSCF.cpp - Conversion from Vector to mix of SCF and Std -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements target-dependent lowering of vector transfer
// operations.
//
//===----------------------------------------------------------------------===//

#include <type_traits>

#include "mlir/Conversion/VectorToSCF/VectorToSCF.h"

#include "../PassDetail.h"
#include "mlir/Dialect/Affine/EDSC/Intrinsics.h"
#include "mlir/Dialect/MemRef/EDSC/Intrinsics.h"
#include "mlir/Dialect/SCF/EDSC/Builders.h"
#include "mlir/Dialect/SCF/EDSC/Intrinsics.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/Dialect/Vector/EDSC/Intrinsics.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/Dialect/Vector/VectorUtils.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/Matchers.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "mlir/Transforms/Passes.h"

using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
using vector::TransferReadOp;
using vector::TransferWriteOp;

// Return a list of Values that correspond to multiple AffineApplyOp, one for
// each result of `map`. Each `expr` in `map` is canonicalized and folded
// greedily according to its operands.
// TODO: factor out in a common location that both linalg and vector can use.
static SmallVector<Value, 4>
applyMapToValues(OpBuilder &b, Location loc, AffineMap map, ValueRange values) {
  SmallVector<Value, 4> res;
  res.reserve(map.getNumResults());
  unsigned numDims = map.getNumDims(), numSym = map.getNumSymbols();
  // For each `expr` in `map`, apply `expr` to the values extracted from
  // ranges. If the resulting application can be folded into a Value, the
  // folding occurs eagerly. Otherwise, an affine.apply operation is emitted.
  for (auto expr : map.getResults()) {
    // Deliberately build a single-result map per expression so that
    // composition and canonicalization can fold each result independently.
    AffineMap map = AffineMap::get(numDims, numSym, expr);
    SmallVector<Value, 4> operands(values.begin(), values.end());
    fullyComposeAffineMapAndOperands(&map, &operands);
    canonicalizeMapAndOperands(&map, &operands);
    res.push_back(b.createOrFold<AffineApplyOp>(loc, map, operands));
  }
  return res;
}

namespace {
/// Helper class that captures the common information needed to lower N>1-D
/// vector transfer operations (read and write).
/// On construction, this class opens an edsc::ScopedContext for simpler IR
/// manipulation.
/// In pseudo-IR, for an n-D vector_transfer_read such as:
///
/// ```
///   vector_transfer_read(%m, %offsets, identity_map, %fill) :
///     memref<(leading_dims) x (major_dims) x (minor_dims) x type>,
///     vector<(major_dims) x (minor_dims) x type>
/// ```
///
/// where rank(minor_dims) is the lower-level vector rank (e.g. 1 for LLVM, or
/// higher).
///
/// This is the entry point to emitting pseudo-IR resembling:
///
/// ```
///   %tmp = alloc(): memref<(major_dims) x vector<(minor_dims) x type>>
///   for (%ivs_major, {0}, {vector_shape}, {1}) {    // (N-1)-D loop nest
///     if (any_of(%ivs_major + %offsets, <, major_dims)) {
///       %v = vector_transfer_read(
///         {%offsets_leading, %ivs_major + %offsets_major, %offsets_minor},
///          %ivs_minor):
///         memref<(leading_dims) x (major_dims) x (minor_dims) x type>,
///         vector<(minor_dims) x type>;
///       store(%v, %tmp, %ivs_major);
///     } else {
///       %v = splat(vector<(minor_dims) x type>, %fill);
///       store(%v, %tmp, %ivs_major);
///     }
///   }
///   %res = load(%tmp, %0): memref<(major_dims) x vector<(minor_dims) x type>>:
///     vector<(major_dims) x (minor_dims) x type>
/// ```
///
template <typename ConcreteOp>
class NDTransferOpHelper {
public:
  NDTransferOpHelper(PatternRewriter &rewriter, ConcreteOp xferOp,
                     const VectorTransferToSCFOptions &options)
      : rewriter(rewriter), options(options), loc(xferOp.getLoc()),
        scope(std::make_unique<ScopedContext>(rewriter, loc)), xferOp(xferOp),
        op(xferOp.getOperation()) {
    vectorType = xferOp.getVectorType();
    // TODO: when we go to k > 1-D vectors adapt minorRank.
    minorRank = 1;
    majorRank = vectorType.getRank() - minorRank;
    leadingRank = xferOp.getLeadingShapedRank();
    majorVectorType =
        VectorType::get(vectorType.getShape().take_front(majorRank),
                        vectorType.getElementType());
    minorVectorType =
        VectorType::get(vectorType.getShape().take_back(minorRank),
                        vectorType.getElementType());
    /// Memref of minor vector type is used for individual transfers.
    memRefMinorVectorType = MemRefType::get(
        majorVectorType.getShape(), minorVectorType, {},
        xferOp.getShapedType().template cast<MemRefType>().getMemorySpace());
  }

  LogicalResult doReplace();

private:
  /// Creates the loop nest on the "major" dimensions and calls the
  /// `loopBodyBuilder` lambda in the context of the loop nest.
  void
  emitLoops(llvm::function_ref<void(ValueRange, ValueRange, ValueRange,
                                    ValueRange, const MemRefBoundsCapture &)>
                loopBodyBuilder);

  /// Common state to lower vector transfer ops.
  PatternRewriter &rewriter;
  const VectorTransferToSCFOptions &options;
  Location loc;
  std::unique_ptr<ScopedContext> scope;
  ConcreteOp xferOp;
  Operation *op;
  // A vector transfer copies data between:
  //   - memref<(leading_dims) x (major_dims) x (minor_dims) x type>
  //   - vector<(major_dims) x (minor_dims) x type>
  unsigned minorRank;         // for now always 1
  unsigned majorRank;         // vector rank - minorRank
  unsigned leadingRank;       // memref rank - vector rank
  VectorType vectorType;      // vector<(major_dims) x (minor_dims) x type>
  VectorType majorVectorType; // vector<(major_dims) x type>
  VectorType minorVectorType; // vector<(minor_dims) x type>
  MemRefType memRefMinorVectorType; // memref<vector<(minor_dims) x type>>
};

template <typename ConcreteOp>
void NDTransferOpHelper<ConcreteOp>::emitLoops(
    llvm::function_ref<void(ValueRange, ValueRange, ValueRange, ValueRange,
                            const MemRefBoundsCapture &)>
        loopBodyBuilder) {
  /// The loop nest operates on the major dimensions only.
  MemRefBoundsCapture memrefBoundsCapture(xferOp.source());

  if (options.unroll) {
    // Fully unrolled case: enumerate all major offsets as constants instead
    // of emitting loops.
    auto shape = majorVectorType.getShape();
    auto strides = computeStrides(shape);
    unsigned numUnrolledInstances = computeMaxLinearIndex(shape);
    ValueRange indices(xferOp.indices());
    for (unsigned idx = 0; idx < numUnrolledInstances; ++idx) {
      SmallVector<int64_t, 4> offsets = delinearize(strides, idx);
      SmallVector<Value, 4> offsetValues = llvm::to_vector<4>(
          llvm::map_range(offsets, [](int64_t off) -> Value {
            return std_constant_index(off);
          }));
      loopBodyBuilder(offsetValues, indices.take_front(leadingRank),
                      indices.drop_front(leadingRank).take_front(majorRank),
                      indices.take_back(minorRank), memrefBoundsCapture);
    }
  } else {
    VectorBoundsCapture vectorBoundsCapture(majorVectorType);
    auto majorLbs = vectorBoundsCapture.getLbs();
    auto majorUbs = vectorBoundsCapture.getUbs();
    auto majorSteps = vectorBoundsCapture.getSteps();
    affineLoopNestBuilder(
        majorLbs, majorUbs, majorSteps, [&](ValueRange majorIvs) {
          ValueRange indices(xferOp.indices());
          loopBodyBuilder(majorIvs, indices.take_front(leadingRank),
                          indices.drop_front(leadingRank).take_front(majorRank),
                          indices.take_back(minorRank), memrefBoundsCapture);
        });
  }
}

static Optional<int64_t> extractConstantIndex(Value v) {
  if (auto cstOp = v.getDefiningOp<ConstantIndexOp>())
    return cstOp.getValue();
  if (auto affineApplyOp = v.getDefiningOp<AffineApplyOp>())
    if (affineApplyOp.getAffineMap().isSingleConstant())
      return affineApplyOp.getAffineMap().getSingleConstantResult();
  return None;
}

// Missing foldings of scf.if make it necessary to perform poor man's folding
// eagerly, especially in the case of unrolling. In the future, this should go
// away once scf.if folds properly.
// Returns a null Value when `v < ub` is statically known to hold, and the
// `slt` comparison otherwise.
static Value onTheFlyFoldSLT(Value v, Value ub) {
  using namespace mlir::edsc::op;
  auto maybeCstV = extractConstantIndex(v);
  auto maybeCstUb = extractConstantIndex(ub);
  if (maybeCstV && maybeCstUb && *maybeCstV < *maybeCstUb)
    return Value();
  return slt(v, ub);
}

/// 1. Compute the indexings `majorIvs + majorOffsets` and save them in
///    `majorIvsPlusOffsets`.
/// 2. Return a value of i1 that determines whether the first `majorIvs.rank()`
///    dimensions of `majorIvs + majorOffsets` are all within `memrefBounds`.
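///
/// For illustration (a sketch, not IR produced verbatim): with two major ivs
/// (%i0, %i1), offsets (%o0, %o1) and memref upper bounds (%ub0, %ub1), the
/// emitted condition resembles:
///   (%i0 + %o0 < %ub0) && (%i1 + %o1 < %ub1)
/// where comparisons that are statically known to hold are folded away by
/// `onTheFlyFoldSLT`, and dimensions that are in-bounds or broadcast
/// contribute no term at all.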
static Value
emitInBoundsCondition(PatternRewriter &rewriter,
                      VectorTransferOpInterface xferOp, unsigned leadingRank,
                      ValueRange majorIvs, ValueRange majorOffsets,
                      const MemRefBoundsCapture &memrefBounds,
                      SmallVectorImpl<Value> &majorIvsPlusOffsets) {
  Value inBoundsCondition;
  majorIvsPlusOffsets.reserve(majorIvs.size());
  unsigned idx = 0;
  SmallVector<Value, 4> bounds =
      applyMapToValues(rewriter, xferOp.getLoc(), xferOp.permutation_map(),
                       memrefBounds.getUbs());
  for (auto it : llvm::zip(majorIvs, majorOffsets, bounds)) {
    Value iv = std::get<0>(it), off = std::get<1>(it), ub = std::get<2>(it);
    using namespace mlir::edsc::op;
    majorIvsPlusOffsets.push_back(iv + off);
    auto affineConstExpr =
        xferOp.permutation_map().getResult(idx).dyn_cast<AffineConstantExpr>();
    bool isBroadcast = affineConstExpr && affineConstExpr.getValue() == 0;
    if (!xferOp.isDimInBounds(leadingRank + idx) && !isBroadcast) {
      Value inBoundsCond = onTheFlyFoldSLT(majorIvsPlusOffsets.back(), ub);
      if (inBoundsCond)
        inBoundsCondition = (inBoundsCondition)
                                ? (inBoundsCondition && inBoundsCond)
                                : inBoundsCond;
    }
    ++idx;
  }
  return inBoundsCondition;
}

// TODO: Parallelism and threadlocal considerations.
static Value setAllocAtFunctionEntry(MemRefType memRefMinorVectorType,
                                     Operation *op) {
  auto &b = ScopedContext::getBuilderRef();
  OpBuilder::InsertionGuard guard(b);
  Operation *scope =
      op->getParentWithTrait<OpTrait::AutomaticAllocationScope>();
  assert(scope && "Expected op to be inside automatic allocation scope");
  b.setInsertionPointToStart(&scope->getRegion(0).front());
  Value res = memref_alloca(memRefMinorVectorType);
  return res;
}

template <>
LogicalResult NDTransferOpHelper<TransferReadOp>::doReplace() {
  Value alloc, result;
  if (options.unroll)
    result = std_splat(vectorType, xferOp.padding());
  else
    alloc = setAllocAtFunctionEntry(memRefMinorVectorType, op);

  emitLoops([&](ValueRange majorIvs, ValueRange leadingOffsets,
                ValueRange majorOffsets, ValueRange minorOffsets,
                const MemRefBoundsCapture &memrefBounds) {
    /// Lambda to load a 1-D vector in the current loop ivs + offset context.
    auto load1DVector = [&](ValueRange majorIvsPlusOffsets) -> Value {
      SmallVector<Value, 8> indexing;
      indexing.reserve(leadingRank + majorRank + minorRank);
      indexing.append(leadingOffsets.begin(), leadingOffsets.end());
      indexing.append(majorIvsPlusOffsets.begin(), majorIvsPlusOffsets.end());
      indexing.append(minorOffsets.begin(), minorOffsets.end());
      Value memref = xferOp.source();
      auto map =
          getTransferMinorIdentityMap(xferOp.getShapedType(), minorVectorType);
      ArrayAttr inBounds;
      if (xferOp.isDimInBounds(xferOp.getVectorType().getRank() - 1)) {
        OpBuilder &b = ScopedContext::getBuilderRef();
        inBounds = b.getBoolArrayAttr({true});
      }
      return vector_transfer_read(minorVectorType, memref, indexing,
                                  AffineMapAttr::get(map), xferOp.padding(),
                                  inBounds);
    };

    // 1. Compute the inBoundsCondition in the current loop ivs + offset
    //    context.
    SmallVector<Value, 4> majorIvsPlusOffsets;
    Value inBoundsCondition = emitInBoundsCondition(
        rewriter, cast<VectorTransferOpInterface>(xferOp.getOperation()),
        leadingRank, majorIvs, majorOffsets, memrefBounds, majorIvsPlusOffsets);

    if (inBoundsCondition) {
      // 2. If the condition is not null, we need an IfOp, which may yield
      //    if `options.unroll` is true.
      SmallVector<Type, 1> resultType;
      if (options.unroll)
        resultType.push_back(vectorType);

      // 3. If in-bounds, progressively lower to a 1-D transfer read,
      //    otherwise splat a 1-D vector.
      ValueRange ifResults = conditionBuilder(
          resultType, inBoundsCondition,
          [&]() -> scf::ValueVector {
            Value vector = load1DVector(majorIvsPlusOffsets);
            // 3.a. If `options.unroll` is true, insert the 1-D vector in the
            //      aggregate. We must yield and merge with the `else` branch.
            if (options.unroll) {
              vector = vector_insert(vector, result, majorIvs);
              return {vector};
            }
            // 3.b. Otherwise, just go through the temporary `alloc`.
            memref_store(vector, alloc, majorIvs);
            return {};
          },
          [&]() -> scf::ValueVector {
            Value vector = std_splat(minorVectorType, xferOp.padding());
            // 3.c. If `options.unroll` is true, insert the 1-D vector in the
            //      aggregate. We must yield and merge with the `then` branch.
            if (options.unroll) {
              vector = vector_insert(vector, result, majorIvs);
              return {vector};
            }
            // 3.d. Otherwise, just go through the temporary `alloc`.
            memref_store(vector, alloc, majorIvs);
            return {};
          });

      if (!resultType.empty())
        result = *ifResults.begin();
    } else {
      // 4. Guaranteed in-bounds, progressively lower to a 1-D transfer read.
      Value loaded1D = load1DVector(majorIvsPlusOffsets);
      // 4.a. If `options.unroll` is true, insert the 1-D vector in the
      //      aggregate.
      if (options.unroll)
        result = vector_insert(loaded1D, result, majorIvs);
      // 4.b. Otherwise, just go through the temporary `alloc`.
      else
        memref_store(loaded1D, alloc, majorIvs);
    }
  });

  assert((!options.unroll ^ (bool)result) &&
         "Expected resulting Value iff unroll");
  if (!result)
    result =
        memref_load(vector_type_cast(MemRefType::get({}, vectorType), alloc));
  rewriter.replaceOp(op, result);

  return success();
}

template <>
LogicalResult NDTransferOpHelper<TransferWriteOp>::doReplace() {
  Value alloc;
  if (!options.unroll) {
    alloc = setAllocAtFunctionEntry(memRefMinorVectorType, op);
    memref_store(xferOp.vector(),
                 vector_type_cast(MemRefType::get({}, vectorType), alloc));
  }

  emitLoops([&](ValueRange majorIvs, ValueRange leadingOffsets,
                ValueRange majorOffsets, ValueRange minorOffsets,
                const MemRefBoundsCapture &memrefBounds) {
    // Lower to 1-D vector_transfer_write and let recursion handle it.
    auto emitTransferWrite = [&](ValueRange majorIvsPlusOffsets) {
      SmallVector<Value, 8> indexing;
      indexing.reserve(leadingRank + majorRank + minorRank);
      indexing.append(leadingOffsets.begin(), leadingOffsets.end());
      indexing.append(majorIvsPlusOffsets.begin(), majorIvsPlusOffsets.end());
      indexing.append(minorOffsets.begin(), minorOffsets.end());
      Value result;
      // If `options.unroll` is true, extract the 1-D vector from the
      // aggregate.
      if (options.unroll)
        result = vector_extract(xferOp.vector(), majorIvs);
      else
        result = memref_load(alloc, majorIvs);
      auto map =
          getTransferMinorIdentityMap(xferOp.getShapedType(), minorVectorType);
      ArrayAttr inBounds;
      if (xferOp.isDimInBounds(xferOp.getVectorType().getRank() - 1)) {
        OpBuilder &b = ScopedContext::getBuilderRef();
        inBounds = b.getBoolArrayAttr({true});
      }
      vector_transfer_write(result, xferOp.source(), indexing,
                            AffineMapAttr::get(map), inBounds);
    };

    // 1. Compute the inBoundsCondition in the current loop ivs + offset
    //    context.
    SmallVector<Value, 4> majorIvsPlusOffsets;
    Value inBoundsCondition = emitInBoundsCondition(
        rewriter, cast<VectorTransferOpInterface>(xferOp.getOperation()),
        leadingRank, majorIvs, majorOffsets, memrefBounds, majorIvsPlusOffsets);

    if (inBoundsCondition) {
      // 2.a. If the condition is not null, we need an IfOp to write
      //      conditionally. Progressively lower to a 1-D transfer write.
      conditionBuilder(inBoundsCondition,
                       [&] { emitTransferWrite(majorIvsPlusOffsets); });
    } else {
      // 2.b. Guaranteed in-bounds. Progressively lower to a 1-D transfer
      //      write.
      emitTransferWrite(majorIvsPlusOffsets);
    }
  });

  rewriter.eraseOp(op);

  return success();
}

} // namespace

/// Analyzes the `transfer` to find an access dimension along the fastest
/// varying remote MemRef dimension. If such a dimension with coalescing
/// properties is found, returns its loop index so that callers can swap the
/// corresponding loop bounds and induction variables, making the invocation
/// of loopNestBuilder capture it in the innermost loop. Returns -1 otherwise.
template <typename TransferOpTy>
static int computeCoalescedIndex(TransferOpTy transfer) {
  // Rank of the remote memory access; coalescing behavior occurs on the
  // innermost memory dimension.
  auto remoteRank = transfer.getShapedType().getRank();
  // Iterate over the result expressions of the permutation map to determine
  // the loop order for creating pointwise copies between remote and local
  // memories.
  int coalescedIdx = -1;
  auto exprs = transfer.permutation_map().getResults();
  for (auto en : llvm::enumerate(exprs)) {
    auto dim = en.value().template dyn_cast<AffineDimExpr>();
    if (!dim)
      continue;
    auto memRefDim = dim.getPosition();
    if (memRefDim == remoteRank - 1) {
      // memRefDim has coalescing properties, it should be swapped in the last
      // position.
      assert(coalescedIdx == -1 && "Unexpected > 1 coalesced indices");
      coalescedIdx = en.index();
    }
  }
  return coalescedIdx;
}

template <typename TransferOpTy>
VectorTransferRewriter<TransferOpTy>::VectorTransferRewriter(
    VectorTransferToSCFOptions options, MLIRContext *context)
    : RewritePattern(TransferOpTy::getOperationName(), 1, context),
      options(options) {}

/// Used for staging the transfer in a local buffer.
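/// For example (illustrative): a transfer of vector<3x4xf32> is staged
/// through a memref<3xvector<4xf32>> buffer, i.e. the major dimensions become
/// memref dimensions while the minor dimension stays an elemental vector.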
template <typename TransferOpTy>
MemRefType VectorTransferRewriter<TransferOpTy>::tmpMemRefType(
    TransferOpTy transfer) const {
  auto vectorType = transfer.getVectorType();
  return MemRefType::get(vectorType.getShape().drop_back(),
                         VectorType::get(vectorType.getShape().take_back(),
                                         vectorType.getElementType()),
                         {}, 0);
}

static void emitWithBoundsChecks(
    PatternRewriter &rewriter, VectorTransferOpInterface transfer,
    ValueRange ivs, const MemRefBoundsCapture &memRefBoundsCapture,
    function_ref<void(ArrayRef<Value>)> inBoundsFun,
    function_ref<void(ArrayRef<Value>)> outOfBoundsFun = nullptr) {
  // Permute the incoming indices according to the permutation map.
  SmallVector<Value, 4> indices =
      applyMapToValues(rewriter, transfer.getLoc(), transfer.permutation_map(),
                       transfer.indices());

  // Generate a bounds check if necessary.
  SmallVector<Value, 4> majorIvsPlusOffsets;
  Value inBoundsCondition =
      emitInBoundsCondition(rewriter, transfer, 0, ivs, indices,
                            memRefBoundsCapture, majorIvsPlusOffsets);

  // Apply the permutation map to the ivs. The permutation map may not use all
  // the inputs.
  SmallVector<Value, 4> scalarAccessExprs(transfer.indices().size());
  for (unsigned memRefDim = 0; memRefDim < transfer.indices().size();
       ++memRefDim) {
    // Linear search on a small number of entries.
    int loopIndex = -1;
    auto exprs = transfer.permutation_map().getResults();
    for (auto en : llvm::enumerate(exprs)) {
      auto expr = en.value();
      auto dim = expr.dyn_cast<AffineDimExpr>();
      // Sanity check.
      assert((dim || expr.cast<AffineConstantExpr>().getValue() == 0) &&
             "Expected dim or 0 in permutationMap");
      if (dim && memRefDim == dim.getPosition()) {
        loopIndex = en.index();
        break;
      }
    }

    using namespace edsc::op;
    auto i = transfer.indices()[memRefDim];
    scalarAccessExprs[memRefDim] = loopIndex < 0 ? i : i + ivs[loopIndex];
  }

  if (inBoundsCondition)
    conditionBuilder(
        /* scf.if */ inBoundsCondition, // {
        [&] { inBoundsFun(scalarAccessExprs); },
        // } else {
        outOfBoundsFun ? [&] { outOfBoundsFun(scalarAccessExprs); }
                       : function_ref<void()>()
        // }
    );
  else
    inBoundsFun(scalarAccessExprs);
}

namespace mlir {

/// Lowers TransferReadOp into a combination of:
///   1. local memory allocation;
///   2. perfect loop nest over:
///      a. scalar load from the original memref, or the padding value when
///         the access is out-of-bounds;
///      b. scalar store to the local buffer (viewed as a scalar memref);
///   3. vector load from the local buffer (viewed as a memref<1 x vector>);
///   4. local memory deallocation.
///
/// Lowers the data transfer part of a TransferReadOp while ensuring no
/// out-of-bounds accesses are possible. Out-of-bounds behavior is handled by
/// padding.

/// Performs the rewrite.
template <>
LogicalResult VectorTransferRewriter<TransferReadOp>::matchAndRewrite(
    Operation *op, PatternRewriter &rewriter) const {
  using namespace mlir::edsc::op;

  TransferReadOp transfer = cast<TransferReadOp>(op);
  if (transfer.mask())
    return failure();
  auto memRefType = transfer.getShapedType().dyn_cast<MemRefType>();
  if (!memRefType)
    return failure();
  // Fall back to a loop if the fastest varying stride is not 1 or it is
  // permuted.
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  auto successStrides = getStridesAndOffset(memRefType, strides, offset);
  if (succeeded(successStrides) && strides.back() == 1 &&
      transfer.permutation_map().isMinorIdentity()) {
    // If > 1-D, emit a bunch of loops around 1-D vector transfers.
    if (transfer.getVectorType().getRank() > 1)
      return NDTransferOpHelper<TransferReadOp>(rewriter, transfer, options)
          .doReplace();
    // If 1-D, this is now handled by the target-specific lowering.
    if (transfer.getVectorType().getRank() == 1)
      return failure();
  }

  // Conservative lowering to scalar load / stores.
  // 1. Setup all the captures.
  ScopedContext scope(rewriter, transfer.getLoc());
  MemRefIndexedValue remote(transfer.source());
  MemRefBoundsCapture memRefBoundsCapture(transfer.source());
  VectorBoundsCapture vectorBoundsCapture(transfer.vector());
  int coalescedIdx = computeCoalescedIndex(transfer);
  // Swap the vectorBoundsCapture which will reorder loop bounds.
  if (coalescedIdx >= 0)
    vectorBoundsCapture.swapRanges(vectorBoundsCapture.rank() - 1,
                                   coalescedIdx);

  auto lbs = vectorBoundsCapture.getLbs();
  auto ubs = vectorBoundsCapture.getUbs();
  SmallVector<Value, 8> steps;
  steps.reserve(vectorBoundsCapture.getSteps().size());
  for (auto step : vectorBoundsCapture.getSteps())
    steps.push_back(std_constant_index(step));

  // 2. Emit alloc-copy-load-dealloc.
  MLIRContext *ctx = op->getContext();
  Value tmp = setAllocAtFunctionEntry(tmpMemRefType(transfer), transfer);
  MemRefIndexedValue local(tmp);
  loopNestBuilder(lbs, ubs, steps, [&](ValueRange loopIvs) {
    auto ivsStorage = llvm::to_vector<8>(loopIvs);
    // Swap the ivs which will reorder memory accesses.
    if (coalescedIdx >= 0)
      std::swap(ivsStorage.back(), ivsStorage[coalescedIdx]);

    ArrayRef<Value> ivs(ivsStorage);
    Value pos = std_index_cast(IntegerType::get(ctx, 32), ivs.back());
    Value inVector = local(ivs.drop_back());
    auto loadValue = [&](ArrayRef<Value> indices) {
      Value vector = vector_insert_element(remote(indices), inVector, pos);
      local(ivs.drop_back()) = vector;
    };
    auto loadPadding = [&](ArrayRef<Value>) {
      Value vector = vector_insert_element(transfer.padding(), inVector, pos);
      local(ivs.drop_back()) = vector;
    };
    emitWithBoundsChecks(
        rewriter, cast<VectorTransferOpInterface>(transfer.getOperation()),
        ivs, memRefBoundsCapture, loadValue, loadPadding);
  });
  Value vectorValue = memref_load(vector_type_cast(tmp));

  // 3. Propagate.
  rewriter.replaceOp(op, vectorValue);
  return success();
}

/// Lowers TransferWriteOp into a combination of:
///   1. local memory allocation;
///   2. vector store to the local buffer (viewed as a memref<1 x vector>);
///   3. perfect loop nest over:
///      a. scalar load from the local buffer (viewed as a scalar memref);
///      b. scalar store to the original memref (if in bounds);
///   4. local memory deallocation.
///
/// More specifically, lowers the data transfer part while ensuring no
/// out-of-bounds accesses are possible.
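///
/// In pseudo-IR (a sketch of the conservative scalar path, assuming a 1-D
/// vector of size n for brevity; not IR produced verbatim), this resembles:
///
/// ```
///   %tmp = alloca() : memref<vector<n x type>>
///   store %vector, %tmp[]
///   for %i = 0 to n {
///     if (%offset + %i < %dim) {
///       %s = extractelement %tmp[], %i
///       store %s, %remote[%offset + %i]
///     }
///   }
/// ```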
template <>
LogicalResult VectorTransferRewriter<TransferWriteOp>::matchAndRewrite(
    Operation *op, PatternRewriter &rewriter) const {
  using namespace edsc::op;

  TransferWriteOp transfer = cast<TransferWriteOp>(op);
  if (transfer.mask())
    return failure();
  auto memRefType = transfer.getShapedType().dyn_cast<MemRefType>();
  if (!memRefType)
    return failure();

  // Fall back to a loop if the fastest varying stride is not 1 or it is
  // permuted.
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  auto successStrides = getStridesAndOffset(memRefType, strides, offset);
  if (succeeded(successStrides) && strides.back() == 1 &&
      transfer.permutation_map().isMinorIdentity()) {
    // If > 1-D, emit a bunch of loops around 1-D vector transfers.
    if (transfer.getVectorType().getRank() > 1)
      return NDTransferOpHelper<TransferWriteOp>(rewriter, transfer, options)
          .doReplace();
    // If 1-D, this is now handled by the target-specific lowering.
    if (transfer.getVectorType().getRank() == 1)
      return failure();
  }

  // 1. Setup all the captures.
  ScopedContext scope(rewriter, transfer.getLoc());
  MemRefIndexedValue remote(transfer.source());
  MemRefBoundsCapture memRefBoundsCapture(transfer.source());
  Value vectorValue(transfer.vector());
  VectorBoundsCapture vectorBoundsCapture(transfer.vector());
  int coalescedIdx = computeCoalescedIndex(transfer);
  // Swap the vectorBoundsCapture which will reorder loop bounds.
  if (coalescedIdx >= 0)
    vectorBoundsCapture.swapRanges(vectorBoundsCapture.rank() - 1,
                                   coalescedIdx);

  auto lbs = vectorBoundsCapture.getLbs();
  auto ubs = vectorBoundsCapture.getUbs();
  SmallVector<Value, 8> steps;
  steps.reserve(vectorBoundsCapture.getSteps().size());
  for (auto step : vectorBoundsCapture.getSteps())
    steps.push_back(std_constant_index(step));

  // 2. Emit alloc-store-copy-dealloc.
  Value tmp = setAllocAtFunctionEntry(tmpMemRefType(transfer), transfer);
  MemRefIndexedValue local(tmp);
  Value vec = vector_type_cast(tmp);
  memref_store(vectorValue, vec);
  loopNestBuilder(lbs, ubs, steps, [&](ValueRange loopIvs) {
    auto ivsStorage = llvm::to_vector<8>(loopIvs);
    // Swap the ivsStorage which will reorder memory accesses.
    if (coalescedIdx >= 0)
      std::swap(ivsStorage.back(), ivsStorage[coalescedIdx]);

    ArrayRef<Value> ivs(ivsStorage);
    Value pos =
        std_index_cast(IntegerType::get(op->getContext(), 32), ivs.back());
    auto storeValue = [&](ArrayRef<Value> indices) {
      Value scalar = vector_extract_element(local(ivs.drop_back()), pos);
      remote(indices) = scalar;
    };
    emitWithBoundsChecks(
        rewriter, cast<VectorTransferOpInterface>(transfer.getOperation()),
        ivs, memRefBoundsCapture, storeValue);
  });

  // 3. Erase.
  rewriter.eraseOp(op);
  return success();
}

void populateVectorToSCFConversionPatterns(
    RewritePatternSet &patterns, const VectorTransferToSCFOptions &options) {
  patterns.add<VectorTransferRewriter<vector::TransferReadOp>,
               VectorTransferRewriter<vector::TransferWriteOp>>(
      options, patterns.getContext());
}

} // namespace mlir

namespace {

struct ConvertVectorToSCFPass
    : public ConvertVectorToSCFBase<ConvertVectorToSCFPass> {
  ConvertVectorToSCFPass() = default;
  ConvertVectorToSCFPass(const VectorTransferToSCFOptions &options) {
    this->fullUnroll = options.unroll;
  }

  void runOnFunction() override {
    RewritePatternSet patterns(getFunction().getContext());
    populateVectorToSCFConversionPatterns(
        patterns, VectorTransferToSCFOptions().setUnroll(fullUnroll));
    (void)applyPatternsAndFoldGreedily(getFunction(), std::move(patterns));
  }
};

} // namespace

std::unique_ptr<Pass>
mlir::createConvertVectorToSCFPass(const VectorTransferToSCFOptions &options) {
  return std::make_unique<ConvertVectorToSCFPass>(options);
}