//===- VectorToSCF.cpp - Conversion from Vector to mix of SCF and Std -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering of vector transfer operations to a mix of SCF
// and Standard operations.
//
//===----------------------------------------------------------------------===//

#include <type_traits>

#include "mlir/Conversion/VectorToSCF/VectorToSCF.h"

#include "../PassDetail.h"
#include "mlir/Dialect/Affine/EDSC/Intrinsics.h"
#include "mlir/Dialect/SCF/EDSC/Builders.h"
#include "mlir/Dialect/SCF/EDSC/Intrinsics.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/Dialect/Vector/EDSC/Intrinsics.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/Dialect/Vector/VectorUtils.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Types.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/Passes.h"

using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
using vector::TransferReadOp;
using vector::TransferWriteOp;

namespace {
/// Helper class that captures the common information needed to lower N>1-D
/// vector transfer operations (read and write).
/// On construction, this class opens an edsc::ScopedContext for simpler IR
/// manipulation.
/// In pseudo-IR, for an n-D vector_transfer_read such as:
///
/// ```
///   vector_transfer_read(%m, %offsets, identity_map, %fill) :
///     memref<(leading_dims) x (major_dims) x (minor_dims) x type>,
///     vector<(major_dims) x (minor_dims) x type>
/// ```
///
/// where rank(minor_dims) is the rank of the lower-level vector (e.g. 1 when
/// lowering to LLVM, or higher).
///
/// This is the entry point to emitting pseudo-IR resembling:
///
/// ```
///   %tmp = alloc(): memref<(major_dims) x vector<minor_dims x type>>
///   for (%ivs_major, {0}, {major_dims}, {1}) { // (N-1)-D loop nest
///     if (all_of(%ivs_major + %offsets_major, <, major_dims)) {
///       %v = vector_transfer_read(
///         {%offsets_leading, %ivs_major + %offsets_major, %offsets_minor},
///          %ivs_minor):
///         memref<(leading_dims) x (major_dims) x (minor_dims) x type>,
///         vector<(minor_dims) x type>;
///       store(%v, %tmp, %ivs_major);
///     } else {
///       %v = splat(vector<(minor_dims) x type>, %fill)
///       store(%v, %tmp, %ivs_major);
///     }
///   }
///   %res = load(%tmp, %0):
///     memref<(major_dims) x vector<minor_dims x type>>,
///     vector<(major_dims) x (minor_dims) x type>
/// ```
///
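/// For an n-D vector_transfer_write the scheme is symmetric. As a rough,
/// illustrative sketch (not the literal emitted IR):
///
/// ```
///   %tmp = alloc(): memref<(major_dims) x vector<minor_dims x type>>
///   store(%vec, %tmp)  // stage the full n-D vector in the temporary buffer
///   for (%ivs_major, {0}, {major_dims}, {1}) { // (N-1)-D loop nest
///     if (all_of(%ivs_major + %offsets_major, <, major_dims)) {
///       %v = load(%tmp, %ivs_major): vector<(minor_dims) x type>
///       vector_transfer_write(%v, %m,
///         {%offsets_leading, %ivs_major + %offsets_major, %offsets_minor}):
///         memref<(leading_dims) x (major_dims) x (minor_dims) x type>,
///         vector<(minor_dims) x type>
///     }
///   }
/// ```
///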
template <typename ConcreteOp>
class NDTransferOpHelper {
public:
  NDTransferOpHelper(PatternRewriter &rewriter, ConcreteOp xferOp,
                     const VectorTransferToSCFOptions &options)
      : rewriter(rewriter), options(options), loc(xferOp.getLoc()),
        scope(std::make_unique<ScopedContext>(rewriter, loc)), xferOp(xferOp),
        op(xferOp.getOperation()) {
    vectorType = xferOp.getVectorType();
    // TODO: adapt minorRank once we support k > 1-D minor vectors.
    minorRank = 1;
    majorRank = vectorType.getRank() - minorRank;
    leadingRank = xferOp.getLeadingMemRefRank();
    majorVectorType =
        VectorType::get(vectorType.getShape().take_front(majorRank),
                        vectorType.getElementType());
    minorVectorType =
        VectorType::get(vectorType.getShape().take_back(minorRank),
                        vectorType.getElementType());
    // A memref of minor vector type is used for the individual 1-D transfers.
    memRefMinorVectorType =
        MemRefType::get(majorVectorType.getShape(), minorVectorType, {},
                        xferOp.getMemRefType().getMemorySpace());
  }

  LogicalResult doReplace();

private:
  /// Creates the loop nest on the "major" dimensions and calls the
  /// `loopBodyBuilder` lambda in the context of the loop nest.
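  /// The `loopBodyBuilder` lambda receives, in order: the major induction
  /// variables (or constant offsets when `options.unroll` is set), the
  /// leading, major and minor offsets of the transfer, and a
  /// `MemRefBoundsCapture` of the transferred memref.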
  template <typename Lambda>
  void emitLoops(Lambda loopBodyBuilder);

  /// Operates within the body of `emitLoops` to:
  ///   1. Compute the indexings `majorIvs + majorOffsets` and save them in
  ///      `majorIvsPlusOffsets`.
  ///   2. Return a boolean that determines whether the first `majorIvs.size()`
  ///      dimensions of `majorIvs + majorOffsets` are all within
  ///      `memrefBounds`.
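  /// For example, with two masked major dimensions, the emitted condition is
  /// roughly `(iv0 + off0 < ub0) && (iv1 + off1 < ub1)`, with terms that are
  /// provably in-bounds folded away (see `onTheFlyFoldSLT` below).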
  Value emitInBoundsCondition(ValueRange majorIvs, ValueRange majorOffsets,
                              MemRefBoundsCapture &memrefBounds,
                              SmallVectorImpl<Value> &majorIvsPlusOffsets);

  /// Common state to lower vector transfer ops.
  PatternRewriter &rewriter;
  const VectorTransferToSCFOptions &options;
  Location loc;
  std::unique_ptr<ScopedContext> scope;
  ConcreteOp xferOp;
  Operation *op;
  // A vector transfer copies data between:
  //   - memref<(leading_dims) x (major_dims) x (minor_dims) x type>
  //   - vector<(major_dims) x (minor_dims) x type>
  unsigned minorRank;         // for now always 1
  unsigned majorRank;         // vector rank - minorRank
  unsigned leadingRank;       // memref rank - vector rank
  VectorType vectorType;      // vector<(major_dims) x (minor_dims) x type>
  VectorType majorVectorType; // vector<(major_dims) x type>
  VectorType minorVectorType; // vector<(minor_dims) x type>
  // memref<(major_dims) x vector<(minor_dims) x type>>
  MemRefType memRefMinorVectorType;
};

template <typename ConcreteOp>
template <typename Lambda>
void NDTransferOpHelper<ConcreteOp>::emitLoops(Lambda loopBodyBuilder) {
  // The loop nest operates on the major dimensions.
  MemRefBoundsCapture memrefBoundsCapture(xferOp.memref());

  if (options.unroll) {
    auto shape = majorVectorType.getShape();
    auto strides = computeStrides(shape);
    unsigned numUnrolledInstances = computeMaxLinearIndex(shape);
    ValueRange indices(xferOp.indices());
    for (unsigned idx = 0; idx < numUnrolledInstances; ++idx) {
      SmallVector<int64_t, 4> offsets = delinearize(strides, idx);
      SmallVector<Value, 4> offsetValues =
          llvm::to_vector<4>(llvm::map_range(offsets, [](int64_t off) -> Value {
            return std_constant_index(off);
          }));
      loopBodyBuilder(offsetValues, indices.take_front(leadingRank),
                      indices.drop_front(leadingRank).take_front(majorRank),
                      indices.take_back(minorRank), memrefBoundsCapture);
    }
  } else {
    VectorBoundsCapture vectorBoundsCapture(majorVectorType);
    auto majorLbs = vectorBoundsCapture.getLbs();
    auto majorUbs = vectorBoundsCapture.getUbs();
    auto majorSteps = vectorBoundsCapture.getSteps();
    affineLoopNestBuilder(
        majorLbs, majorUbs, majorSteps, [&](ValueRange majorIvs) {
          ValueRange indices(xferOp.indices());
          loopBodyBuilder(majorIvs, indices.take_front(leadingRank),
                          indices.drop_front(leadingRank).take_front(majorRank),
                          indices.take_back(minorRank), memrefBoundsCapture);
        });
  }
}

static Optional<int64_t> extractConstantIndex(Value v) {
  if (auto cstOp = v.getDefiningOp<ConstantIndexOp>())
    return cstOp.getValue();
  if (auto affineApplyOp = v.getDefiningOp<AffineApplyOp>())
    if (affineApplyOp.getAffineMap().isSingleConstant())
      return affineApplyOp.getAffineMap().getSingleConstantResult();
  return None;
}

// Missing foldings of scf.if make it necessary to perform poor man's folding
// eagerly, especially in the case of unrolling. In the future, this should go
// away once scf.if folds properly.
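// For example, `onTheFlyFoldSLT(%c2, %c8)` returns a null Value because
// 2 < 8 is provable; callers interpret a null result as "always in-bounds"
// and skip the corresponding check.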
static Value onTheFlyFoldSLT(Value v, Value ub) {
  using namespace mlir::edsc::op;
  auto maybeCstV = extractConstantIndex(v);
  auto maybeCstUb = extractConstantIndex(ub);
  if (maybeCstV && maybeCstUb && *maybeCstV < *maybeCstUb)
    return Value();
  return slt(v, ub);
}

template <typename ConcreteOp>
Value NDTransferOpHelper<ConcreteOp>::emitInBoundsCondition(
    ValueRange majorIvs, ValueRange majorOffsets,
    MemRefBoundsCapture &memrefBounds,
    SmallVectorImpl<Value> &majorIvsPlusOffsets) {
  Value inBoundsCondition;
  majorIvsPlusOffsets.reserve(majorIvs.size());
  unsigned idx = 0;
  for (auto it : llvm::zip(majorIvs, majorOffsets, memrefBounds.getUbs())) {
    Value iv = std::get<0>(it), off = std::get<1>(it), ub = std::get<2>(it);
    using namespace mlir::edsc::op;
    majorIvsPlusOffsets.push_back(iv + off);
    if (xferOp.isMaskedDim(leadingRank + idx)) {
      Value inBoundsCond = onTheFlyFoldSLT(majorIvsPlusOffsets.back(), ub);
      if (inBoundsCond)
        inBoundsCondition = (inBoundsCondition)
                                ? (inBoundsCondition && inBoundsCond)
                                : inBoundsCond;
    }
    ++idx;
  }
  return inBoundsCondition;
}

// TODO: Parallelism and threadlocal considerations.
static Value setAllocAtFunctionEntry(MemRefType memRefMinorVectorType,
                                     Operation *op) {
  auto &b = ScopedContext::getBuilderRef();
  OpBuilder::InsertionGuard guard(b);
  b.setInsertionPointToStart(&op->getParentOfType<FuncOp>().front());
  Value res =
      std_alloca(memRefMinorVectorType, ValueRange{}, b.getI64IntegerAttr(128));
  return res;
}

template <>
LogicalResult NDTransferOpHelper<TransferReadOp>::doReplace() {
  Value alloc, result;
  if (options.unroll)
    result = std_splat(vectorType, xferOp.padding());
  else
    alloc = setAllocAtFunctionEntry(memRefMinorVectorType, op);

  emitLoops([&](ValueRange majorIvs, ValueRange leadingOffsets,
                ValueRange majorOffsets, ValueRange minorOffsets,
                MemRefBoundsCapture &memrefBounds) {
    // Lambda to load a 1-D vector in the current loop ivs + offset context.
    auto load1DVector = [&](ValueRange majorIvsPlusOffsets) -> Value {
      SmallVector<Value, 8> indexing;
      indexing.reserve(leadingRank + majorRank + minorRank);
      indexing.append(leadingOffsets.begin(), leadingOffsets.end());
      indexing.append(majorIvsPlusOffsets.begin(), majorIvsPlusOffsets.end());
      indexing.append(minorOffsets.begin(), minorOffsets.end());
      Value memref = xferOp.memref();
      auto map =
          getTransferMinorIdentityMap(xferOp.getMemRefType(), minorVectorType);
      ArrayAttr masked;
      if (!xferOp.isMaskedDim(xferOp.getVectorType().getRank() - 1)) {
        OpBuilder &b = ScopedContext::getBuilderRef();
        masked = b.getBoolArrayAttr({false});
      }
      return vector_transfer_read(minorVectorType, memref, indexing,
                                  AffineMapAttr::get(map), xferOp.padding(),
                                  masked);
    };

    // 1. Compute the inBoundsCondition in the current loop ivs + offset
    // context.
    SmallVector<Value, 4> majorIvsPlusOffsets;
    Value inBoundsCondition = emitInBoundsCondition(
        majorIvs, majorOffsets, memrefBounds, majorIvsPlusOffsets);

    if (inBoundsCondition) {
      // 2. If the condition is not null, we need an IfOp, which may yield
      // if `options.unroll` is true.
      SmallVector<Type, 1> resultType;
      if (options.unroll)
        resultType.push_back(vectorType);

      // 3. If in-bounds, progressively lower to a 1-D transfer read, otherwise
      // splat a 1-D vector.
      ValueRange ifResults = conditionBuilder(
          resultType, inBoundsCondition,
          [&]() -> scf::ValueVector {
            Value vector = load1DVector(majorIvsPlusOffsets);
            // 3.a. If `options.unroll` is true, insert the 1-D vector in the
            // aggregate. We must yield and merge with the `else` branch.
            if (options.unroll) {
              vector = vector_insert(vector, result, majorIvs);
              return {vector};
            }
            // 3.b. Otherwise, just go through the temporary `alloc`.
            std_store(vector, alloc, majorIvs);
            return {};
          },
          [&]() -> scf::ValueVector {
            Value vector = std_splat(minorVectorType, xferOp.padding());
            // 3.c. If `options.unroll` is true, insert the 1-D vector in the
            // aggregate. We must yield and merge with the `then` branch.
            if (options.unroll) {
              vector = vector_insert(vector, result, majorIvs);
              return {vector};
            }
            // 3.d. Otherwise, just go through the temporary `alloc`.
            std_store(vector, alloc, majorIvs);
            return {};
          });

      if (!resultType.empty())
        result = *ifResults.begin();
    } else {
      // 4. Guaranteed in-bounds, progressively lower to a 1-D transfer read.
      Value loaded1D = load1DVector(majorIvsPlusOffsets);
      // 4.a. If `options.unroll` is true, insert the 1-D vector in the
      // aggregate.
      if (options.unroll)
        result = vector_insert(loaded1D, result, majorIvs);
      // 4.b. Otherwise, just go through the temporary `alloc`.
      else
        std_store(loaded1D, alloc, majorIvs);
    }
  });

  assert((!options.unroll ^ (bool)result) &&
         "Expected resulting Value iff unroll");
  if (!result)
    result = std_load(vector_type_cast(MemRefType::get({}, vectorType), alloc));
  rewriter.replaceOp(op, result);

  return success();
}

template <>
LogicalResult NDTransferOpHelper<TransferWriteOp>::doReplace() {
  Value alloc;
  if (!options.unroll) {
    alloc = setAllocAtFunctionEntry(memRefMinorVectorType, op);
    std_store(xferOp.vector(),
              vector_type_cast(MemRefType::get({}, vectorType), alloc));
  }

  emitLoops([&](ValueRange majorIvs, ValueRange leadingOffsets,
                ValueRange majorOffsets, ValueRange minorOffsets,
                MemRefBoundsCapture &memrefBounds) {
    // Lower to 1-D vector_transfer_write and let recursion handle it.
    auto emitTransferWrite = [&](ValueRange majorIvsPlusOffsets) {
      SmallVector<Value, 8> indexing;
      indexing.reserve(leadingRank + majorRank + minorRank);
      indexing.append(leadingOffsets.begin(), leadingOffsets.end());
      indexing.append(majorIvsPlusOffsets.begin(), majorIvsPlusOffsets.end());
      indexing.append(minorOffsets.begin(), minorOffsets.end());
      Value result;
      // If `options.unroll` is true, extract the 1-D vector from the
      // aggregate.
      if (options.unroll)
        result = vector_extract(xferOp.vector(), majorIvs);
      else
        result = std_load(alloc, majorIvs);
      auto map =
          getTransferMinorIdentityMap(xferOp.getMemRefType(), minorVectorType);
      ArrayAttr masked;
      if (!xferOp.isMaskedDim(xferOp.getVectorType().getRank() - 1)) {
        OpBuilder &b = ScopedContext::getBuilderRef();
        masked = b.getBoolArrayAttr({false});
      }
      vector_transfer_write(result, xferOp.memref(), indexing,
                            AffineMapAttr::get(map), masked);
    };

    // 1. Compute the inBoundsCondition in the current loop ivs + offset
    // context.
    SmallVector<Value, 4> majorIvsPlusOffsets;
    Value inBoundsCondition = emitInBoundsCondition(
        majorIvs, majorOffsets, memrefBounds, majorIvsPlusOffsets);

    if (inBoundsCondition) {
      // 2.a. If the condition is not null, we need an IfOp to write
      // conditionally. Progressively lower to a 1-D transfer write.
      conditionBuilder(inBoundsCondition,
                       [&] { emitTransferWrite(majorIvsPlusOffsets); });
    } else {
      // 2.b. Guaranteed in-bounds. Progressively lower to a 1-D transfer write.
      emitTransferWrite(majorIvsPlusOffsets);
    }
  });

  rewriter.eraseOp(op);

  return success();
}

} // namespace

/// Analyzes `transfer` to find an access dimension along the fastest varying
/// (innermost) MemRef dimension. If such a dimension with coalescing
/// properties is found, its index in the permutation map results is returned;
/// otherwise -1 is returned. Callers swap the corresponding loop bounds and
/// induction variables so that this dimension is captured by the innermost
/// loop.
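/// For example, with a rank-3 memref and permutation map
/// `(d0, d1, d2) -> (d2, d1)`, result 0 maps to the innermost memref
/// dimension d2, so 0 is returned and the corresponding loop is later made
/// the innermost one.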
template <typename TransferOpTy>
static int computeCoalescedIndex(TransferOpTy transfer) {
  // Rank of the remote memory access; coalescing behavior occurs on the
  // innermost memory dimension.
  auto remoteRank = transfer.getMemRefType().getRank();
  // Iterate over the result expressions of the permutation map to determine
  // the loop order for creating pointwise copies between remote and local
  // memories.
  int coalescedIdx = -1;
  auto exprs = transfer.permutation_map().getResults();
  for (auto en : llvm::enumerate(exprs)) {
    auto dim = en.value().template dyn_cast<AffineDimExpr>();
    if (!dim) {
      continue;
    }
    auto memRefDim = dim.getPosition();
    if (memRefDim == remoteRank - 1) {
      // memRefDim has coalescing properties, it should be swapped into the
      // last position.
      assert(coalescedIdx == -1 && "Unexpected > 1 coalesced indices");
      coalescedIdx = en.index();
    }
  }
  return coalescedIdx;
}

/// Emits remote memory accesses that are clipped to the boundaries of the
/// MemRef.
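/// For a remote dimension of size `N` and a scalar access index `i` (plus the
/// matching loop induction variable when the dimension appears in the
/// permutation map), the clipped index is, in pseudo-code:
///   clipped = select(i < 0, 0, select(i < N, i, N - 1))
/// i.e. every access is clamped to the valid range [0, N-1].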
template <typename TransferOpTy>
static SmallVector<Value, 8>
clip(TransferOpTy transfer, MemRefBoundsCapture &bounds, ArrayRef<Value> ivs) {
  using namespace mlir::edsc;

  Value zero(std_constant_index(0)), one(std_constant_index(1));
  SmallVector<Value, 8> memRefAccess(transfer.indices());
  SmallVector<Value, 8> clippedScalarAccessExprs(memRefAccess.size());
  // Indices accessing remote memory are clipped and their expressions are
  // returned in clippedScalarAccessExprs.
  for (unsigned memRefDim = 0; memRefDim < clippedScalarAccessExprs.size();
       ++memRefDim) {
    // Linear search on a small number of entries.
    int loopIndex = -1;
    auto exprs = transfer.permutation_map().getResults();
    for (auto en : llvm::enumerate(exprs)) {
      auto expr = en.value();
      auto dim = expr.template dyn_cast<AffineDimExpr>();
      // Sanity check.
      assert(
          (dim || expr.template cast<AffineConstantExpr>().getValue() == 0) &&
          "Expected dim or 0 in permutationMap");
      if (dim && memRefDim == dim.getPosition()) {
        loopIndex = en.index();
        break;
      }
    }

    // We cannot currently distinguish unrolled dimensions that implement the
    // "always full" tile abstraction (and thus do not need clipping) from the
    // other ones, so we conservatively clip everything.
    using namespace edsc::op;
    auto N = bounds.ub(memRefDim);
    auto i = memRefAccess[memRefDim];
    if (loopIndex < 0) {
      auto N_minus_1 = N - one;
      auto select_1 = std_select(slt(i, N), i, N_minus_1);
      clippedScalarAccessExprs[memRefDim] =
          std_select(slt(i, zero), zero, select_1);
    } else {
      auto ii = ivs[loopIndex];
      auto i_plus_ii = i + ii;
      auto N_minus_1 = N - one;
      auto select_1 = std_select(slt(i_plus_ii, N), i_plus_ii, N_minus_1);
      clippedScalarAccessExprs[memRefDim] =
          std_select(slt(i_plus_ii, zero), zero, select_1);
    }
  }

  return clippedScalarAccessExprs;
}

namespace mlir {

template <typename TransferOpTy>
VectorTransferRewriter<TransferOpTy>::VectorTransferRewriter(
    VectorTransferToSCFOptions options, MLIRContext *context)
    : RewritePattern(TransferOpTy::getOperationName(), 1, context),
      options(options) {}

/// Used for staging the transfer in a local buffer.
template <typename TransferOpTy>
MemRefType VectorTransferRewriter<TransferOpTy>::tmpMemRefType(
    TransferOpTy transfer) const {
  auto vectorType = transfer.getVectorType();
  return MemRefType::get(vectorType.getShape(), vectorType.getElementType(), {},
                         0);
}
/// Lowers TransferReadOp into a combination of:
///   1. local memory allocation;
///   2. perfect loop nest over:
///      a. scalar load from original memref (with clipping);
///      b. scalar store to local buffer (viewed as a scalar memref);
///   3. vector load from local buffer (viewed as a memref<vector>);
///   4. local memory deallocation.
/// A rough pseudo-IR sketch of the emitted code follows.
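///
/// In pseudo-IR, the emitted code is roughly of the following shape (sketch
/// only; the actual IR is built from std and scf ops):
///
/// ```
///   %tmp = alloc() : memref<(vector_shape) x type>
///   for %ivs = (0 .. vector_shape) {
///     %clipped = clamp(%indices + %ivs, 0, memref_shape - 1)
///     %s = load %m[%clipped]
///     store %s, %tmp[%ivs]
///   }
///   %res = load (vector_type_cast %tmp) : vector<(vector_shape) x type>
///   dealloc %tmp
/// ```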
///
/// Lowers the data transfer part of a TransferReadOp while ensuring no
/// out-of-bounds accesses are possible. Out-of-bounds behavior is handled by
/// clipping. This means that a given value in memory can be read multiple
/// times and concurrently.
///
/// Important notes about clipping and "full-tiles only" abstraction:
/// =================================================================
/// When using clipping for dealing with boundary conditions, the same edge
/// value will appear multiple times (a.k.a edge padding). This is fine if the
/// subsequent vector operations are all data-parallel but **is generally
/// incorrect** in the presence of reductions or extract operations.
///
/// More generally, clipping is a scalar abstraction that is expected to work
/// fine as a baseline for CPUs and GPUs but not for vector_load and DMAs.
/// To deal with real vector_load and DMAs, a "padded allocation + view"
/// abstraction with the ability to read out-of-memref-bounds (but still within
/// the allocated region) is necessary.
///
/// Whether using scalar loops or vector_load/DMAs to perform the transfer,
/// junk values will be materialized in the vectors and generally need to be
/// filtered out and replaced by the "neutral element". This neutral element is
/// op-dependent so, in the future, we expect to create a vector filter and
/// apply it to a splatted constant vector with the proper neutral element at
/// each ssa-use. This filtering is not necessary for pure data-parallel
/// operations.
///
/// In the case of vector_store/DMAs, Read-Modify-Write will be required, which
/// also has concurrency implications. Note that by using clipped scalar stores
/// in the presence of data-parallel only operations, we generate code that
/// writes the same value multiple times on the edge locations.
///
/// TODO: implement alternatives to clipping.
/// TODO: support non-data-parallel operations.

/// Performs the rewrite.
template <>
LogicalResult VectorTransferRewriter<TransferReadOp>::matchAndRewrite(
    Operation *op, PatternRewriter &rewriter) const {
  using namespace mlir::edsc::op;

  TransferReadOp transfer = cast<TransferReadOp>(op);
  if (transfer.permutation_map().isMinorIdentity()) {
    // If > 1D, emit a bunch of loops around 1-D vector transfers.
    if (transfer.getVectorType().getRank() > 1)
      return NDTransferOpHelper<TransferReadOp>(rewriter, transfer, options)
          .doReplace();
    // If 1-D this is now handled by the target-specific lowering.
    if (transfer.getVectorType().getRank() == 1)
      return failure();
  }

  // Conservative lowering to scalar load / stores.
  // 1. Setup all the captures.
  ScopedContext scope(rewriter, transfer.getLoc());
  StdIndexedValue remote(transfer.memref());
  MemRefBoundsCapture memRefBoundsCapture(transfer.memref());
  VectorBoundsCapture vectorBoundsCapture(transfer.vector());
  int coalescedIdx = computeCoalescedIndex(transfer);
  // Swap the vectorBoundsCapture which will reorder loop bounds.
  if (coalescedIdx >= 0)
    vectorBoundsCapture.swapRanges(vectorBoundsCapture.rank() - 1,
                                   coalescedIdx);

  auto lbs = vectorBoundsCapture.getLbs();
  auto ubs = vectorBoundsCapture.getUbs();
  SmallVector<Value, 8> steps;
  steps.reserve(vectorBoundsCapture.getSteps().size());
  for (auto step : vectorBoundsCapture.getSteps())
    steps.push_back(std_constant_index(step));

  // 2. Emit alloc-copy-load-dealloc.
  Value tmp = std_alloc(tmpMemRefType(transfer));
  StdIndexedValue local(tmp);
  Value vec = vector_type_cast(tmp);
  loopNestBuilder(lbs, ubs, steps, [&](ValueRange loopIvs) {
    auto ivs = llvm::to_vector<8>(loopIvs);
    // Swap the ivs which will reorder memory accesses.
    if (coalescedIdx >= 0)
      std::swap(ivs.back(), ivs[coalescedIdx]);
    // Computes clippedScalarAccessExprs in the loop nest scope (ivs exist).
    local(ivs) = remote(clip(transfer, memRefBoundsCapture, ivs));
  });
  Value vectorValue = std_load(vec);
  (std_dealloc(tmp)); // vexing parse

  // 3. Propagate.
  rewriter.replaceOp(op, vectorValue);
  return success();
}

/// Lowers TransferWriteOp into a combination of:
///   1. local memory allocation;
///   2. vector store to local buffer (viewed as a memref<vector>);
///   3. perfect loop nest over:
///      a. scalar load from local buffer (viewed as a scalar memref);
///      b. scalar store to original memref (with clipping);
///   4. local memory deallocation.
/// A rough pseudo-IR sketch of the emitted code follows.
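///
/// In pseudo-IR, the emitted code is roughly (sketch only):
///
/// ```
///   %tmp = alloc() : memref<(vector_shape) x type>
///   store %vec, (vector_type_cast %tmp)
///   for %ivs = (0 .. vector_shape) {
///     %s = load %tmp[%ivs]
///     %clipped = clamp(%indices + %ivs, 0, memref_shape - 1)
///     store %s, %m[%clipped]
///   }
///   dealloc %tmp
/// ```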
///
/// More specifically, lowers the data transfer part while ensuring no
/// out-of-bounds accesses are possible. Out-of-bounds behavior is handled by
/// clipping. This means that a given value in memory can be written to multiple
/// times and concurrently.
///
/// See `Important notes about clipping and full-tiles only abstraction` in the
/// description of the TransferReadOp lowering above.
///
/// TODO: implement alternatives to clipping.
/// TODO: support non-data-parallel operations.
template <>
LogicalResult VectorTransferRewriter<TransferWriteOp>::matchAndRewrite(
    Operation *op, PatternRewriter &rewriter) const {
  using namespace edsc::op;

  TransferWriteOp transfer = cast<TransferWriteOp>(op);
  if (transfer.permutation_map().isMinorIdentity()) {
    // If > 1D, emit a bunch of loops around 1-D vector transfers.
    if (transfer.getVectorType().getRank() > 1)
      return NDTransferOpHelper<TransferWriteOp>(rewriter, transfer, options)
          .doReplace();
    // If 1-D this is now handled by the target-specific lowering.
    if (transfer.getVectorType().getRank() == 1)
      return failure();
  }

  // 1. Setup all the captures.
  ScopedContext scope(rewriter, transfer.getLoc());
  StdIndexedValue remote(transfer.memref());
  MemRefBoundsCapture memRefBoundsCapture(transfer.memref());
  Value vectorValue(transfer.vector());
  VectorBoundsCapture vectorBoundsCapture(transfer.vector());
  int coalescedIdx = computeCoalescedIndex(transfer);
  // Swap the vectorBoundsCapture which will reorder loop bounds.
  if (coalescedIdx >= 0)
    vectorBoundsCapture.swapRanges(vectorBoundsCapture.rank() - 1,
                                   coalescedIdx);

  auto lbs = vectorBoundsCapture.getLbs();
  auto ubs = vectorBoundsCapture.getUbs();
  SmallVector<Value, 8> steps;
  steps.reserve(vectorBoundsCapture.getSteps().size());
  for (auto step : vectorBoundsCapture.getSteps())
    steps.push_back(std_constant_index(step));

  // 2. Emit alloc-store-copy-dealloc.
  Value tmp = std_alloc(tmpMemRefType(transfer));
  StdIndexedValue local(tmp);
  Value vec = vector_type_cast(tmp);
  std_store(vectorValue, vec);
  loopNestBuilder(lbs, ubs, steps, [&](ValueRange loopIvs) {
    auto ivs = llvm::to_vector<8>(loopIvs);
    // Swap the ivs which will reorder memory accesses.
    if (coalescedIdx >= 0)
      std::swap(ivs.back(), ivs[coalescedIdx]);
    // Computes clippedScalarAccessExprs in the loop nest scope (ivs exist).
    remote(clip(transfer, memRefBoundsCapture, ivs)) = local(ivs);
  });
  (std_dealloc(tmp)); // vexing parse...

  rewriter.eraseOp(op);
  return success();
}

void populateVectorToSCFConversionPatterns(
    OwningRewritePatternList &patterns, MLIRContext *context,
    const VectorTransferToSCFOptions &options) {
  patterns.insert<VectorTransferRewriter<vector::TransferReadOp>,
                  VectorTransferRewriter<vector::TransferWriteOp>>(options,
                                                                   context);
}

} // namespace mlir

namespace {

struct ConvertVectorToSCFPass
    : public ConvertVectorToSCFBase<ConvertVectorToSCFPass> {
  ConvertVectorToSCFPass() = default;
  ConvertVectorToSCFPass(const VectorTransferToSCFOptions &options) {
    this->fullUnroll = options.unroll;
  }

  void runOnFunction() override {
    OwningRewritePatternList patterns;
    auto *context = getFunction().getContext();
    populateVectorToSCFConversionPatterns(
        patterns, context, VectorTransferToSCFOptions().setUnroll(fullUnroll));
    applyPatternsAndFoldGreedily(getFunction(), patterns);
  }
};

} // namespace

std::unique_ptr<Pass>
mlir::createConvertVectorToSCFPass(const VectorTransferToSCFOptions &options) {
  return std::make_unique<ConvertVectorToSCFPass>(options);
}
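
// Example usage (illustrative only; assumes a caller-provided `PassManager pm`
// over a module containing functions):
//   pm.nest<FuncOp>().addPass(createConvertVectorToSCFPass(
//       VectorTransferToSCFOptions().setUnroll(true)));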