//===- VectorToSCF.cpp - Conversion from Vector to mix of SCF and Std -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements target-dependent lowering of vector transfer operations.
//
//===----------------------------------------------------------------------===//

#include <type_traits>

#include "mlir/Conversion/VectorToSCF/VectorToSCF.h"
#include "mlir/Dialect/Affine/EDSC/Intrinsics.h"
#include "mlir/Dialect/SCF/EDSC/Builders.h"
#include "mlir/Dialect/SCF/EDSC/Intrinsics.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/Dialect/Vector/EDSC/Intrinsics.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/Dialect/Vector/VectorUtils.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Types.h"

using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
using vector::TransferReadOp;
using vector::TransferWriteOp;

namespace {
/// Helper class that captures the common information needed to lower N>1-D
/// vector transfer operations (read and write).
/// On construction, this class opens an edsc::ScopedContext for simpler IR
/// manipulation.
/// In pseudo-IR, for an n-D vector_transfer_read such as:
///
/// ```
///   vector_transfer_read(%m, %offsets, identity_map, %fill) :
///     memref<(leading_dims) x (major_dims) x (minor_dims) x type>,
///     vector<(major_dims) x (minor_dims) x type>
/// ```
///
/// where rank(minor_dims) is the rank handled by the lower-level vector
/// transfers (e.g. 1 when targeting LLVM, possibly higher for other targets).
///
/// This is the entry point to emitting pseudo-IR resembling:
///
/// ```
///   %tmp = alloc(): memref<(major_dims) x vector<minor_dim x type>>
///   for (%ivs_major, {0}, {vector_shape}, {1}) { // (N-1)-D loop nest
///     if (all_of(%ivs_major + %offsets, <, major_dims)) {
///       %v = vector_transfer_read(
///         {%offsets_leading, %ivs_major + %offsets_major, %offsets_minor},
///          %ivs_minor):
///         memref<(leading_dims) x (major_dims) x (minor_dims) x type>,
///         vector<(minor_dims) x type>;
///       store(%v, %tmp);
///     } else {
///       %v = splat(vector<(minor_dims) x type>, %fill)
///       store(%v, %tmp, %ivs_major);
///     }
///   }
///   %res = load(%tmp, %0): memref<(major_dims) x vector<minor_dim x type>>:
///     vector<(major_dims) x (minor_dims) x type>
/// ```
///
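/// As a concrete illustration (shapes invented for this comment): for a
/// transfer between memref<?x?x?xf32> and vector<3x8xf32>, we get
/// leadingRank = 1, majorRank = 1, minorRank = 1,
/// majorVectorType = vector<3xf32>, minorVectorType = vector<8xf32>, and the
/// staging buffer type memRefMinorVectorType = memref<3xvector<8xf32>>.
///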
template <typename ConcreteOp>
class NDTransferOpHelper {
public:
  NDTransferOpHelper(PatternRewriter &rewriter, ConcreteOp xferOp,
                     const VectorTransferToSCFOptions &options)
      : rewriter(rewriter), options(options), loc(xferOp.getLoc()),
        scope(std::make_unique<ScopedContext>(rewriter, loc)), xferOp(xferOp),
        op(xferOp.getOperation()) {
    vectorType = xferOp.getVectorType();
    // TODO(ntv, ajcbik): when we go to k > 1-D vectors adapt minorRank.
    minorRank = 1;
    majorRank = vectorType.getRank() - minorRank;
    leadingRank = xferOp.getMemRefType().getRank() - (majorRank + minorRank);
    majorVectorType =
        VectorType::get(vectorType.getShape().take_front(majorRank),
                        vectorType.getElementType());
    minorVectorType =
        VectorType::get(vectorType.getShape().take_back(minorRank),
                        vectorType.getElementType());
    // A memref of the minor vector type is used for the individual transfers.
    memRefMinorVectorType =
        MemRefType::get(majorVectorType.getShape(), minorVectorType, {},
                        xferOp.getMemRefType().getMemorySpace());
  }

  LogicalResult doReplace();

private:
  /// Creates the loop nest on the "major" dimensions and calls the
  /// `loopBodyBuilder` lambda in the context of the loop nest.
  template <typename Lambda>
  void emitLoops(Lambda loopBodyBuilder);

  /// Operate within the body of `emitLoops` to:
  ///   1. Compute the indexings `majorIvs + majorOffsets` and save them in
  ///      `majorIvsPlusOffsets`.
  ///   2. Return a boolean that determines whether the first `majorIvs.rank()`
  ///      dimensions `majorIvs + majorOffsets` are all within `memrefBounds`.
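  ///
  /// As a rough illustration (pseudo-IR, op spellings approximate), with two
  /// masked major dimensions this emits:
  ///
  /// ```
  ///   %sum0 = %iv0 + %off0
  ///   %cond0 = %sum0 < %ub0
  ///   %sum1 = %iv1 + %off1
  ///   %cond1 = %sum1 < %ub1
  ///   %inBounds = %cond0 && %cond1
  /// ```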
  Value emitInBoundsCondition(ValueRange majorIvs, ValueRange majorOffsets,
                              MemRefBoundsCapture &memrefBounds,
                              SmallVectorImpl<Value> &majorIvsPlusOffsets);

  /// Common state to lower vector transfer ops.
  PatternRewriter &rewriter;
  const VectorTransferToSCFOptions &options;
  Location loc;
  std::unique_ptr<ScopedContext> scope;
  ConcreteOp xferOp;
  Operation *op;
  // A vector transfer copies data between:
  //   - memref<(leading_dims) x (major_dims) x (minor_dims) x type>
  //   - vector<(major_dims) x (minor_dims) x type>
  unsigned minorRank;         // for now always 1
  unsigned majorRank;         // vector rank - minorRank
  unsigned leadingRank;       // memref rank - vector rank
  VectorType vectorType;      // vector<(major_dims) x (minor_dims) x type>
  VectorType majorVectorType; // vector<(major_dims) x type>
  VectorType minorVectorType; // vector<(minor_dims) x type>
  MemRefType memRefMinorVectorType; // memref<vector<(minor_dims) x type>>
};

template <typename ConcreteOp>
template <typename Lambda>
void NDTransferOpHelper<ConcreteOp>::emitLoops(Lambda loopBodyBuilder) {
  // The loop nest operates on the major dimensions.
  MemRefBoundsCapture memrefBoundsCapture(xferOp.memref());

  if (options.unroll) {
    auto shape = majorVectorType.getShape();
    auto strides = computeStrides(shape);
    unsigned numUnrolledInstances = computeMaxLinearIndex(shape);
    ValueRange indices(xferOp.indices());
    for (unsigned idx = 0; idx < numUnrolledInstances; ++idx) {
      SmallVector<int64_t, 4> offsets = delinearize(strides, idx);
      SmallVector<Value, 4> offsetValues =
          llvm::to_vector<4>(llvm::map_range(offsets, [](int64_t off) -> Value {
            return std_constant_index(off);
          }));
      loopBodyBuilder(offsetValues, indices.take_front(leadingRank),
                      indices.drop_front(leadingRank).take_front(majorRank),
                      indices.take_back(minorRank), memrefBoundsCapture);
    }
  } else {
    VectorBoundsCapture vectorBoundsCapture(majorVectorType);
    auto majorLbs = vectorBoundsCapture.getLbs();
    auto majorUbs = vectorBoundsCapture.getUbs();
    auto majorSteps = vectorBoundsCapture.getSteps();
    SmallVector<Value, 8> majorIvs(vectorBoundsCapture.rank());
    AffineLoopNestBuilder(majorIvs, majorLbs, majorUbs, majorSteps)([&] {
      ValueRange indices(xferOp.indices());
      loopBodyBuilder(majorIvs, indices.take_front(leadingRank),
                      indices.drop_front(leadingRank).take_front(majorRank),
                      indices.take_back(minorRank), memrefBoundsCapture);
    });
  }
}

template <typename ConcreteOp>
Value NDTransferOpHelper<ConcreteOp>::emitInBoundsCondition(
    ValueRange majorIvs, ValueRange majorOffsets,
    MemRefBoundsCapture &memrefBounds,
    SmallVectorImpl<Value> &majorIvsPlusOffsets) {
  Value inBoundsCondition;
  majorIvsPlusOffsets.reserve(majorIvs.size());
  unsigned idx = 0;
  for (auto it : llvm::zip(majorIvs, majorOffsets, memrefBounds.getUbs())) {
    Value iv = std::get<0>(it), off = std::get<1>(it), ub = std::get<2>(it);
    using namespace mlir::edsc::op;
    majorIvsPlusOffsets.push_back(iv + off);
    if (xferOp.isMaskedDim(leadingRank + idx)) {
      Value inBounds = majorIvsPlusOffsets.back() < ub;
      inBoundsCondition =
          (inBoundsCondition) ? (inBoundsCondition && inBounds) : inBounds;
    }
    ++idx;
  }
  return inBoundsCondition;
}

template <>
LogicalResult NDTransferOpHelper<TransferReadOp>::doReplace() {
  Value alloc, result;
  if (options.unroll)
    result = std_splat(vectorType, xferOp.padding());
  else
    alloc = std_alloc(memRefMinorVectorType);

  emitLoops([&](ValueRange majorIvs, ValueRange leadingOffsets,
                ValueRange majorOffsets, ValueRange minorOffsets,
                MemRefBoundsCapture &memrefBounds) {
    // Lambda to load a 1-D vector in the current loop ivs + offset context.
    auto load1DVector = [&](ValueRange majorIvsPlusOffsets) -> Value {
      SmallVector<Value, 8> indexing;
      indexing.reserve(leadingRank + majorRank + minorRank);
      indexing.append(leadingOffsets.begin(), leadingOffsets.end());
      indexing.append(majorIvsPlusOffsets.begin(), majorIvsPlusOffsets.end());
      indexing.append(minorOffsets.begin(), minorOffsets.end());
      Value memref = xferOp.memref();
      auto map = TransferReadOp::getTransferMinorIdentityMap(
          xferOp.getMemRefType(), minorVectorType);
      ArrayAttr masked;
      if (xferOp.isMaskedDim(xferOp.getVectorType().getRank() - 1)) {
        OpBuilder &b = ScopedContext::getBuilderRef();
        masked = b.getBoolArrayAttr({true});
      }
      return vector_transfer_read(minorVectorType, memref, indexing,
                                  AffineMapAttr::get(map), xferOp.padding(),
                                  masked);
    };

    // 1. Compute the inBoundsCondition in the current loop ivs + offset
    // context.
    SmallVector<Value, 4> majorIvsPlusOffsets;
    Value inBoundsCondition = emitInBoundsCondition(
        majorIvs, majorOffsets, memrefBounds, majorIvsPlusOffsets);

    if (inBoundsCondition) {
      // 2. If the condition is not null, we need an IfOp, which may yield
      // if `options.unroll` is true.
      SmallVector<Type, 1> resultType;
      if (options.unroll)
        resultType.push_back(vectorType);
      auto ifOp = ScopedContext::getBuilderRef().create<scf::IfOp>(
          ScopedContext::getLocation(), resultType, inBoundsCondition,
          /*withElseRegion=*/true);

      // 3.a. If in-bounds, progressively lower to a 1-D transfer read.
      BlockBuilder(&ifOp.thenRegion().front(), Append())([&] {
        Value vector = load1DVector(majorIvsPlusOffsets);
        // 3.a.i. If `options.unroll` is true, insert the 1-D vector in the
        // aggregate. We must yield and merge with the `else` branch.
        if (options.unroll) {
          vector = vector_insert(vector, result, majorIvs);
          (loop_yield(vector));
          return;
        }
        // 3.a.ii. Otherwise, just go through the temporary `alloc`.
        std_store(vector, alloc, majorIvs);
      });

      // 3.b. If not in-bounds, splat a 1-D vector.
      BlockBuilder(&ifOp.elseRegion().front(), Append())([&] {
        Value vector = std_splat(minorVectorType, xferOp.padding());
        // 3.b.i. If `options.unroll` is true, insert the 1-D vector in the
        // aggregate. We must yield and merge with the `then` branch.
        if (options.unroll) {
          vector = vector_insert(vector, result, majorIvs);
          (loop_yield(vector));
          return;
        }
        // 3.b.ii. Otherwise, just go through the temporary `alloc`.
        std_store(vector, alloc, majorIvs);
      });
      if (!resultType.empty())
        result = *ifOp.results().begin();
    } else {
      // 4. Guaranteed in-bounds, progressively lower to a 1-D transfer read.
      Value loaded1D = load1DVector(majorIvsPlusOffsets);
      // 5.a. If `options.unroll` is true, insert the 1-D vector in the
      // aggregate.
      if (options.unroll)
        result = vector_insert(loaded1D, result, majorIvs);
      // 5.b. Otherwise, just go through the temporary `alloc`.
      else
        std_store(loaded1D, alloc, majorIvs);
    }
  });

  assert((!options.unroll ^ static_cast<bool>(result)) &&
         "Expected resulting Value iff unroll");
  if (!result)
    result = std_load(vector_type_cast(MemRefType::get({}, vectorType), alloc));
  rewriter.replaceOp(op, result);

  return success();
}

template <>
LogicalResult NDTransferOpHelper<TransferWriteOp>::doReplace() {
  Value alloc;
  if (!options.unroll) {
    alloc = std_alloc(memRefMinorVectorType);
    std_store(xferOp.vector(),
              vector_type_cast(MemRefType::get({}, vectorType), alloc));
  }

  emitLoops([&](ValueRange majorIvs, ValueRange leadingOffsets,
                ValueRange majorOffsets, ValueRange minorOffsets,
                MemRefBoundsCapture &memrefBounds) {
    // Lower to 1-D vector_transfer_write and let recursion handle it.
    auto emitTransferWrite = [&](ValueRange majorIvsPlusOffsets) {
      SmallVector<Value, 8> indexing;
      indexing.reserve(leadingRank + majorRank + minorRank);
      indexing.append(leadingOffsets.begin(), leadingOffsets.end());
      indexing.append(majorIvsPlusOffsets.begin(), majorIvsPlusOffsets.end());
      indexing.append(minorOffsets.begin(), minorOffsets.end());
      Value result;
      // If `options.unroll` is true, extract the 1-D vector from the
      // aggregate.
      if (options.unroll)
        result = vector_extract(xferOp.vector(), majorIvs);
      else
        result = std_load(alloc, majorIvs);
      auto map = TransferWriteOp::getTransferMinorIdentityMap(
          xferOp.getMemRefType(), minorVectorType);
      ArrayAttr masked;
      if (xferOp.isMaskedDim(xferOp.getVectorType().getRank() - 1)) {
        OpBuilder &b = ScopedContext::getBuilderRef();
        masked = b.getBoolArrayAttr({true});
      }
      vector_transfer_write(result, xferOp.memref(), indexing,
                            AffineMapAttr::get(map), masked);
    };

    // 1. Compute the inBoundsCondition in the current loop ivs + offset
    // context.
    SmallVector<Value, 4> majorIvsPlusOffsets;
    Value inBoundsCondition = emitInBoundsCondition(
        majorIvs, majorOffsets, memrefBounds, majorIvsPlusOffsets);

    if (inBoundsCondition) {
      // 2.a. If the condition is not null, we need an IfOp to write
      // conditionally. Progressively lower to a 1-D transfer write.
      auto ifOp = ScopedContext::getBuilderRef().create<scf::IfOp>(
          ScopedContext::getLocation(), TypeRange{}, inBoundsCondition,
          /*withElseRegion=*/false);
      BlockBuilder(&ifOp.thenRegion().front(),
                   Append())([&] { emitTransferWrite(majorIvsPlusOffsets); });
    } else {
      // 2.b. Guaranteed in-bounds. Progressively lower to a 1-D transfer write.
      emitTransferWrite(majorIvsPlusOffsets);
    }
  });

  rewriter.eraseOp(op);

  return success();
}

} // namespace

/// Analyzes the `transfer` to find an access dimension along the fastest
/// remote MemRef dimension. If such a dimension with coalescing properties is
/// found, returns its index in the permutation map results so that callers can
/// swap the corresponding loop bounds and induction variables to make it the
/// innermost loop. Returns -1 otherwise.
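/// For example (illustrative): with a remote memref of rank 3 and
/// permutation_map = (d0, d1, d2) -> (d2, d1), the result expression `d2`
/// accesses the innermost memref dimension, so the returned coalesced index
/// is 0 (the position of `d2` among the map results).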
template <typename TransferOpTy>
static int computeCoalescedIndex(TransferOpTy transfer) {
  // Rank of the remote memory access; coalescing behavior occurs on the
  // innermost memory dimension.
  auto remoteRank = transfer.getMemRefType().getRank();
  // Iterate over the result expressions of the permutation map to determine
  // the loop order for creating pointwise copies between remote and local
  // memories.
  int coalescedIdx = -1;
  auto exprs = transfer.permutation_map().getResults();
  for (auto en : llvm::enumerate(exprs)) {
    auto dim = en.value().template dyn_cast<AffineDimExpr>();
    if (!dim) {
      continue;
    }
    auto memRefDim = dim.getPosition();
    if (memRefDim == remoteRank - 1) {
      // memRefDim has coalescing properties; it should be swapped into the
      // last position.
      assert(coalescedIdx == -1 && "Unexpected > 1 coalesced indices");
      coalescedIdx = en.index();
    }
  }
  return coalescedIdx;
}

/// Computes the indices for accessing the remote MemRef, clipped so that all
/// accesses remain within the boundaries of the MemRef.
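/// As a rough illustration (pseudo-IR, op spellings approximate), for one
/// memref dimension with upper bound %N, transfer index %i and loop iv %ii,
/// the clipped index is computed as:
///
/// ```
///   %sum = %i + %ii
///   %sel = select(%sum < %N, %sum, %N - 1)
///   %clipped = select(%sum < 0, 0, %sel)
/// ```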
template <typename TransferOpTy>
static SmallVector<Value, 8>
clip(TransferOpTy transfer, MemRefBoundsCapture &bounds, ArrayRef<Value> ivs) {
  using namespace mlir::edsc;

  Value zero(std_constant_index(0)), one(std_constant_index(1));
  SmallVector<Value, 8> memRefAccess(transfer.indices());
  SmallVector<Value, 8> clippedScalarAccessExprs(memRefAccess.size());
  // Indices accessing remote memory are clipped and their expressions are
  // returned in clippedScalarAccessExprs.
  for (unsigned memRefDim = 0; memRefDim < clippedScalarAccessExprs.size();
       ++memRefDim) {
    // Linear search on a small number of entries.
    int loopIndex = -1;
    auto exprs = transfer.permutation_map().getResults();
    for (auto en : llvm::enumerate(exprs)) {
      auto expr = en.value();
      auto dim = expr.template dyn_cast<AffineDimExpr>();
      // Sanity check.
      assert(
          (dim || expr.template cast<AffineConstantExpr>().getValue() == 0) &&
          "Expected dim or 0 in permutationMap");
      if (dim && memRefDim == dim.getPosition()) {
        loopIndex = en.index();
        break;
      }
    }

    // At the moment we cannot distinguish unrolled dimensions that implement
    // the "always full" tile abstraction (and thus would not need clipping)
    // from the other ones, so we conservatively clip everything.
    using namespace edsc::op;
    auto N = bounds.ub(memRefDim);
    auto i = memRefAccess[memRefDim];
    if (loopIndex < 0) {
      auto N_minus_1 = N - one;
      auto select_1 = std_select(i < N, i, N_minus_1);
      clippedScalarAccessExprs[memRefDim] =
          std_select(i < zero, zero, select_1);
    } else {
      auto ii = ivs[loopIndex];
      auto i_plus_ii = i + ii;
      auto N_minus_1 = N - one;
      auto select_1 = std_select(i_plus_ii < N, i_plus_ii, N_minus_1);
      clippedScalarAccessExprs[memRefDim] =
          std_select(i_plus_ii < zero, zero, select_1);
    }
  }

  return clippedScalarAccessExprs;
}

namespace mlir {

template <typename TransferOpTy>
VectorTransferRewriter<TransferOpTy>::VectorTransferRewriter(
    VectorTransferToSCFOptions options, MLIRContext *context)
    : RewritePattern(TransferOpTy::getOperationName(), 1, context),
      options(options) {}

/// Returns the MemRefType of the local buffer used for staging the transfer.
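/// E.g., a transfer of vector<3x15xf32> is staged through a memref<3x15xf32>
/// in the default memory space (illustrative shapes).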
template <typename TransferOpTy>
MemRefType VectorTransferRewriter<TransferOpTy>::tmpMemRefType(
    TransferOpTy transfer) const {
  auto vectorType = transfer.getVectorType();
  return MemRefType::get(vectorType.getShape(), vectorType.getElementType(), {},
                         0);
}

/// Lowers TransferReadOp into a combination of:
///   1. local memory allocation;
///   2. perfect loop nest over:
///      a. scalar load from original memref (with clipping);
///      b. scalar store to local buffer (viewed as a scalar memref).
///   3. vector_load from local buffer (viewed as a memref<1 x vector>);
///   4. local memory deallocation.
///
/// Lowers the data transfer part of a TransferReadOp while ensuring no
/// out-of-bounds accesses are possible. Out-of-bounds behavior is handled by
/// clipping. This means that a given value in memory can be read multiple
/// times and concurrently.
///
/// Important notes about clipping and "full-tiles only" abstraction:
/// =================================================================
/// When using clipping for dealing with boundary conditions, the same edge
/// value will appear multiple times (a.k.a edge padding). This is fine if the
/// subsequent vector operations are all data-parallel but **is generally
/// incorrect** in the presence of reductions or extract operations.
///
/// More generally, clipping is a scalar abstraction that is expected to work
/// fine as a baseline for CPUs and GPUs but not for vector_load and DMAs.
/// To deal with real vector_load and DMAs, a "padded allocation + view"
/// abstraction with the ability to read out-of-memref-bounds (but still within
/// the allocated region) is necessary.
///
/// Whether using scalar loops or vector_load/DMAs to perform the transfer,
/// junk values will be materialized in the vectors and generally need to be
/// filtered out and replaced by the "neutral element". This neutral element is
/// op-dependent so, in the future, we expect to create a vector filter and
/// apply it to a splatted constant vector with the proper neutral element at
/// each ssa-use. This filtering is not necessary for pure data-parallel
/// operations.
///
/// In the case of vector_store/DMAs, Read-Modify-Write will be required, which
/// also has concurrency implications. Note that by using clipped scalar stores
/// in the presence of data-parallel only operations, we generate code that
/// writes the same value multiple times to the edge locations.
///
/// TODO(ntv): implement alternatives to clipping.
/// TODO(ntv): support non-data-parallel operations.
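///
/// As a rough, illustrative sketch of the emitted structure (pseudo-IR, SSA
/// names invented, permutation-map handling and exact op spellings elided):
///
/// ```
///   %tmp = alloc() : memref<(vector_shape) x type>
///   scf.for %ivs over the vector shape {
///     %clipped = clip(%indices + %ivs)   // chain of cmp + select, see `clip`
///     %s = load %remote[%clipped]
///     store %s, %tmp[%ivs]
///   }
///   %res = load (vector_type_cast %tmp)[] : memref<vector<(vector_shape) x type>>
///   dealloc %tmp
/// ```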

/// Performs the rewrite.
template <>
LogicalResult VectorTransferRewriter<TransferReadOp>::matchAndRewrite(
    Operation *op, PatternRewriter &rewriter) const {
  using namespace mlir::edsc::op;

  TransferReadOp transfer = cast<TransferReadOp>(op);
  if (AffineMap::isMinorIdentity(transfer.permutation_map())) {
    // If > 1D, emit a bunch of loops around 1-D vector transfers.
    if (transfer.getVectorType().getRank() > 1)
      return NDTransferOpHelper<TransferReadOp>(rewriter, transfer, options)
          .doReplace();
    // If 1-D this is now handled by the target-specific lowering.
    if (transfer.getVectorType().getRank() == 1)
      return failure();
  }

  // Conservative lowering to scalar load / stores.
  // 1. Setup all the captures.
  ScopedContext scope(rewriter, transfer.getLoc());
  StdIndexedValue remote(transfer.memref());
  MemRefBoundsCapture memRefBoundsCapture(transfer.memref());
  VectorBoundsCapture vectorBoundsCapture(transfer.vector());
  int coalescedIdx = computeCoalescedIndex(transfer);
  // Swap the vectorBoundsCapture which will reorder loop bounds.
  if (coalescedIdx >= 0)
    vectorBoundsCapture.swapRanges(vectorBoundsCapture.rank() - 1,
                                   coalescedIdx);

  auto lbs = vectorBoundsCapture.getLbs();
  auto ubs = vectorBoundsCapture.getUbs();
  SmallVector<Value, 8> steps;
  steps.reserve(vectorBoundsCapture.getSteps().size());
  for (auto step : vectorBoundsCapture.getSteps())
    steps.push_back(std_constant_index(step));

  // 2. Emit alloc-copy-load-dealloc.
  Value tmp = std_alloc(tmpMemRefType(transfer));
  StdIndexedValue local(tmp);
  Value vec = vector_type_cast(tmp);
  loopNestBuilder(lbs, ubs, steps, [&](ValueRange loopIvs) {
    auto ivs = llvm::to_vector<8>(loopIvs);
    // Swap the ivs which will reorder memory accesses.
    if (coalescedIdx >= 0)
      std::swap(ivs.back(), ivs[coalescedIdx]);
    // Computes clippedScalarAccessExprs in the loop nest scope (ivs exist).
    local(ivs) = remote(clip(transfer, memRefBoundsCapture, ivs));
  });
  Value vectorValue = std_load(vec);
  (std_dealloc(tmp)); // Extra parens avoid a vexing parse.

  // 3. Propagate.
  rewriter.replaceOp(op, vectorValue);
  return success();
}

/// Lowers TransferWriteOp into a combination of:
///   1. local memory allocation;
///   2. vector_store to local buffer (viewed as a memref<1 x vector>);
///   3. perfect loop nest over:
///      a. scalar load from local buffer (viewed as a scalar memref);
///      b. scalar store to original memref (with clipping).
///   4. local memory deallocation.
///
/// More specifically, lowers the data transfer part while ensuring no
/// out-of-bounds accesses are possible. Out-of-bounds behavior is handled by
/// clipping. This means that a given value in memory can be written to multiple
/// times and concurrently.
///
/// See the `Important notes about clipping and "full-tiles only" abstraction`
/// section in the description of the TransferReadOp lowering above.
///
/// TODO(ntv): implement alternatives to clipping.
/// TODO(ntv): support non-data-parallel operations.
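///
/// As an illustrative sketch (pseudo-IR, names invented, details elided), the
/// emitted structure mirrors the read case:
///
/// ```
///   %tmp = alloc() : memref<(vector_shape) x type>
///   store %vector, (vector_type_cast %tmp)[]
///   scf.for %ivs over the vector shape {
///     %s = load %tmp[%ivs]
///     store %s, %remote[clip(%indices + %ivs)]
///   }
///   dealloc %tmp
/// ```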
template <>
LogicalResult VectorTransferRewriter<TransferWriteOp>::matchAndRewrite(
    Operation *op, PatternRewriter &rewriter) const {
  using namespace edsc::op;

  TransferWriteOp transfer = cast<TransferWriteOp>(op);
  if (AffineMap::isMinorIdentity(transfer.permutation_map())) {
    // If > 1D, emit a bunch of loops around 1-D vector transfers.
    if (transfer.getVectorType().getRank() > 1)
      return NDTransferOpHelper<TransferWriteOp>(rewriter, transfer, options)
          .doReplace();
    // If 1-D this is now handled by the target-specific lowering.
    if (transfer.getVectorType().getRank() == 1)
      return failure();
  }

  // 1. Setup all the captures.
  ScopedContext scope(rewriter, transfer.getLoc());
  StdIndexedValue remote(transfer.memref());
  MemRefBoundsCapture memRefBoundsCapture(transfer.memref());
  Value vectorValue(transfer.vector());
  VectorBoundsCapture vectorBoundsCapture(transfer.vector());
  int coalescedIdx = computeCoalescedIndex(transfer);
  // Swap the vectorBoundsCapture which will reorder loop bounds.
  if (coalescedIdx >= 0)
    vectorBoundsCapture.swapRanges(vectorBoundsCapture.rank() - 1,
                                   coalescedIdx);

  auto lbs = vectorBoundsCapture.getLbs();
  auto ubs = vectorBoundsCapture.getUbs();
  SmallVector<Value, 8> steps;
  steps.reserve(vectorBoundsCapture.getSteps().size());
  for (auto step : vectorBoundsCapture.getSteps())
    steps.push_back(std_constant_index(step));

  // 2. Emit alloc-store-copy-dealloc.
  Value tmp = std_alloc(tmpMemRefType(transfer));
  StdIndexedValue local(tmp);
  Value vec = vector_type_cast(tmp);
  std_store(vectorValue, vec);
  loopNestBuilder(lbs, ubs, steps, [&](ValueRange loopIvs) {
    auto ivs = llvm::to_vector<8>(loopIvs);
    // Swap the ivs which will reorder memory accesses.
    if (coalescedIdx >= 0)
      std::swap(ivs.back(), ivs[coalescedIdx]);
    // Computes clippedScalarAccessExprs in the loop nest scope (ivs exist).
    remote(clip(transfer, memRefBoundsCapture, ivs)) = local(ivs);
  });
  (std_dealloc(tmp)); // Extra parens avoid a vexing parse.

  rewriter.eraseOp(op);
  return success();
}

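/// Collects the vector-to-SCF lowering patterns defined above. As a rough
/// usage sketch (hypothetical pass body; `applyPatternsAndFoldGreedily` and
/// the surrounding pass are assumptions, not defined in this file):
///
/// ```
///   OwningRewritePatternList patterns;
///   populateVectorToSCFConversionPatterns(patterns, &getContext(),
///                                         VectorTransferToSCFOptions());
///   applyPatternsAndFoldGreedily(getFunction(), patterns);
/// ```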
void populateVectorToSCFConversionPatterns(
    OwningRewritePatternList &patterns, MLIRContext *context,
    const VectorTransferToSCFOptions &options) {
  patterns.insert<VectorTransferRewriter<vector::TransferReadOp>,
                  VectorTransferRewriter<vector::TransferWriteOp>>(options,
                                                                   context);
}

} // namespace mlir