1 //===- Loops.cpp - conversion from Linalg named and generic ops to loops --===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "PassDetail.h"
10 #include "mlir/Dialect/Affine/EDSC/Intrinsics.h"
11 #include "mlir/Dialect/Linalg/EDSC/FoldedIntrinsics.h"
12 #include "mlir/Dialect/Linalg/IR/LinalgOps.h"
13 #include "mlir/Dialect/Linalg/IR/LinalgTypes.h"
14 #include "mlir/Dialect/Linalg/Passes.h"
15 #include "mlir/Dialect/Linalg/Transforms/Transforms.h"
16 #include "mlir/Dialect/Linalg/Utils/Utils.h"
17 #include "mlir/Dialect/SCF/EDSC/Builders.h"
18 #include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
19 #include "mlir/IR/AffineExpr.h"
20 #include "mlir/IR/AffineMap.h"
21 #include "mlir/IR/BlockAndValueMapping.h"
22 #include "mlir/Support/LLVM.h"
23 #include "mlir/Transforms/DialectConversion.h"
24 #include "mlir/Transforms/FoldUtils.h"
25 
26 using namespace mlir;
27 using namespace mlir::edsc;
28 using namespace mlir::edsc::intrinsics;
29 using namespace mlir::linalg;
30 
31 using edsc::op::operator+;
32 
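/// Applies each result expression of `map` to `vals` through a canonicalized
/// affine.apply and returns the resulting values, one per map result.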
33 static SmallVector<Value, 8> makeCanonicalAffineApplies(OpBuilder &b,
34                                                         Location loc,
35                                                         AffineMap map,
36                                                         ArrayRef<Value> vals) {
37   if (map.isEmpty())
38     return {};
39 
40   assert(map.getNumInputs() == vals.size());
41   SmallVector<Value, 8> res;
42   res.reserve(map.getNumResults());
43   auto dims = map.getNumDims();
44   for (auto e : map.getResults()) {
45     auto exprMap = AffineMap::get(dims, map.getNumSymbols(), e);
46     SmallVector<Value, 4> operands(vals.begin(), vals.end());
47     canonicalizeMapAndOperands(&exprMap, &operands);
48     res.push_back(affine_apply(exprMap, operands));
49   }
50   return res;
51 }
52 
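/// Permutes `ivs` according to `permutation` when one is provided; otherwise
/// returns `ivs` unchanged.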
53 static SmallVector<Value, 4> permuteIvs(ArrayRef<Value> ivs,
54                                         Optional<AffineMap> permutation) {
55   return permutation ? applyMapToValues(ScopedContext::getBuilderRef(),
56                                         ScopedContext::getLocation(),
57                                         permutation.getValue(), ivs)
58                      : SmallVector<Value, 4>(ivs.begin(), ivs.end());
59 }
60 
/// Creates a number of ranges equal to the number of dimensions in `map`.
/// The returned ranges correspond to the loop ranges, in the proper order, for
/// which new loops will be created.
/// The function supports only maps that are invertible and have results of
/// type DimExpr or (DimExpr + DimExpr - SymbolExpr floordiv ConstExpr).
/// It expects a non-inverted, concatenated map; if the map contains symbols,
/// the trailing values of `viewSizes` are bound to those symbols.
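/// For example (illustrative), given the concatenated map
///   (d0, d1)[s0] -> (d0 + d1 - s0 floordiv 2, d1)
/// `viewSizes` is expected to hold one size per map result followed by the
/// value bound to s0, and the returned ranges cover d0 and d1.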
68 static SmallVector<SubViewOp::Range, 4> emitLoopRanges(OpBuilder &b,
69                                                        Location loc,
70                                                        AffineMap map,
71                                                        ValueRange viewSizes) {
72   unsigned numDims = map.getNumDims(), numRes = map.getNumResults();
73   unsigned numSym = map.getNumSymbols();
74   assert(viewSizes.size() == numRes + numSym &&
75          "viewSizes must contain sizes of all views and values for symbols");
76   SmallVector<SubViewOp::Range, 4> res(numDims);
77   for (unsigned idx = 0; idx < numRes; ++idx) {
78     auto result = map.getResult(idx);
79     if (auto d = result.dyn_cast<AffineDimExpr>()) {
80       if (res[d.getPosition()].offset)
81         continue;
82       res[d.getPosition()] = SubViewOp::Range{
83           std_constant_index(0), viewSizes[idx], std_constant_index(1)};
84     }
85 
    // If the access pattern is of the form (m, n)[s] -> (m + n - s floordiv 2),
    // then the bounds for m are:
    //   (s floordiv 2) <= m <= (size(m) + s floordiv 2 - s + 1),
    // where size(n) is the value bound to the symbol s.
    // This is currently done statically.
91     if (auto binOp = result.dyn_cast<AffineBinaryOpExpr>()) {
92       auto lhs = binOp.getLHS().dyn_cast<AffineBinaryOpExpr>();
93       auto rhs = binOp.getRHS().dyn_cast<AffineBinaryOpExpr>();
94       if (!lhs || !rhs || binOp.getKind() != AffineExprKind::Add ||
95           lhs.getKind() != AffineExprKind::Add ||
96           rhs.getKind() != mlir::AffineExprKind::Mul)
97         continue;
98 
99       auto m = lhs.getLHS().dyn_cast<AffineDimExpr>();
100       auto n = lhs.getRHS().dyn_cast<AffineDimExpr>();
101       auto fDiv = rhs.getLHS().dyn_cast<AffineBinaryOpExpr>();
102       auto minusOne = rhs.getRHS().dyn_cast<AffineConstantExpr>();
103       if (!m || !n || !fDiv || !minusOne ||
104           fDiv.getKind() != AffineExprKind::FloorDiv ||
105           fDiv.getLHS().getKind() != AffineExprKind::SymbolId ||
106           fDiv.getRHS().getKind() != AffineExprKind::Constant)
107         continue;
108 
109       auto s = fDiv.getLHS().dyn_cast<AffineSymbolExpr>();
110       if (minusOne.getValue() != -1)
111         continue;
112 
113       int mPos = m.getPosition();
114       AffineExpr one = getAffineConstantExpr(1, s.getContext());
115       AffineExpr sizeOfM = getAffineSymbolExpr(numSym, s.getContext());
116       // Construction of upper bound (size(m) + s floordiv 2 - s + 1).
117       AffineExpr upperOffsetExpr = sizeOfM + fDiv + one - s;
118       AffineMap fromMap = AffineMap::get(numDims, numSym + 1, fDiv);
119       AffineMap toMap = AffineMap::get(numDims, numSym + 1, upperOffsetExpr);
120       SmallVector<Value, 8> values(viewSizes.begin(),
121                                    viewSizes.begin() + numDims);
122       values.insert(values.end(), viewSizes.begin() + numRes, viewSizes.end());
123       values.push_back(viewSizes[mPos]);
124       // Construction of the lower bound (s floordiv 2).
125       Value from = applyMapToValues(b, loc, fromMap, values).front();
126       Value to = applyMapToValues(b, loc, toMap, values).front();
127       res[mPos] = SubViewOp::Range{from, to, std_constant_index(1)};
128     }
129   }
130   return res;
131 }
132 
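/// Inlines the single-block region of `op` after remapping its block
/// arguments to `indexedValues`, then stores the values yielded by the
/// terminator into `outputBuffers` at the positions given by `indexing`.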
133 template <typename IndexedValueType, typename OpType>
134 static void inlineRegionAndEmitStore(OpType op, ArrayRef<Value> indexedValues,
135                                      ArrayRef<SmallVector<Value, 8>> indexing,
136                                      ArrayRef<Value> outputBuffers) {
137   assert(op.getOperation()->getNumRegions() == 1 &&
138          "Expected single region op");
139   auto &b = ScopedContext::getBuilderRef();
140   auto &block = op.region().front();
141   BlockAndValueMapping map;
142   map.map(block.getArguments(), indexedValues);
143   for (auto &op : block.without_terminator()) {
144     assert(op.getNumRegions() == 0 && "expected a non-nested region");
145     auto *newOp = b.clone(op, map);
146     map.map(op.getResults(), newOp->getResults());
147   }
148 
149   Operation &terminator = block.back();
150   assert(isa<YieldOp>(terminator) &&
151          "expected a yield op in the end of the region");
152   for (unsigned i = 0, e = terminator.getNumOperands(); i < e; ++i) {
153     IndexedValueType O(outputBuffers[i]);
154     O(indexing[i]) = map.lookupOrDefault(terminator.getOperand(i));
155   }
156 }
157 
/// Returns the input and output indices of a SingleInputPoolingOp `op`,
/// obtained by applying its indexing maps to `allIvs`.
160 struct InputAndOutputIndices {
161   SmallVector<Value, 8> inputs;
162   SmallVector<Value, 8> outputs;
163 };
164 template <typename SingleInputPoolingOp>
165 static InputAndOutputIndices getInputAndOutputIndices(ArrayRef<Value> allIvs,
166                                                       SingleInputPoolingOp op) {
167   auto &b = ScopedContext::getBuilderRef();
168   auto loc = ScopedContext::getLocation();
169   auto mapsRange = op.indexing_maps().template getAsRange<AffineMapAttr>();
170   auto maps = llvm::to_vector<8>(
171       llvm::map_range(mapsRange, [](AffineMapAttr a) { return a.getValue(); }));
172   return InputAndOutputIndices{
173       makeCanonicalAffineApplies(b, loc, maps[0], allIvs),
174       makeCanonicalAffineApplies(b, loc, maps[2], allIvs)};
175 }
176 
177 namespace {
178 
179 /// Emits the MLIR for the scalar part of the generic op by:
180 ///   1. Emitting load ops for each input and output view in order. This is
181 ///      achieved by applying the appropriate input or output map to the
182 ///      enclosing induction variables.
183 ///   2. Emitting a call to `op.fun()` that takes as arguments the scalars
184 ///      from point 1. above.
185 ///   3. Emitting store ops to store the results of 2. to the output
186 ///      views.
187 ///
188 /// An example output may resemble:
189 ///
190 /// ```
191 ///    scf.for %i = %c0 to %0 step %c1 {
192 ///      scf.for %j = %c0 to %1 step %c1 {
193 ///        scf.for %k = %c0 to %4 step %c1 {
194 ///          %11 = load %arg0[%i, %j] :
195 ///            memref<?x?xf32, stride_specification>
196 ///          %12 = load %arg1[%i, %j, %k] :
197 ///            memref<?x?x?xf32, stride_specification>
198 ///          %13 = load %arg2[%i, %k, %j] :
199 ///            memref<?x?x?xf32, stride_specification>
200 ///          %14:2 = call @foo(%11, %12, %13) : (f32, f32, f32) -> (f32, f32)
201 ///          store %14#0, %arg1[%i, %j, %k] :
///            memref<?x?x?xf32, stride_specification>
///          store %14#1, %arg2[%i, %k, %j] :
///            memref<?x?x?xf32, stride_specification>
205 ///       }
206 ///      }
207 ///    }
208 /// ```
209 // TODO: need a LinalgStructuredOpInterface.
210 template <typename IndexedValueType, typename LinalgStructuredOpType>
211 void emitScalarImplementation(ArrayRef<Value> allIvs,
212                               LinalgStructuredOpType linalgOp) {
213   assert(linalgOp.hasBufferSemantics() &&
214          "expected linalg op with buffer semantics");
215   auto &b = ScopedContext::getBuilderRef();
216   auto loc = ScopedContext::getLocation();
217   unsigned nInputs = linalgOp.getNumInputs();
218   unsigned nOutputs = linalgOp.getNumOutputs();
219   SmallVector<Value, 4> indexedValues;
220   indexedValues.reserve(nInputs + nOutputs);
221 
222   auto attr = linalgOp.template getAttrOfType<IntegerAttr>("symbol_source");
223   auto allIvsPlusDims = SmallVector<Value, 4>(allIvs.begin(), allIvs.end());
224   if (attr) {
225     auto operand = linalgOp.getOperand(attr.getInt());
226     auto shapedType = operand.getType().template cast<ShapedType>();
227     allIvsPlusDims.reserve(allIvs.size() + shapedType.getRank());
228     for (unsigned idx = 0, e = shapedType.getRank(); idx < e; ++idx)
229       allIvsPlusDims.push_back(b.create<DimOp>(loc, operand, idx));
230   }
231 
232   // TODO: Avoid the loads if the corresponding argument of the
233   // region has no uses.
234   // 1.a. Emit load from input views.
235   for (unsigned i = 0; i < nInputs; ++i) {
236     auto indexing = makeCanonicalAffineApplies(
237         b, loc, linalgOp.getInputIndexingMap(i), allIvsPlusDims);
238     // Passing through IndexedValueType emits the proper load operation.
239     indexedValues.push_back(IndexedValueType(linalgOp.getInput(i))(indexing));
240   }
241   // 1.b. Emit load from output views.
242   for (unsigned i = 0; i < nOutputs; ++i) {
243     auto indexing = makeCanonicalAffineApplies(
244         b, loc, linalgOp.getOutputIndexingMap(i), allIvsPlusDims);
245     // Passing through IndexedValueType emits the proper load operation.
246     indexedValues.push_back(
247         IndexedValueType(linalgOp.getOutputBuffer(i))(indexing));
248   }
249 
250   // TODO: When a region inliner exists, use it.
251   // 2. Inline region, currently only works for a single basic block.
252   // 3. Emit store.
253   SmallVector<SmallVector<Value, 8>, 8> indexing;
254   SmallVector<Value, 8> outputBuffers;
255   for (unsigned i = 0; i < nOutputs; ++i) {
256     indexing.push_back(makeCanonicalAffineApplies(
257         b, loc, linalgOp.getOutputIndexingMap(i), allIvsPlusDims));
258     outputBuffers.push_back(linalgOp.getOutputBuffer(i));
259   }
260   inlineRegionAndEmitStore<IndexedValueType>(linalgOp, indexedValues, indexing,
261                                              outputBuffers);
262 }
263 
264 template <typename IndexedValueType>
265 void emitScalarImplementation(ArrayRef<Value> allIvs, CopyOp copyOp) {
266   assert(copyOp.hasBufferSemantics() &&
267          "expected linalg op with buffer semantics");
268   auto nPar = copyOp.getNumParallelLoops();
269   assert(nPar == allIvs.size());
270   auto inputIvs =
271       permuteIvs(allIvs.take_front(nPar), copyOp.inputPermutation());
272   auto outputIvs =
273       permuteIvs(allIvs.take_front(nPar), copyOp.outputPermutation());
274   SmallVector<Value, 8> iivs(inputIvs.begin(), inputIvs.end());
275   SmallVector<Value, 8> oivs(outputIvs.begin(), outputIvs.end());
276   IndexedValueType O(copyOp.getOutputBuffer(0)), I(copyOp.getInput(0));
  // Emit the proper scalar assignment, whether we are dealing with a 0-D or
  // an n-D loop nest, with or without permutations.
  // clang-format off
  nPar > 0 ? O(oivs) = I(iivs) :
             O() = I();
  // clang-format on
283 }
284 
285 template <typename IndexedValueType>
286 void emitScalarImplementation(ArrayRef<Value> allIvs, FillOp fillOp) {
287   assert(fillOp.hasBufferSemantics() &&
288          "expected linalg op with buffer semantics");
289   auto nPar = fillOp.getNumParallelLoops();
290   assert(nPar == allIvs.size());
291   auto ivs = SmallVector<Value, 4>(allIvs.begin(), allIvs.begin() + nPar);
292   IndexedValueType O(fillOp.getOutputBuffer(0));
  // Emit the proper scalar assignment, whether we are dealing with a 0-D or
  // an n-D loop nest.
295   nPar > 0 ? O(ivs) = fillOp.value() : O() = fillOp.value();
296 }
297 
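/// Returns the convolution input value loaded at `imIdx`. When `convOp` has
/// padding, the window indices are clamped to be non-negative and a zero of
/// the input element type is selected for out-of-bounds accesses.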
298 template <typename IndexedValueType>
299 Value getConvOpInput(ConvOp convOp, StdIndexedValue im,
300                      MutableArrayRef<Value> imIdx) {
301   // TODO: add a level of indirection to linalg.generic.
302   if (!convOp.padding())
303     return im(imIdx);
304 
305   auto *context = ScopedContext::getContext();
306   Value zeroIndex = std_constant_index(0);
307   SmallVector<Value, 8> conds;
308   SmallVector<Value, 8> clampedImIdx;
309   for (auto iter : llvm::enumerate(imIdx)) {
310     int idx = iter.index();
311     auto dim = iter.value();
    // Only the window (spatial) dimensions need bounds handling; the other
    // indices are passed through unchanged.
313     if (idx == 0 || idx == static_cast<int>(imIdx.size()) - 1) {
314       clampedImIdx.push_back(dim);
315       continue;
316     }
317 
318     using edsc::op::sge;
319     using edsc::op::slt;
320     using edsc::op::operator||;
321     Value leftOutOfBound = slt(dim, zeroIndex);
322     if (conds.empty())
323       conds.push_back(leftOutOfBound);
324     else
325       conds.push_back(conds.back() || leftOutOfBound);
326     Value rightBound = std_dim(convOp.input(), idx);
327     conds.push_back(conds.back() || (sge(dim, rightBound)));
328 
    // With padding, the indices can only be shifted toward negative values, so
    // clamping with a max op is enough.
331     auto maxMap = AffineMap::get(/*dimCount=*/1, 0,
332                                  {getAffineDimExpr(/*position=*/0, context),
333                                   getAffineConstantExpr(0, context)},
334                                  context);
335     clampedImIdx.push_back(affine_max(dim.getType(), maxMap, ValueRange{dim}));
336   }
337 
338   auto &b = ScopedContext::getBuilderRef();
339   Type type = convOp.input().getType().cast<MemRefType>().getElementType();
340   Value zero = std_constant(type, b.getZeroAttr(type));
341   Value readInput = im(clampedImIdx);
342   return conds.empty() ? readInput
343                        : (Value)std_select(conds.back(), zero, readInput);
344 }
345 
/// Returns true if `convOp` has non-zero padding.
347 static bool hasPadding(ConvOp convOp) {
348   for (unsigned i = 0, e = convOp.getNumSpatialDimensions(); i < e; ++i) {
349     if (convOp.getLowPad(i) > 0 || convOp.getHighPad(i) > 0)
350       return true;
351   }
352   return false;
353 }
354 
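/// Emits the scalar form of a convolution at the indices derived from
/// `allIvs`: O(oIdx) += F(fIdx) * I(imIdx), where the input access accounts
/// for padding when present.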
355 template <typename IndexedValueType>
356 static void emitScalarImplementation(ArrayRef<Value> allIvs, ConvOp convOp) {
357   assert(convOp.hasBufferSemantics() &&
358          "expected linalg op with buffer semantics");
359   auto &b = ScopedContext::getBuilderRef();
360   auto loc = ScopedContext::getLocation();
361   auto mapsRange = convOp.indexing_maps().getAsRange<AffineMapAttr>();
362   auto maps = llvm::to_vector<8>(
363       llvm::map_range(mapsRange, [](AffineMapAttr a) { return a.getValue(); }));
364   SmallVector<Value, 8> fIdx(
365       makeCanonicalAffineApplies(b, loc, maps[0], allIvs));
366   SmallVector<Value, 8> imIdx(
367       makeCanonicalAffineApplies(b, loc, maps[1], allIvs));
368   SmallVector<Value, 8> oIdx(
369       makeCanonicalAffineApplies(b, loc, maps[2], allIvs));
370 
371   IndexedValueType F(convOp.filter()), O(convOp.output());
372 
  // Emit scalar form. A padded conv involves an affine.max in the memory
  // access, which is not allowed by affine.load. Override to use a
  // StdIndexedValue when there is non-zero padding.
376   if (hasPadding(convOp)) {
377     StdIndexedValue I(convOp.input());
378     Value paddedInput = getConvOpInput<IndexedValueType>(convOp, I, imIdx);
379     O(oIdx) += F(fIdx) * paddedInput;
380   } else {
381     IndexedValueType I(convOp.input());
382     O(oIdx) += F(fIdx) * I(imIdx);
383   }
384 }
385 
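/// Emits the scalar form of a max-pooling update at the indices derived from
/// `allIvs`: output = max(output, input).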
386 template <typename IndexedValueType>
387 void emitScalarImplementation(ArrayRef<Value> allIvs, PoolingMaxOp op) {
388   InputAndOutputIndices indices = getInputAndOutputIndices(allIvs, op);
389   // Emit scalar form.
390   IndexedValueType output(op.output());
391   IndexedValueType input(op.input());
392   Value lhs = output(indices.outputs);
393   Value rhs = input(indices.inputs);
394   using edsc::op::sgt;
395   Value maxValue = std_select(sgt(lhs, rhs), lhs, rhs);
396   output(indices.outputs) = maxValue;
397 }
398 
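/// Emits the scalar form of a min-pooling update at the indices derived from
/// `allIvs`: output = min(output, input).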
399 template <typename IndexedValueType>
400 void emitScalarImplementation(ArrayRef<Value> allIvs, PoolingMinOp op) {
401   InputAndOutputIndices indices = getInputAndOutputIndices(allIvs, op);
402   // Emit scalar form.
403   IndexedValueType output(op.output());
404   IndexedValueType input(op.input());
405   Value lhs = output(indices.outputs);
406   Value rhs = input(indices.inputs);
407   using edsc::op::slt;
408   Value minValue = std_select(slt(lhs, rhs), lhs, rhs);
409   output(indices.outputs) = minValue;
410 }
411 template <typename IndexedValueType>
412 void emitScalarImplementation(ArrayRef<Value> allIvs, PoolingSumOp op) {
413   auto indices = getInputAndOutputIndices(allIvs, op);
414   IndexedValueType input(op.input()), output(op.output());
415 
416   // Emit scalar form.
417   output(indices.outputs) += input(indices.inputs);
418 }
419 /// Emits the MLIR for the scalar part of the indexed generic op by:
420 ///   1. Emitting load ops for each input and output view in order. This is
421 ///      achieved by applying the appropriate input or output map to the
422 ///      enclosing induction variables.
423 ///   2. Emitting a call to `op.fun()` that takes as arguments the induction
424 ///      variables and the scalars from point 1. above.
425 ///   3. Emitting store ops to store the results of 2. to the output views.
426 ///
427 /// An example output may resemble:
428 ///
429 /// ```
430 ///    scf.for %i = %c0 to %0 step %c1 {
431 ///      scf.for %j = %c0 to %1 step %c1 {
432 ///        scf.for %k = %c0 to %4 step %c1 {
433 ///          %11 = load %arg0[%i, %j] :
434 ///            memref<?x?xf32, stride_specification>
435 ///          %12 = load %arg1[%i, %j, %k] :
436 ///            memref<?x?x?xf32, stride_specification>
437 ///          %13 = load %arg2[%i, %k, %j] :
438 ///            memref<?x?x?xf32, stride_specification>
439 ///          %14:2 = call @foo(%i, %j, %k, %11, %12, %13) :
440 ///            (index, index, index, f32, f32, f32) -> (f32, f32)
441 ///          store %14#0, %arg1[%i, %j, %k] :
///            memref<?x?x?xf32, stride_specification>
///          store %14#1, %arg2[%i, %k, %j] :
///            memref<?x?x?xf32, stride_specification>
445 ///       }
446 ///      }
447 ///    }
448 /// ```
449 template <typename IndexedValueType>
450 static void emitScalarImplementation(ArrayRef<Value> allIvs,
451                                      IndexedGenericOp indexedGenericOp) {
452   assert(indexedGenericOp.hasBufferSemantics() &&
453          "expected linalg op with buffer semantics");
454   auto &b = ScopedContext::getBuilderRef();
455   auto loc = ScopedContext::getLocation();
456   unsigned nInputs = indexedGenericOp.getNumInputs();
457   unsigned nOutputs = indexedGenericOp.getNumOutputs();
458   unsigned nLoops = allIvs.size();
459   SmallVector<Value, 4> indexedValues;
460   indexedValues.reserve(nLoops + nInputs + nOutputs);
461   for (unsigned i = 0; i < nLoops; ++i)
462     indexedValues.push_back(allIvs[i]);
463 
464   // TODO: Avoid the loads if the corresponding argument of the
465   // region has no uses.
466   // 1.a. Emit load from input views.
467   for (unsigned i = 0; i < nInputs; ++i) {
468     auto indexing = makeCanonicalAffineApplies(
469         b, loc, indexedGenericOp.getInputIndexingMap(i), allIvs);
    // Pass input i through IndexedValueType to emit the proper load operation.
471     indexedValues.push_back(
472         IndexedValueType(indexedGenericOp.getInput(i))(indexing));
473   }
474   // 1.b. Emit load from output views.
475   for (unsigned i = 0; i < nOutputs; ++i) {
476     auto indexing = makeCanonicalAffineApplies(
477         b, loc, indexedGenericOp.getOutputIndexingMap(i), allIvs);
    // Pass output i through IndexedValueType to emit the proper load operation.
479     indexedValues.push_back(
480         IndexedValueType(indexedGenericOp.getOutputBuffer(i))(indexing));
481   }
482 
483   // TODO: When a region inliner exists, use it.
484   // 2. Inline region, currently only works for a single basic block.
485   // 3. Emit store.
486   SmallVector<SmallVector<Value, 8>, 8> indexing;
487   SmallVector<Value, 8> outputBuffers;
488   for (unsigned i = 0; i < nOutputs; ++i) {
489     indexing.push_back(makeCanonicalAffineApplies(
490         b, loc, indexedGenericOp.getOutputIndexingMap(i), allIvs));
491     outputBuffers.push_back(indexedGenericOp.getOutputBuffer(i));
492   }
493   inlineRegionAndEmitStore<IndexedValueType>(indexedGenericOp, indexedValues,
494                                              indexing, outputBuffers);
495 }
496 
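/// Emits the loop nest that implements `op`: the loop ranges are derived from
/// the op's concatenated indexing maps and view sizes, and the loop body is
/// generated by the matching emitScalarImplementation overload. Returns the
/// generated loop operations, or llvm::None on failure.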
497 template <typename LoopTy, typename ConcreteOpTy>
498 Optional<LinalgLoops> linalgOpToLoopsImpl(Operation *op, OpBuilder &builder) {
499   using IndexedValueTy = typename GenerateLoopNest<LoopTy>::IndexedValueTy;
500 
501   ScopedContext scope(builder, op->getLoc());
502 
503   // The flattened loopToOperandRangesMaps is expected to be an invertible
504   // permutation map (which is asserted in the inverse calculation).
505   auto linalgOp = cast<ConcreteOpTy>(op);
506   assert(linalgOp.hasBufferSemantics() &&
507          "expected linalg op with buffer semantics");
508   auto mapsRange =
509       linalgOp.indexing_maps().template getAsRange<AffineMapAttr>();
510   auto maps = llvm::to_vector<8>(
511       llvm::map_range(mapsRange, [](AffineMapAttr a) { return a.getValue(); }));
  SmallVector<Value, 8> sizes = getViewSizes(builder, linalgOp);
  AffineMap map = concatAffineMaps(maps);
  auto loopRanges =
      emitLoopRanges(scope.getBuilderRef(), scope.getLocation(), map, sizes);
516   SmallVector<Value, 4> allIvs;
517   GenerateLoopNest<LoopTy>::doit(
518       loopRanges, linalgOp.iterator_types().getValue(), [&](ValueRange ivs) {
519         allIvs.append(ivs.begin(), ivs.end());
520         emitScalarImplementation<IndexedValueTy>(allIvs, linalgOp);
521       });
  // The number of loop ops may differ from the number of ivs since loops such
  // as affine.parallel and scf.parallel can have multiple ivs.
524   llvm::SetVector<Operation *> loopSet;
525   for (Value iv : allIvs) {
526     if (!iv)
527       return {};
528     // The induction variable is a block argument of the entry block of the
529     // loop operation.
530     BlockArgument ivVal = iv.dyn_cast<BlockArgument>();
531     if (!ivVal)
532       return {};
533     loopSet.insert(ivVal.getOwner()->getParentOp());
534   }
535   LinalgLoops loops(loopSet.begin(), loopSet.end());
536   return loops;
537 }
538 
539 template <typename LoopType, typename ConcreteOp>
540 class LinalgRewritePattern : public RewritePattern {
541 public:
542   explicit LinalgRewritePattern(MLIRContext *context)
543       : RewritePattern(ConcreteOp::getOperationName(), 1, context) {}
544 
545   LogicalResult matchAndRewrite(Operation *op,
546                                 PatternRewriter &rewriter) const override {
547     if (!linalgOpToLoopsImpl<LoopType, ConcreteOp>(op, rewriter))
548       return failure();
549     rewriter.eraseOp(op);
550     return success();
551   }
552 };
553 
554 template <typename LoopType, typename ConcreteOp>
555 void insertOnePattern(OwningRewritePatternList &patterns, MLIRContext *ctx) {
556   patterns.insert<LinalgRewritePattern<LoopType, ConcreteOp>>(ctx);
557 }
558 
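/// Populates `patterns` with one LinalgRewritePattern<LoopType, Op> for each
/// op type in `Args`.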
559 template <typename LoopType, typename... Args>
560 void insertPatterns(OwningRewritePatternList &patterns, MLIRContext *ctx) {
561   (void)std::initializer_list<int>{
562       0, (insertOnePattern<LoopType, Args>(patterns, ctx), 0)...};
563 }
564 
565 /// Local folding pattern for AffineApplyOp that we can apply greedily.
566 /// This replaces AffineApplyOp by the proper value in cases where the
567 /// associated map is trivial.
568 /// A trivial map here is defined as a map with a single result and either:
///   1. Zero operands and a single AffineConstantExpr result.
///   2. One operand and a single AffineDimExpr result.
///   3. One operand and a single AffineSymbolExpr result.
///
573 /// In the first case, the AffineApplyOp is replaced by a new constant. In the
574 /// other cases, it is replaced by its unique operand.
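/// For example, `affine_map<() -> (42)>` applied to no operands is replaced
/// by a constant index 42, and `affine_map<(d0) -> (d0)>` is replaced by its
/// single operand.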
575 struct FoldAffineOp : public RewritePattern {
576   FoldAffineOp(MLIRContext *context)
577       : RewritePattern(AffineApplyOp::getOperationName(), 0, context) {}
578 
579   LogicalResult matchAndRewrite(Operation *op,
580                                 PatternRewriter &rewriter) const override {
581     AffineApplyOp affineApplyOp = cast<AffineApplyOp>(op);
582     auto map = affineApplyOp.getAffineMap();
583     if (map.getNumResults() != 1 || map.getNumInputs() > 1)
584       return failure();
585 
586     AffineExpr expr = map.getResult(0);
587     if (map.getNumInputs() == 0) {
588       if (auto val = expr.dyn_cast<AffineConstantExpr>()) {
589         rewriter.replaceOpWithNewOp<ConstantIndexOp>(op, val.getValue());
590         return success();
591       }
592       return failure();
593     }
594     if (expr.dyn_cast<AffineDimExpr>() || expr.dyn_cast<AffineSymbolExpr>()) {
595       rewriter.replaceOp(op, op->getOperand(0));
596       return success();
597     }
598     return failure();
599   }
600 };
601 } // namespace
602 
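/// Converts all Linalg structured ops in `funcOp` to loops of type
/// `LoopType`, applying the conversion patterns together with
/// canonicalization and folding patterns greedily.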
603 template <typename LoopType>
604 static void lowerLinalgToLoopsImpl(FuncOp funcOp, MLIRContext *context) {
605   OwningRewritePatternList patterns;
606   // Canonicalization and folding patterns applied greedily allow cleaning up
607   // the emitted IR on the fly.
608   // TODO: fold view and subview ops?
609   insertPatterns<LoopType,
610 #define GET_OP_LIST
611 #include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.cpp.inc"
612                  >(patterns, context);
613 
614   DimOp::getCanonicalizationPatterns(patterns, context);
615   AffineApplyOp::getCanonicalizationPatterns(patterns, context);
616   patterns.insert<FoldAffineOp>(context);
617   // Just apply the patterns greedily.
618   applyPatternsAndFoldGreedily(funcOp, patterns);
619 }
620 
621 namespace {
622 struct LowerToAffineLoops
623     : public LinalgLowerToAffineLoopsBase<LowerToAffineLoops> {
624   void runOnFunction() override {
625     lowerLinalgToLoopsImpl<AffineForOp>(getFunction(), &getContext());
626   }
627 };
628 struct LowerToLoops : public LinalgLowerToLoopsBase<LowerToLoops> {
629   void runOnFunction() override {
630     lowerLinalgToLoopsImpl<scf::ForOp>(getFunction(), &getContext());
631   }
632 };
633 struct LowerToParallelLoops
634     : public LinalgLowerToParallelLoopsBase<LowerToParallelLoops> {
635   void runOnFunction() override {
636     lowerLinalgToLoopsImpl<scf::ParallelOp>(getFunction(), &getContext());
637   }
638 };
639 } // namespace
640 
641 std::unique_ptr<OperationPass<FuncOp>> mlir::createConvertLinalgToLoopsPass() {
642   return std::make_unique<LowerToLoops>();
643 }
644 
645 std::unique_ptr<OperationPass<FuncOp>>
646 mlir::createConvertLinalgToParallelLoopsPass() {
647   return std::make_unique<LowerToParallelLoops>();
648 }
649 
650 std::unique_ptr<OperationPass<FuncOp>>
651 mlir::createConvertLinalgToAffineLoopsPass() {
652   return std::make_unique<LowerToAffineLoops>();
653 }
654 
655 // TODO: gradually remove this layer as more ops become "named".
656 template <typename LoopTy>
657 static Optional<LinalgLoops> linalgOpToLoopsImplSwitch(Operation *op,
658                                                        OpBuilder &builder) {
659   assert(isa<LinalgOp>(op) && "LinalgOp expected");
660   if (isa<CopyOp>(op))
661     return linalgOpToLoopsImpl<LoopTy, CopyOp>(op, builder);
662   if (isa<FillOp>(op))
663     return linalgOpToLoopsImpl<LoopTy, FillOp>(op, builder);
664   if (isa<ConvOp>(op))
665     return linalgOpToLoopsImpl<LoopTy, ConvOp>(op, builder);
666   if (isa<PoolingMaxOp>(op))
667     return linalgOpToLoopsImpl<LoopTy, PoolingMaxOp>(op, builder);
668   if (isa<PoolingMinOp>(op))
669     return linalgOpToLoopsImpl<LoopTy, PoolingMinOp>(op, builder);
670   if (isa<PoolingSumOp>(op))
671     return linalgOpToLoopsImpl<LoopTy, PoolingSumOp>(op, builder);
672   if (isa<IndexedGenericOp>(op))
673     return linalgOpToLoopsImpl<LoopTy, IndexedGenericOp>(op, builder);
674 
675   // TODO: Cases below are generic and need a LinalgStructuredOpInterface.
676   if (isa<GenericOp>(op))
677     return linalgOpToLoopsImpl<LoopTy, GenericOp>(op, builder);
678   if (isa<MatmulOp>(op))
679     return linalgOpToLoopsImpl<LoopTy, MatmulOp>(op, builder);
680   if (isa<MatvecOp>(op))
681     return linalgOpToLoopsImpl<LoopTy, MatvecOp>(op, builder);
682   if (isa<DotOp>(op))
683     return linalgOpToLoopsImpl<LoopTy, DotOp>(op, builder);
684   if (isa<BatchMatmulOp>(op))
685     return linalgOpToLoopsImpl<LoopTy, BatchMatmulOp>(op, builder);
686   if (isa<ConvWOp>(op))
687     return linalgOpToLoopsImpl<LoopTy, ConvWOp>(op, builder);
688   if (isa<ConvNWCOp>(op))
689     return linalgOpToLoopsImpl<LoopTy, ConvNWCOp>(op, builder);
690   if (isa<ConvNCWOp>(op))
691     return linalgOpToLoopsImpl<LoopTy, ConvNCWOp>(op, builder);
692   if (isa<ConvHWOp>(op))
693     return linalgOpToLoopsImpl<LoopTy, ConvHWOp>(op, builder);
694   if (isa<ConvNHWCOp>(op))
695     return linalgOpToLoopsImpl<LoopTy, ConvNHWCOp>(op, builder);
696   if (isa<ConvNCHWOp>(op))
697     return linalgOpToLoopsImpl<LoopTy, ConvNCHWOp>(op, builder);
698   if (isa<ConvDHWOp>(op))
699     return linalgOpToLoopsImpl<LoopTy, ConvDHWOp>(op, builder);
700   if (isa<ConvNDHWCOp>(op))
701     return linalgOpToLoopsImpl<LoopTy, ConvNDHWCOp>(op, builder);
702   if (isa<ConvNCDHWOp>(op))
703     return linalgOpToLoopsImpl<LoopTy, ConvNCDHWOp>(op, builder);
704   llvm_unreachable("Unexpected op in linalgOpToLoopsImpl");
705 }
706 
707 /// Emits a loop nest with the proper body for `op`.
708 template <typename LoopTy>
709 Optional<LinalgLoops> mlir::linalg::linalgLowerOpToLoops(OpBuilder &builder,
710                                                          Operation *op) {
711   return linalgOpToLoopsImplSwitch<LoopTy>(op, builder);
712 }
713 
714 template Optional<LinalgLoops>
715 mlir::linalg::linalgLowerOpToLoops<AffineForOp>(OpBuilder &builder,
716                                                 Operation *op);
717 template Optional<LinalgLoops>
718 mlir::linalg::linalgLowerOpToLoops<scf::ForOp>(OpBuilder &builder,
719                                                Operation *op);
720 template Optional<LinalgLoops>
721 mlir::linalg::linalgLowerOpToLoops<scf::ParallelOp>(OpBuilder &builder,
722                                                     Operation *op);
723 
724 /// Emits a loop nest of `affine.for` with the proper body for `op`.
725 LogicalResult mlir::linalg::linalgOpToAffineLoops(OpBuilder &builder,
726                                                   Operation *op) {
727   Optional<LinalgLoops> loops = linalgLowerOpToLoops<AffineForOp>(builder, op);
728   return loops ? success() : failure();
729 }
730 
731 /// Emits a loop nest of `scf.for` with the proper body for `op`.
732 LogicalResult mlir::linalg::linalgOpToLoops(OpBuilder &builder, Operation *op) {
733   Optional<LinalgLoops> loops = linalgLowerOpToLoops<scf::ForOp>(builder, op);
734   return loops ? success() : failure();
735 }
736 
737 /// Emits a loop nest of `scf.parallel` with the proper body for `op`.
738 LogicalResult mlir::linalg::linalgOpToParallelLoops(OpBuilder &builder,
739                                                     Operation *op) {
740   Optional<LinalgLoops> loops =
741       linalgLowerOpToLoops<scf::ParallelOp>(builder, op);
742   return loops ? success() : failure();
743 }
744