//===- Loops.cpp - conversion from Linalg named and generic ops to loops --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Dialect/Affine/EDSC/Intrinsics.h"
#include "mlir/Dialect/Linalg/EDSC/FoldedIntrinsics.h"
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Linalg/IR/LinalgTypes.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/SCF/EDSC/Builders.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/FoldUtils.h"

using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
using namespace mlir::linalg;

using edsc::op::operator+;

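// Applies each result expression of `map` to `vals` as its own canonicalized
// affine.apply and returns the resulting values. For instance, with a map
// `(d0, d1) -> (d0 + d1, d1)` and values `%i, %j`, this emits roughly:
//   %s = affine.apply affine_map<(d0, d1) -> (d0 + d1)>(%i, %j)
// plus a single-dim apply over `%j` that the FoldAffineOp pattern further down
// later folds away.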
static SmallVector<Value, 8> makeCanonicalAffineApplies(OpBuilder &b,
                                                        Location loc,
                                                        AffineMap map,
                                                        ArrayRef<Value> vals) {
  if (map.isEmpty())
    return {};
  assert(map.getNumSymbols() == 0);
  assert(map.getNumInputs() == vals.size());
  SmallVector<Value, 8> res;
  res.reserve(map.getNumResults());
  auto dims = map.getNumDims();
  for (auto e : map.getResults()) {
    auto exprMap = AffineMap::get(dims, 0, e);
    SmallVector<Value, 4> operands(vals.begin(), vals.end());
    canonicalizeMapAndOperands(&exprMap, &operands);
    res.push_back(affine_apply(exprMap, operands));
  }
  return res;
}

static SmallVector<Value, 4> permuteIvs(ArrayRef<Value> ivs,
                                        Optional<AffineMap> permutation) {
  return permutation ? applyMapToValues(ScopedContext::getBuilderRef(),
                                        ScopedContext::getLocation(),
                                        permutation.getValue(), ivs)
                     : SmallVector<Value, 4>(ivs.begin(), ivs.end());
}

// Creates a number of ranges equal to the number of results in `map`.
// The returned ranges correspond to the loop ranges, in the proper order, for
// which new loops will be created.
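// For instance, for a `map` with three results applied to sizes %s0, %s1, %s2,
// this returns the ranges (0, %s0, 1), (0, %s1, 1), (0, %s2, 1), i.e.
// zero-based unit-step loops over the mapped view sizes.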
static SmallVector<SubViewOp::Range, 4>
emitLoopRanges(OpBuilder &b, Location loc, AffineMap map,
               ArrayRef<Value> allViewSizes) {
  // Apply `map` to get view sizes in loop order.
  auto sizes = applyMapToValues(b, loc, map, allViewSizes);
  // Create a zero-based unit-step range for each of the applied sizes.
  ScopedContext scope(b, loc);
  SmallVector<SubViewOp::Range, 4> res;
  for (unsigned idx = 0, e = map.getNumResults(); idx < e; ++idx) {
    res.push_back(SubViewOp::Range{std_constant_index(0), sizes[idx],
                                   std_constant_index(1)});
  }
  return res;
}

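// Clones the single-block region of `op` while remapping its block arguments
// to `indexedValues`, then stores each value yielded by the terminator into
// the corresponding output buffer at the indices given by `indexing`.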
template <typename IndexedValueType, typename OpType>
static void inlineRegionAndEmitStore(OpType op, ArrayRef<Value> indexedValues,
                                     ArrayRef<SmallVector<Value, 8>> indexing,
                                     ArrayRef<Value> outputBuffers) {
  auto &b = ScopedContext::getBuilderRef();
  auto &block = op.region().front();
  BlockAndValueMapping map;
  map.map(block.getArguments(), indexedValues);
  for (auto &op : block.without_terminator()) {
    assert(op.getNumRegions() == 0 && "expected a non-nested region");
    auto *newOp = b.clone(op, map);
    map.map(op.getResults(), newOp->getResults());
  }

  Operation &terminator = block.back();
  assert(isa<YieldOp>(terminator) &&
         "expected a yield op at the end of the region");
  for (unsigned i = 0, e = terminator.getNumOperands(); i < e; ++i) {
    IndexedValueType O(outputBuffers[i]);
    O(indexing[i]) = map.lookupOrDefault(terminator.getOperand(i));
  }
}

// Holds the input and output indices of a SingleInputPoolingOp `op`, as
// computed by getInputAndOutputIndices below.
struct InputAndOutputIndices {
  SmallVector<Value, 8> inputs;
  SmallVector<Value, 8> outputs;
};
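// Applies the input (maps[0]) and output (maps[2]) indexing maps of a
// single-input pooling op to the enclosing induction variables `allIvs`;
// maps[1], which indexes the pooling window, is not needed here.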
template <typename SingleInputPoolingOp>
static InputAndOutputIndices getInputAndOutputIndices(ArrayRef<Value> allIvs,
                                                      SingleInputPoolingOp op) {
  auto &b = ScopedContext::getBuilderRef();
  auto loc = ScopedContext::getLocation();
  auto mapsRange = op.indexing_maps().template getAsRange<AffineMapAttr>();
  auto maps = llvm::to_vector<8>(
      llvm::map_range(mapsRange, [](AffineMapAttr a) { return a.getValue(); }));
  return InputAndOutputIndices{
      makeCanonicalAffineApplies(b, loc, maps[0], allIvs),
      makeCanonicalAffineApplies(b, loc, maps[2], allIvs)};
}

namespace {

/// Emits the MLIR for the scalar part of the generic op by:
///   1. Emitting load ops for each input and output view in order. This is
///      achieved by applying the appropriate input or output map to the
///      enclosing induction variables.
///   2. Emitting a call to `op.fun()` that takes as arguments the scalars
///      from point 1. above.
///   3. Emitting store ops to store the results of 2. to the output
///      views.
///
/// An example output may resemble:
///
/// ```
///    scf.for %i = %c0 to %0 step %c1 {
///      scf.for %j = %c0 to %1 step %c1 {
///        scf.for %k = %c0 to %4 step %c1 {
///          %11 = load %arg0[%i, %j] :
///            memref<?x?xf32, stride_specification>
///          %12 = load %arg1[%i, %j, %k] :
///            memref<?x?x?xf32, stride_specification>
///          %13 = load %arg2[%i, %k, %j] :
///            memref<?x?x?xf32, stride_specification>
///          %14:2 = call @foo(%11, %12, %13) : (f32, f32, f32) -> (f32, f32)
///          store %14#0, %arg1[%i, %j, %k] :
///            memref<?x?x?xf32, stride_specification>
///          store %14#1, %arg2[%i, %k, %j] :
///            memref<?x?x?xf32, stride_specification>
///       }
///      }
///    }
/// ```
template <typename IndexedValueType, typename LinalgOpType>
class LinalgScopedEmitter {
public:
  static void emitScalarImplementation(ArrayRef<Value> allIvs,
                                       LinalgOpType linalgOp) {
    assert(linalgOp.hasBufferSemantics() &&
           "expected linalg op with buffer semantics");
    auto &b = ScopedContext::getBuilderRef();
    auto loc = ScopedContext::getLocation();
    unsigned nInputs = linalgOp.getNumInputs();
    unsigned nOutputs = linalgOp.getNumOutputs();
    SmallVector<Value, 4> indexedValues;
    indexedValues.reserve(nInputs + nOutputs);

    // TODO(mravishankar): Avoid the loads if the corresponding argument of the
    // region has no uses.
    // 1.a. Emit load from input views.
    for (unsigned i = 0; i < nInputs; ++i) {
      auto indexing = makeCanonicalAffineApplies(
          b, loc, linalgOp.getInputIndexingMap(i), allIvs);
      // Passing through IndexedValueType emits the proper load operation.
      indexedValues.push_back(IndexedValueType(linalgOp.getInput(i))(indexing));
    }
    // 1.b. Emit load from output views.
    for (unsigned i = 0; i < nOutputs; ++i) {
      auto indexing = makeCanonicalAffineApplies(
          b, loc, linalgOp.getOutputIndexingMap(i), allIvs);
      // Passing through IndexedValueType emits the proper load operation.
      indexedValues.push_back(
          IndexedValueType(linalgOp.getOutputBuffer(i))(indexing));
    }

    // TODO(ntv): When a region inliner exists, use it.
    // 2. Inline region, currently only works for a single basic block.
    // 3. Emit store.
    SmallVector<SmallVector<Value, 8>, 8> indexing;
    SmallVector<Value, 8> outputBuffers;
    for (unsigned i = 0; i < nOutputs; ++i) {
      indexing.push_back(makeCanonicalAffineApplies(
          b, loc, linalgOp.getOutputIndexingMap(i), allIvs));
      outputBuffers.push_back(linalgOp.getOutputBuffer(i));
    }
    inlineRegionAndEmitStore<IndexedValueType>(linalgOp, indexedValues,
                                               indexing, outputBuffers);
  }
};

template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, CopyOp> {
public:
  static void emitScalarImplementation(ArrayRef<Value> allIvs, CopyOp copyOp) {
    assert(copyOp.hasBufferSemantics() &&
           "expected linalg op with buffer semantics");
    auto nPar = copyOp.getNumParallelLoops();
    assert(nPar == allIvs.size());
    auto inputIvs =
        permuteIvs(allIvs.take_front(nPar), copyOp.inputPermutation());
    auto outputIvs =
        permuteIvs(allIvs.take_front(nPar), copyOp.outputPermutation());
    SmallVector<Value, 8> iivs(inputIvs.begin(), inputIvs.end());
    SmallVector<Value, 8> oivs(outputIvs.begin(), outputIvs.end());
    IndexedValueType O(copyOp.getOutputBuffer(0)), I(copyOp.getInput(0));
    // Emit the proper scalar assignment, whether we are dealing with a 0-D or
    // an n-D loop nest; with or without permutations.
    // clang-format off
    nPar > 0 ? O(oivs) = I(iivs) :
               O() = I();
    // clang-format on
  }
};

template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, FillOp> {
public:
  static void emitScalarImplementation(ArrayRef<Value> allIvs, FillOp fillOp) {
    assert(fillOp.hasBufferSemantics() &&
           "expected linalg op with buffer semantics");
    auto nPar = fillOp.getNumParallelLoops();
    assert(nPar == allIvs.size());
    auto ivs = SmallVector<Value, 4>(allIvs.begin(), allIvs.begin() + nPar);
    IndexedValueType O(fillOp.getOutputBuffer(0));
    // Emit the proper scalar assignment, whether we are dealing with a 0-D or
    // an n-D loop nest; with or without permutations.
    nPar > 0 ? O(ivs) = fillOp.value() : O() = fillOp.value();
  }
};

template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, DotOp> {
public:
  static void emitScalarImplementation(ArrayRef<Value> allIvs, DotOp dotOp) {
    assert(dotOp.hasBufferSemantics() &&
           "expected linalg op with buffer semantics");
    assert(allIvs.size() == 1);
    Value r_i(allIvs[0]);
    IndexedValueType A(dotOp.getInput(0)), B(dotOp.getInput(1)),
        C(dotOp.getOutputBuffer(0));
    // Emit scalar form.
    C() = C() + A(r_i) * B(r_i);
  }
};

template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, MatvecOp> {
public:
  static void emitScalarImplementation(ArrayRef<Value> allIvs,
                                       MatvecOp matvecOp) {
    assert(matvecOp.hasBufferSemantics() &&
           "expected linalg op with buffer semantics");
    assert(allIvs.size() == 2);
    Value i(allIvs[0]), r_j(allIvs[1]);
    IndexedValueType A(matvecOp.getInput(0)), B(matvecOp.getInput(1)),
        C(matvecOp.getOutputBuffer(0));
    // Emit scalar form.
    C(i) = C(i) + A(i, r_j) * B(r_j);
  }
};

template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, MatmulOp> {
public:
  static void emitScalarImplementation(ArrayRef<Value> allIvs,
                                       MatmulOp matmulOp) {
    assert(matmulOp.hasBufferSemantics() &&
           "expected linalg op with buffer semantics");
    assert(allIvs.size() == 3);
    Value i(allIvs[0]), j(allIvs[1]), r_k(allIvs[2]);
    IndexedValueType A(matmulOp.getInput(0)), B(matmulOp.getInput(1)),
        C(matmulOp.getOutputBuffer(0));
    // Emit scalar form.
    C(i, j) = C(i, j) + A(i, r_k) * B(r_k, j);
  }
};

template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, ConvOp> {
public:
  /// Returns the input value of convOp. If any index in `imIdx` is out of
  /// bounds, returns the padding value 0 instead.
  static Value getConvOpInput(ConvOp convOp, StdIndexedValue im,
                              MutableArrayRef<Value> imIdx) {
    // TODO(ntv): add a level of indirection to linalg.generic.
    if (!convOp.padding())
      return im(imIdx);

    auto *context = ScopedContext::getContext();
    Value zeroIndex = std_constant_index(0);
    SmallVector<Value, 8> conds;
    SmallVector<Value, 8> clampedImIdx;
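    // `conds` accumulates a running "some index is out of bounds" predicate,
    // and `clampedImIdx` holds the indices clamped to be non-negative; the
    // select at the end substitutes the zero padding value whenever any of
    // the original indices falls outside the input.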
    for (auto iter : llvm::enumerate(imIdx)) {
      int idx = iter.index();
      auto dim = iter.value();
      // Only need to iterate over the window dimensions.
      if (idx == 0 || idx == static_cast<int>(imIdx.size()) - 1) {
        clampedImIdx.push_back(dim);
        continue;
      }

      using edsc::op::operator<;
      using edsc::op::operator>=;
      using edsc::op::operator||;
      Value leftOutOfBound = dim < zeroIndex;
      if (conds.empty())
        conds.push_back(leftOutOfBound);
      else
        conds.push_back(conds.back() || leftOutOfBound);
      Value rightBound = std_dim(convOp.input(), idx);
      conds.push_back(conds.back() || (dim >= rightBound));

      // When padding is involved, the indices can only be shifted towards
      // negative values, so clamping with a max against 0 is enough.
      auto maxMap = AffineMap::get(/*dimCount=*/1, 0,
                                   {getAffineDimExpr(/*position=*/0, context),
                                    getAffineConstantExpr(0, context)},
                                   context);
      clampedImIdx.push_back(
          affine_max(dim.getType(), maxMap, ValueRange{dim}));
    }

    auto &b = ScopedContext::getBuilderRef();
    Type type = convOp.input().getType().cast<MemRefType>().getElementType();
    Value zero = std_constant(type, b.getZeroAttr(type));
    Value readInput = im(clampedImIdx);
    return conds.empty() ? readInput
                         : (Value)std_select(conds.back(), zero, readInput);
  }

  /// Returns true if `convOp` has non-zero padding.
  static bool hasPadding(ConvOp convOp) {
    for (unsigned i = 0, e = convOp.getNumSpatialDimensions(); i < e; ++i) {
      if (convOp.getLowPad(i) > 0 || convOp.getHighPad(i) > 0)
        return true;
    }
    return false;
  }

  static void emitScalarImplementation(ArrayRef<Value> allIvs, ConvOp convOp) {
    assert(convOp.hasBufferSemantics() &&
           "expected linalg op with buffer semantics");
    auto &b = ScopedContext::getBuilderRef();
    auto loc = ScopedContext::getLocation();
    auto mapsRange = convOp.indexing_maps().getAsRange<AffineMapAttr>();
    auto maps = llvm::to_vector<8>(llvm::map_range(
        mapsRange, [](AffineMapAttr a) { return a.getValue(); }));
    SmallVector<Value, 8> fIdx(
        makeCanonicalAffineApplies(b, loc, maps[0], allIvs));
    SmallVector<Value, 8> imIdx(
        makeCanonicalAffineApplies(b, loc, maps[1], allIvs));
    SmallVector<Value, 8> oIdx(
        makeCanonicalAffineApplies(b, loc, maps[2], allIvs));

    IndexedValueType F(convOp.filter()), O(convOp.output());

    // Emit scalar form. Padded conv involves an affine.max in the memory
    // access, which is not allowed by affine.load. Override to use a
    // StdIndexedValue when there is non-zero padding.
    if (hasPadding(convOp)) {
      StdIndexedValue I(convOp.input());
      Value paddedInput = getConvOpInput(convOp, I, imIdx);
      O(oIdx) += F(fIdx) * paddedInput;
    } else {
      IndexedValueType I(convOp.input());
      O(oIdx) += F(fIdx) * I(imIdx);
    }
  }
};

template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, PoolingMaxOp> {
public:
  static void emitScalarImplementation(ArrayRef<Value> allIvs,
                                       PoolingMaxOp op) {
    auto indices = getInputAndOutputIndices(allIvs, op);
    // Emit scalar form.
    Value lhs = std_load(op.output(), indices.outputs);
    Value rhs = std_load(op.input(), indices.inputs);
    using edsc::op::operator>;
    Value maxValue = std_select(lhs > rhs, lhs, rhs);
    std_store(maxValue, op.output(), indices.outputs);
  }
};

template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, PoolingMinOp> {
public:
  static void emitScalarImplementation(ArrayRef<Value> allIvs,
                                       PoolingMinOp op) {
    auto indices = getInputAndOutputIndices(allIvs, op);
    // Emit scalar form.
    Value lhs = std_load(op.output(), indices.outputs);
    Value rhs = std_load(op.input(), indices.inputs);
    using edsc::op::operator<;
    Value minValue = std_select(lhs < rhs, lhs, rhs);
    std_store(minValue, op.output(), indices.outputs);
  }
};

template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, PoolingSumOp> {
public:
  static void emitScalarImplementation(ArrayRef<Value> allIvs,
                                       PoolingSumOp op) {
    auto indices = getInputAndOutputIndices(allIvs, op);
    IndexedValueType input(op.input()), output(op.output());

    // Emit scalar form.
    output(indices.outputs) += input(indices.inputs);
  }
};

/// Emits the MLIR for the scalar part of the indexed generic op by:
///   1. Emitting load ops for each input and output view in order. This is
///      achieved by applying the appropriate input or output map to the
///      enclosing induction variables.
///   2. Emitting a call to `op.fun()` that takes as arguments the induction
///      variables and the scalars from point 1. above.
///   3. Emitting store ops to store the results of 2. to the output views.
///
/// An example output may resemble:
///
/// ```
///    scf.for %i = %c0 to %0 step %c1 {
///      scf.for %j = %c0 to %1 step %c1 {
///        scf.for %k = %c0 to %4 step %c1 {
///          %11 = load %arg0[%i, %j] :
///            memref<?x?xf32, stride_specification>
///          %12 = load %arg1[%i, %j, %k] :
///            memref<?x?x?xf32, stride_specification>
///          %13 = load %arg2[%i, %k, %j] :
///            memref<?x?x?xf32, stride_specification>
///          %14:2 = call @foo(%i, %j, %k, %11, %12, %13) :
///            (index, index, index, f32, f32, f32) -> (f32, f32)
///          store %14#0, %arg1[%i, %j, %k] :
///            memref<?x?x?xf32, stride_specification>
///          store %14#1, %arg2[%i, %k, %j] :
///            memref<?x?x?xf32, stride_specification>
///       }
///      }
///    }
/// ```
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, IndexedGenericOp> {
public:
  static void emitScalarImplementation(ArrayRef<Value> allIvs,
                                       IndexedGenericOp indexedGenericOp) {
    assert(indexedGenericOp.hasBufferSemantics() &&
           "expected linalg op with buffer semantics");
    auto &b = ScopedContext::getBuilderRef();
    auto loc = ScopedContext::getLocation();
    unsigned nInputs = indexedGenericOp.getNumInputs();
    unsigned nOutputs = indexedGenericOp.getNumOutputs();
    unsigned nLoops = allIvs.size();
    SmallVector<Value, 4> indexedValues;
    indexedValues.reserve(nLoops + nInputs + nOutputs);
    for (unsigned i = 0; i < nLoops; ++i)
      indexedValues.push_back(allIvs[i]);

    // TODO(mravishankar): Avoid the loads if the corresponding argument of the
    // region has no uses.
    // 1.a. Emit load from input views.
    for (unsigned i = 0; i < nInputs; ++i) {
      auto indexing = makeCanonicalAffineApplies(
          b, loc, indexedGenericOp.getInputIndexingMap(i), allIvs);
      // Passing input i through IndexedValueType emits the proper load op.
      indexedValues.push_back(
          IndexedValueType(indexedGenericOp.getInput(i))(indexing));
    }
    // 1.b. Emit load from output views.
    for (unsigned i = 0; i < nOutputs; ++i) {
      auto indexing = makeCanonicalAffineApplies(
          b, loc, indexedGenericOp.getOutputIndexingMap(i), allIvs);
      // Passing output i through IndexedValueType emits the proper load op.
      indexedValues.push_back(
          IndexedValueType(indexedGenericOp.getOutputBuffer(i))(indexing));
    }

    // TODO(ntv): When a region inliner exists, use it.
    // 2. Inline region, currently only works for a single basic block.
    // 3. Emit store.
    SmallVector<SmallVector<Value, 8>, 8> indexing;
    SmallVector<Value, 8> outputBuffers;
    for (unsigned i = 0; i < nOutputs; ++i) {
      indexing.push_back(makeCanonicalAffineApplies(
          b, loc, indexedGenericOp.getOutputIndexingMap(i), allIvs));
      outputBuffers.push_back(indexedGenericOp.getOutputBuffer(i));
    }
    inlineRegionAndEmitStore<IndexedValueType>(indexedGenericOp, indexedValues,
                                               indexing, outputBuffers);
  }
};

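/// Generic loop-nest emission for a concrete Linalg op: inverts the
/// concatenated indexing maps to recover loop sizes from the view sizes,
/// generates the `LoopTy` nest (scf.for, affine.for or scf.parallel), and
/// emits the scalar body through the matching LinalgScopedEmitter
/// specialization. A 0-D op (empty inverted map) only gets its scalar body,
/// with no enclosing loops. Returns llvm::None when the concatenated maps are
/// not invertible or when the induction variables cannot be recovered as
/// block arguments of the generated loops.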
template <typename LoopTy, typename ConcreteOpTy>
Optional<LinalgLoops> linalgOpToLoopsImpl(Operation *op, OpBuilder &builder) {
  using IndexedValueTy = typename GenerateLoopNest<LoopTy>::IndexedValueTy;

  ScopedContext scope(builder, op->getLoc());

  // The flattened loopToOperandRangesMaps is expected to be an invertible
  // permutation map (which is asserted in the inverse calculation).
  auto linalgOp = cast<ConcreteOpTy>(op);
  assert(linalgOp.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  auto nPar = linalgOp.getNumParallelLoops();
  auto nRed = linalgOp.getNumReductionLoops();
  auto nWin = linalgOp.getNumWindowLoops();
  auto nLoops = nPar + nRed + nWin;
  auto mapsRange =
      linalgOp.indexing_maps().template getAsRange<AffineMapAttr>();
  auto maps = llvm::to_vector<8>(
      llvm::map_range(mapsRange, [](AffineMapAttr a) { return a.getValue(); }));
  AffineMap invertedMap = inversePermutation(concatAffineMaps(maps));
  if (!invertedMap)
    return {};
  if (invertedMap.isEmpty()) {
    LinalgScopedEmitter<IndexedValueTy, ConcreteOpTy>::emitScalarImplementation(
        {}, linalgOp);
    return LinalgLoops();
  }

  SmallVector<Value, 4> allIvs(nLoops);
  auto loopRanges =
      emitLoopRanges(scope.getBuilderRef(), scope.getLocation(), invertedMap,
                     getViewSizes(builder, linalgOp));
  assert(loopRanges.size() == allIvs.size());
  GenerateLoopNest<LoopTy>::doit(
      allIvs, loopRanges, linalgOp.iterator_types().getValue(), [&] {
        SmallVector<Value, 4> allIvValues(allIvs.begin(), allIvs.end());
        LinalgScopedEmitter<IndexedValueTy,
                            ConcreteOpTy>::emitScalarImplementation(allIvValues,
                                                                    linalgOp);
      });
  // Number of loop ops might be different from the number of ivs since some
  // loops like affine.parallel and scf.parallel have multiple ivs.
  llvm::SetVector<Operation *> loopSet;
  for (Value iv : allIvs) {
    if (!iv)
      return {};
    // The induction variable is a block argument of the entry block of the
    // loop operation.
    BlockArgument ivVal = iv.dyn_cast<BlockArgument>();
    if (!ivVal)
      return {};
    loopSet.insert(ivVal.getOwner()->getParentOp());
  }
  LinalgLoops loops(loopSet.begin(), loopSet.end());
  return loops;
}

template <typename LoopType, typename ConcreteOp>
class LinalgRewritePattern : public RewritePattern {
public:
  explicit LinalgRewritePattern(MLIRContext *context)
      : RewritePattern(ConcreteOp::getOperationName(), 1, context) {}

  LogicalResult matchAndRewrite(Operation *op,
                                PatternRewriter &rewriter) const override {
    if (!linalgOpToLoopsImpl<LoopType, ConcreteOp>(op, rewriter))
      return failure();
    rewriter.eraseOp(op);
    return success();
  }
};

/// Helper classes for type list expansion.
template <typename LoopType, typename... LinalgOps>
class RewritePatternList;

template <typename LoopType>
class RewritePatternList<LoopType> {
public:
  static void build(OwningRewritePatternList &patterns, MLIRContext *ctx) {}
};

template <typename LoopType, typename ConcreteOp, typename... LinalgOps>
class RewritePatternList<LoopType, ConcreteOp, LinalgOps...> {
public:
  static void build(OwningRewritePatternList &patterns, MLIRContext *ctx) {
    patterns.insert<LinalgRewritePattern<LoopType, ConcreteOp>>(ctx);
    RewritePatternList<LoopType, LinalgOps...>::build(patterns, ctx);
  }
};

/// Populate the given list with patterns that convert from Linalg to loops.
template <typename LoopType>
void FillRewritePatterns(OwningRewritePatternList &patterns, MLIRContext *ctx) {
  RewritePatternList<LoopType,
#define GET_OP_LIST
#include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.cpp.inc"
                     >::build(patterns, ctx);
}

/// Local folding pattern for AffineApplyOp that we can apply greedily.
/// This replaces AffineApplyOp by the proper value in cases where the
/// associated map is trivial.
/// A trivial map here is defined as a map with a single result and either:
///   1. No operands and a single AffineConstantExpr result.
///   2. One operand and a single AffineDimExpr result.
///   3. One operand and a single AffineSymbolExpr result.
///
/// In the first case, the AffineApplyOp is replaced by a new constant. In the
/// other cases, it is replaced by its unique operand.
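/// For example, `affine.apply affine_map<() -> (42)>()` is replaced by
/// `constant 42 : index`, and `affine.apply affine_map<(d0) -> (d0)>(%i)` is
/// replaced by `%i`.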
struct FoldAffineOp : public RewritePattern {
  FoldAffineOp(MLIRContext *context)
      : RewritePattern(AffineApplyOp::getOperationName(), 0, context) {}

  LogicalResult matchAndRewrite(Operation *op,
                                PatternRewriter &rewriter) const override {
    AffineApplyOp affineApplyOp = cast<AffineApplyOp>(op);
    auto map = affineApplyOp.getAffineMap();
    if (map.getNumResults() != 1 || map.getNumInputs() > 1)
      return failure();

    AffineExpr expr = map.getResult(0);
    if (map.getNumInputs() == 0) {
      if (auto val = expr.dyn_cast<AffineConstantExpr>()) {
        rewriter.replaceOpWithNewOp<ConstantIndexOp>(op, val.getValue());
        return success();
      }
      return failure();
    }
    if (expr.dyn_cast<AffineDimExpr>() || expr.dyn_cast<AffineSymbolExpr>()) {
      rewriter.replaceOp(op, op->getOperand(0));
      return success();
    }
    return failure();
  }
};
} // namespace

template <typename LoopType>
static void lowerLinalgToLoopsImpl(Operation *op, MLIRContext *context) {
  OwningRewritePatternList patterns;
  // Canonicalization and folding patterns applied greedily allow cleaning up
  // the emitted IR on the fly.
  // TODO(ntv) fold view and subview ops?
  FillRewritePatterns<LoopType>(patterns, context);
  DimOp::getCanonicalizationPatterns(patterns, context);
  AffineApplyOp::getCanonicalizationPatterns(patterns, context);
  patterns.insert<FoldAffineOp>(context);
  // Just apply the patterns greedily.
  applyPatternsAndFoldGreedily(op, patterns);
}

namespace {
struct LowerToAffineLoops
    : public LinalgLowerToAffineLoopsBase<LowerToAffineLoops> {
  void runOnFunction() override {
    lowerLinalgToLoopsImpl<AffineForOp>(getFunction(), &getContext());
  }
};
struct LowerToLoops : public LinalgLowerToLoopsBase<LowerToLoops> {
  void runOnFunction() override {
    lowerLinalgToLoopsImpl<scf::ForOp>(getFunction(), &getContext());
  }
};
struct LowerToParallelLoops
    : public LinalgLowerToParallelLoopsBase<LowerToParallelLoops> {
  void runOnFunction() override {
    lowerLinalgToLoopsImpl<scf::ParallelOp>(getFunction(), &getContext());
  }
};
} // namespace

std::unique_ptr<OperationPass<FuncOp>> mlir::createConvertLinalgToLoopsPass() {
  return std::make_unique<LowerToLoops>();
}

std::unique_ptr<OperationPass<FuncOp>>
mlir::createConvertLinalgToParallelLoopsPass() {
  return std::make_unique<LowerToParallelLoops>();
}

std::unique_ptr<OperationPass<FuncOp>>
mlir::createConvertLinalgToAffineLoopsPass() {
  return std::make_unique<LowerToAffineLoops>();
}

/// Emits a loop nest with the proper body for `op`.
template <typename LoopTy, typename ConcreteOp>
Optional<LinalgLoops> mlir::linalg::linalgLowerOpToLoops(OpBuilder &builder,
                                                         Operation *op) {
  return linalgOpToLoopsImpl<LoopTy, ConcreteOp>(op, builder);
}

/// Emits a loop nest of `scf.for` with the proper body for `op`.
template <typename ConcreteOp>
LogicalResult mlir::linalg::linalgOpToLoops(OpBuilder &builder, Operation *op) {
  Optional<LinalgLoops> loops =
      linalgLowerOpToLoops<scf::ForOp, ConcreteOp>(builder, op);
  return loops ? success() : failure();
}

/// Emits a loop nest of `affine.for` with the proper body for `op`.
template <typename ConcreteOp>
LogicalResult mlir::linalg::linalgOpToAffineLoops(OpBuilder &builder,
                                                  Operation *op) {
  Optional<LinalgLoops> loops =
      linalgLowerOpToLoops<AffineForOp, ConcreteOp>(builder, op);
  return loops ? success() : failure();
}

/// Emits a loop nest of `scf.parallel` with the proper body for `op`.
template <typename ConcreteOp>
LogicalResult mlir::linalg::linalgOpToParallelLoops(OpBuilder &builder,
                                                    Operation *op) {
  Optional<LinalgLoops> loops =
      linalgLowerOpToLoops<scf::ParallelOp, ConcreteOp>(builder, op);
  return loops ? success() : failure();
}

// TODO: Make these instantiations more future-proof to avoid the need to
// update them as soon as we add new ops.
#define INSTANTIATE_LINALG_OP_TO_LOOPS(OP_TYPE)                                \
  template LogicalResult mlir::linalg::linalgOpToLoops<OP_TYPE>(               \
      OpBuilder & builder, Operation * op);                                    \
  template LogicalResult mlir::linalg::linalgOpToAffineLoops<OP_TYPE>(         \
      OpBuilder & builder, Operation * op);                                    \
  template LogicalResult mlir::linalg::linalgOpToParallelLoops<OP_TYPE>(       \
      OpBuilder & builder, Operation * op);                                    \
  template Optional<LinalgLoops>                                               \
      mlir::linalg::linalgLowerOpToLoops<scf::ParallelOp, OP_TYPE>(            \
          OpBuilder & builder, Operation * op);

INSTANTIATE_LINALG_OP_TO_LOOPS(CopyOp)
INSTANTIATE_LINALG_OP_TO_LOOPS(FillOp)
INSTANTIATE_LINALG_OP_TO_LOOPS(DotOp)
INSTANTIATE_LINALG_OP_TO_LOOPS(MatvecOp)
INSTANTIATE_LINALG_OP_TO_LOOPS(MatmulOp)
INSTANTIATE_LINALG_OP_TO_LOOPS(ConvOp)
INSTANTIATE_LINALG_OP_TO_LOOPS(PoolingMaxOp)
INSTANTIATE_LINALG_OP_TO_LOOPS(PoolingMinOp)
INSTANTIATE_LINALG_OP_TO_LOOPS(PoolingSumOp)
INSTANTIATE_LINALG_OP_TO_LOOPS(GenericOp)
INSTANTIATE_LINALG_OP_TO_LOOPS(IndexedGenericOp)