1 //===- Loops.cpp - conversion from Linalg named and generic ops to loops --===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "PassDetail.h"
10 #include "mlir/Dialect/Affine/EDSC/Intrinsics.h"
11 #include "mlir/Dialect/Linalg/EDSC/FoldedIntrinsics.h"
12 #include "mlir/Dialect/Linalg/IR/LinalgOps.h"
13 #include "mlir/Dialect/Linalg/IR/LinalgTypes.h"
14 #include "mlir/Dialect/Linalg/Passes.h"
15 #include "mlir/Dialect/Linalg/Transforms/Transforms.h"
16 #include "mlir/Dialect/Linalg/Utils/Utils.h"
17 #include "mlir/Dialect/SCF/EDSC/Builders.h"
18 #include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
19 #include "mlir/IR/AffineExpr.h"
20 #include "mlir/IR/AffineMap.h"
21 #include "mlir/IR/BlockAndValueMapping.h"
22 #include "mlir/Support/LLVM.h"
23 #include "mlir/Transforms/DialectConversion.h"
24 #include "mlir/Transforms/FoldUtils.h"
25 
26 #include "llvm/ADT/TypeSwitch.h"
27 
28 using namespace mlir;
29 using namespace mlir::edsc;
30 using namespace mlir::edsc::intrinsics;
31 using namespace mlir::linalg;
32 
33 using edsc::op::operator+;
34 
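/// Emits one affine.apply per result expression of `map`, canonicalizing each
/// single-result map and its operands first, and returns the resulting values.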
35 static SmallVector<Value, 8> makeCanonicalAffineApplies(OpBuilder &b,
36                                                         Location loc,
37                                                         AffineMap map,
38                                                         ArrayRef<Value> vals) {
39   if (map.isEmpty())
40     return {};
41 
42   assert(map.getNumInputs() == vals.size());
43   SmallVector<Value, 8> res;
44   res.reserve(map.getNumResults());
45   auto dims = map.getNumDims();
46   for (auto e : map.getResults()) {
47     auto exprMap = AffineMap::get(dims, map.getNumSymbols(), e);
48     SmallVector<Value, 4> operands(vals.begin(), vals.end());
49     canonicalizeMapAndOperands(&exprMap, &operands);
50     res.push_back(affine_apply(exprMap, operands));
51   }
52   return res;
53 }
54 
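/// Permutes `ivs` according to `permutation` when one is provided; otherwise
/// returns `ivs` unchanged.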
55 static SmallVector<Value, 4> permuteIvs(ArrayRef<Value> ivs,
56                                         Optional<AffineMap> permutation) {
57   return permutation ? applyMapToValues(ScopedContext::getBuilderRef(),
58                                         ScopedContext::getLocation(),
59                                         permutation.getValue(), ivs)
60                      : SmallVector<Value, 4>(ivs.begin(), ivs.end());
61 }
62 
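/// Inlines the single-block region of `op` by cloning its body with the block
/// arguments mapped to `indexedValues`, then stores the operands of the
/// terminating linalg.yield into `outputBuffers` at the positions given by
/// `indexing`.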
63 template <typename IndexedValueType, typename OpType>
64 static void inlineRegionAndEmitStore(OpType op, ArrayRef<Value> indexedValues,
65                                      ArrayRef<SmallVector<Value, 8>> indexing,
66                                      ArrayRef<Value> outputBuffers) {
67   assert(op.getOperation()->getNumRegions() == 1 &&
68          "Expected single region op");
69   auto &b = ScopedContext::getBuilderRef();
70   auto &block = op.getOperation()->getRegion(0).front();
71   BlockAndValueMapping map;
72   map.map(block.getArguments(), indexedValues);
73   for (auto &op : block.without_terminator()) {
74     assert(op.getNumRegions() == 0 && "expected a non-nested region");
75     auto *newOp = b.clone(op, map);
76     map.map(op.getResults(), newOp->getResults());
77   }
78 
79   Operation &terminator = block.back();
  assert(isa<linalg::YieldOp>(terminator) &&
         "expected a yield op at the end of the region");
82   for (unsigned i = 0, e = terminator.getNumOperands(); i < e; ++i) {
83     IndexedValueType O(outputBuffers[i]);
84     O(indexing[i]) = map.lookupOrDefault(terminator.getOperand(i));
85   }
86 }
87 
// Holds the input and output indices of a SingleInputPoolingOp `op`, as
// computed by `getInputAndOutputIndices` from the enclosing induction
// variables.
90 struct InputAndOutputIndices {
91   SmallVector<Value, 8> inputs;
92   SmallVector<Value, 8> outputs;
93 };
94 template <typename SingleInputPoolingOp>
95 static InputAndOutputIndices getInputAndOutputIndices(ArrayRef<Value> allIvs,
96                                                       SingleInputPoolingOp op) {
97   auto &b = ScopedContext::getBuilderRef();
98   auto loc = ScopedContext::getLocation();
99   auto mapsRange = op.indexing_maps().template getAsRange<AffineMapAttr>();
100   auto maps = llvm::to_vector<8>(
101       llvm::map_range(mapsRange, [](AffineMapAttr a) { return a.getValue(); }));
102   return InputAndOutputIndices{
103       makeCanonicalAffineApplies(b, loc, maps[0], allIvs),
104       makeCanonicalAffineApplies(b, loc, maps[2], allIvs)};
105 }
106 
107 /// Emits the MLIR for the scalar part of the generic op by:
108 ///   1. Emitting load ops for each input and output view in order. This is
109 ///      achieved by applying the appropriate input or output map to the
110 ///      enclosing induction variables.
111 ///   2. Emitting a call to `op.fun()` that takes as arguments the scalars
112 ///      from point 1. above.
113 ///   3. Emitting store ops to store the results of 2. to the output
114 ///      views.
115 ///
116 /// An example output may resemble:
117 ///
118 /// ```
119 ///    scf.for %i = %c0 to %0 step %c1 {
120 ///      scf.for %j = %c0 to %1 step %c1 {
121 ///        scf.for %k = %c0 to %4 step %c1 {
122 ///          %11 = load %arg0[%i, %j] :
123 ///            memref<?x?xf32, stride_specification>
124 ///          %12 = load %arg1[%i, %j, %k] :
125 ///            memref<?x?x?xf32, stride_specification>
126 ///          %13 = load %arg2[%i, %k, %j] :
127 ///            memref<?x?x?xf32, stride_specification>
128 ///          %14:2 = call @foo(%11, %12, %13) : (f32, f32, f32) -> (f32, f32)
///          store %14#0, %arg1[%i, %j, %k] :
///            memref<?x?x?xf32, stride_specification>
///          store %14#1, %arg2[%i, %k, %j] :
///            memref<?x?x?xf32, stride_specification>
133 ///       }
134 ///      }
135 ///    }
136 /// ```
137 template <typename IndexedValueType>
138 static void emitScalarImplementation(ArrayRef<Value> allIvs,
139                                      LinalgOp linalgOp) {
140   assert(linalgOp.hasBufferSemantics() &&
141          "expected linalg op with buffer semantics");
142   auto &b = ScopedContext::getBuilderRef();
143   auto loc = ScopedContext::getLocation();
144   unsigned nInputs = linalgOp.getNumInputs();
145   unsigned nOutputs = linalgOp.getNumOutputs();
146   SmallVector<Value, 4> indexedValues;
147   indexedValues.reserve(nInputs + nOutputs);
148 
149   auto attr = linalgOp.template getAttrOfType<IntegerAttr>("symbol_source");
150   auto allIvsPlusDims = SmallVector<Value, 4>(allIvs.begin(), allIvs.end());
151   if (attr) {
152     auto operand = linalgOp.getOperation()->getOperand(attr.getInt());
153     auto shapedType = operand.getType().template cast<ShapedType>();
154     allIvsPlusDims.reserve(allIvs.size() + shapedType.getRank());
155     for (unsigned idx = 0, e = shapedType.getRank(); idx < e; ++idx)
156       allIvsPlusDims.push_back(b.create<DimOp>(loc, operand, idx));
157   }
158 
159   // TODO: Avoid the loads if the corresponding argument of the
160   // region has no uses.
161   // 1.a. Emit load from input views.
162   for (unsigned i = 0; i < nInputs; ++i) {
163     auto indexing = makeCanonicalAffineApplies(
164         b, loc, linalgOp.getInputIndexingMap(i), allIvsPlusDims);
165     // Passing through IndexedValueType emits the proper load operation.
166     indexedValues.push_back(IndexedValueType(linalgOp.getInput(i))(indexing));
167   }
168   // 1.b. Emit load from output views.
169   for (unsigned i = 0; i < nOutputs; ++i) {
170     auto indexing = makeCanonicalAffineApplies(
171         b, loc, linalgOp.getOutputIndexingMap(i), allIvsPlusDims);
172     // Passing through IndexedValueType emits the proper load operation.
173     indexedValues.push_back(
174         IndexedValueType(linalgOp.getOutputBuffer(i))(indexing));
175   }
176 
177   // TODO: When a region inliner exists, use it.
178   // 2. Inline region, currently only works for a single basic block.
179   // 3. Emit store.
180   SmallVector<SmallVector<Value, 8>, 8> indexing;
181   SmallVector<Value, 8> outputBuffers;
182   for (unsigned i = 0; i < nOutputs; ++i) {
183     indexing.push_back(makeCanonicalAffineApplies(
184         b, loc, linalgOp.getOutputIndexingMap(i), allIvsPlusDims));
185     outputBuffers.push_back(linalgOp.getOutputBuffer(i));
186   }
187   inlineRegionAndEmitStore<IndexedValueType>(linalgOp, indexedValues, indexing,
188                                              outputBuffers);
189 }
190 
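/// Emits the scalar body of a linalg.copy: loads from the input at the
/// (optionally permuted) induction variables and stores the value to the
/// output at its own (optionally permuted) indices.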
191 template <typename IndexedValueType>
192 static void emitScalarImplementation(ArrayRef<Value> allIvs, CopyOp copyOp) {
193   assert(copyOp.hasBufferSemantics() &&
194          "expected linalg op with buffer semantics");
195   auto nPar = copyOp.getNumParallelLoops();
196   assert(nPar == allIvs.size());
197   auto inputIvs =
198       permuteIvs(allIvs.take_front(nPar), copyOp.inputPermutation());
199   auto outputIvs =
200       permuteIvs(allIvs.take_front(nPar), copyOp.outputPermutation());
201   SmallVector<Value, 8> iivs(inputIvs.begin(), inputIvs.end());
202   SmallVector<Value, 8> oivs(outputIvs.begin(), outputIvs.end());
203   IndexedValueType O(copyOp.getOutputBuffer(0)), I(copyOp.getInput(0));
204   // Emit the proper scalar assignment, whether we are dealing with a 0-D or
205   // an n-D loop nest; with or without permutations.
206   // clang-format off
207     nPar > 0 ? O(oivs) = I(iivs) :
208                O() = I();
209   // clang-format on
210 }
211 
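/// Emits the scalar body of a linalg.fill: stores the fill value into the
/// output buffer at the current induction variables.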
212 template <typename IndexedValueType>
213 static void emitScalarImplementation(ArrayRef<Value> allIvs, FillOp fillOp) {
214   assert(fillOp.hasBufferSemantics() &&
215          "expected linalg op with buffer semantics");
216   auto nPar = fillOp.getNumParallelLoops();
217   assert(nPar == allIvs.size());
218   auto ivs = SmallVector<Value, 4>(allIvs.begin(), allIvs.begin() + nPar);
219   IndexedValueType O(fillOp.getOutputBuffer(0));
  // Emit the proper scalar assignment, whether we are dealing with a 0-D or
  // an n-D loop nest.
222   nPar > 0 ? O(ivs) = fillOp.value() : O() = fillOp.value();
223 }
224 
// Returns the value of the padded `input` tensor at `indices`: when an index
// along a padded dimension is out of bounds, `padValue` is returned instead.
// `skipPadding` lists the dimensions for which no padding is needed, e.g. the
// non-spatial dimensions for convolutions.
228 template <typename IndexedValueType>
229 Value getPaddedInput(Value input, ArrayRef<Value> indices,
230                      ArrayRef<int> skipPadding, Value padValue) {
231   // TODO: add a level of indirection to linalg.generic.
232 
233   IndexedValueType indexedInput(input);
234 
235   auto *context = ScopedContext::getContext();
236   Value zeroIndex = std_constant_index(0);
237   SmallVector<Value, 8> conds;
238   SmallVector<Value, 8> clampedImIdx;
239   for (auto iter : llvm::enumerate(indices)) {
240     int idx = iter.index();
241     auto dim = iter.value();
242     if (is_contained(skipPadding, idx)) {
243       clampedImIdx.push_back(dim);
244       continue;
245     }
246 
247     using edsc::op::sge;
248     using edsc::op::slt;
249     using edsc::op::operator||;
250     Value leftOutOfBound = slt(dim, zeroIndex);
251     if (conds.empty())
252       conds.push_back(leftOutOfBound);
253     else
254       conds.push_back(conds.back() || leftOutOfBound);
255     Value rightBound = std_dim(input, idx);
256     conds.push_back(conds.back() || (sge(dim, rightBound)));
257 
258     // When padding is involved, the indices will only be shifted to negative,
259     // so having a max op is enough.
260     auto maxMap = AffineMap::get(/*dimCount=*/1, 0,
261                                  {getAffineDimExpr(/*position=*/0, context),
262                                   getAffineConstantExpr(0, context)},
263                                  context);
264     clampedImIdx.push_back(affine_max(dim.getType(), maxMap, ValueRange{dim}));
265   }
266 
267   Value readInput = indexedInput(clampedImIdx);
268   return conds.empty() ? readInput
269                        : (Value)std_select(conds.back(), padValue, readInput);
270 }
271 
272 namespace {
273 
/// The padding value for a given Op depends on the semantics of the Op.
/// The identity value for ConvOp and PoolingSumOp is 0; for PoolingMaxOp it
/// is -inf or minInt; and for PoolingMinOp it is inf or maxInt.
277 template <typename OpType>
278 Attribute getPadValueAttr(Type type) {
279   llvm_unreachable("Unexpected op type for getPadValueAttr");
280   return {};
281 }
282 
283 template <>
284 Attribute getPadValueAttr<PoolingMaxOp>(Type type) {
285   auto &b = ScopedContext::getBuilderRef();
286   if (auto floatType = type.dyn_cast<FloatType>()) {
287     return b.getFloatAttr(
288         floatType,
289         APFloat::getInf(floatType.getFloatSemantics(), /*Negative*/ true));
290   }
291   if (auto intType = type.dyn_cast<IntegerType>()) {
292     unsigned width = intType.getWidth();
    // The select instruction used to lower PoolingMax uses a signed
    // comparison, so use a signed constant irrespective of the signedness of
    // the integer type.
296     return b.getIntegerAttr(intType, APInt::getSignedMinValue(width));
297   }
298   llvm_unreachable("Unsupported data type for PoolingMaxOp");
299   return {};
300 }
301 
302 template <>
303 Attribute getPadValueAttr<PoolingMinOp>(Type type) {
304   auto &b = ScopedContext::getBuilderRef();
305   if (auto floatType = type.dyn_cast<FloatType>()) {
306     return b.getFloatAttr(floatType,
307                           APFloat::getInf(floatType.getFloatSemantics()));
308   }
309   if (auto intType = type.dyn_cast<IntegerType>()) {
310     unsigned width = intType.getWidth();
    // The select instruction used to lower PoolingMin uses a signed
    // comparison, so use a signed constant irrespective of the signedness of
    // the integer type.
314     return b.getIntegerAttr(intType, APInt::getSignedMaxValue(width));
315   }
316   llvm_unreachable("Unsupported data type for PoolingMinOp");
317   return {};
318 }
319 
320 template <>
321 Attribute getPadValueAttr<PoolingSumOp>(Type type) {
322   auto &b = ScopedContext::getBuilderRef();
323   return b.getZeroAttr(type);
324 }
325 
326 template <>
327 Attribute getPadValueAttr<ConvOp>(Type type) {
328   auto &b = ScopedContext::getBuilderRef();
329   return b.getZeroAttr(type);
330 }
331 
332 } // namespace
333 
/// Returns true if `convOp` has non-zero padding.
335 static bool hasPadding(ConvOp convOp) {
336   for (unsigned i = 0, e = convOp.getNumSpatialDimensions(); i < e; ++i) {
337     if (convOp.getLowPad(i) > 0 || convOp.getHighPad(i) > 0)
338       return true;
339   }
340   return false;
341 }
342 
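/// Emits the scalar body of a linalg.conv: output += filter * input, reading
/// from a padded input when the op has non-zero padding.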
343 template <typename IndexedValueType>
344 static void emitScalarImplementation(ArrayRef<Value> allIvs, ConvOp convOp) {
345   assert(convOp.hasBufferSemantics() &&
346          "expected linalg op with buffer semantics");
347   auto &b = ScopedContext::getBuilderRef();
348   auto loc = ScopedContext::getLocation();
349   auto mapsRange = convOp.indexing_maps().getAsRange<AffineMapAttr>();
350   auto maps = llvm::to_vector<8>(
351       llvm::map_range(mapsRange, [](AffineMapAttr a) { return a.getValue(); }));
352   SmallVector<Value, 8> fIdx(
353       makeCanonicalAffineApplies(b, loc, maps[0], allIvs));
354   SmallVector<Value, 8> imIdx(
355       makeCanonicalAffineApplies(b, loc, maps[1], allIvs));
356   SmallVector<Value, 8> oIdx(
357       makeCanonicalAffineApplies(b, loc, maps[2], allIvs));
358 
359   IndexedValueType F(convOp.filter()), O(convOp.output());
360 
  // Emit scalar form. Padded conv involves an affine.max in the memory
  // access, which is not allowed by affine.load. Override to use a
  // StdIndexedValue when there is non-zero padding.
364   if (hasPadding(convOp)) {
365     Type type = convOp.input().getType().cast<MemRefType>().getElementType();
366     Value padValue = std_constant(type, getPadValueAttr<ConvOp>(type));
367     Value paddedInput = getPaddedInput<StdIndexedValue>(
368         convOp.input(), imIdx,
369         /* Only need to pad the window dimensions */
370         {0, static_cast<int>(imIdx.size()) - 1}, padValue);
371     O(oIdx) += F(fIdx) * paddedInput;
372   } else {
373     IndexedValueType I(convOp.input());
374     O(oIdx) += F(fIdx) * I(imIdx);
375   }
376 }
377 
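/// Returns true if `poolingOp` has non-zero padding in any window dimension.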
378 template <typename PoolingOp>
379 static bool hasPadding(PoolingOp poolingOp) {
380   for (unsigned i = 0, e = poolingOp.getNumWindowLoops(); i < e; ++i) {
381     if (poolingOp.getLowPad(i) > 0 || poolingOp.getHighPad(i) > 0)
382       return true;
383   }
384   return false;
385 }
386 
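/// Returns the pooling input value at `inputIndices`, going through the
/// padded-input path when `op` has non-zero padding.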
387 template <typename IndexedValueType, typename PoolingOp>
388 static Value getPoolingInput(PoolingOp op, ArrayRef<Value> inputIndices) {
389   if (hasPadding(op)) {
390     Type type =
391         op.input().getType().template cast<MemRefType>().getElementType();
392     Value padValue = std_constant(type, getPadValueAttr<PoolingOp>(type));
393     return getPaddedInput<StdIndexedValue>(op.input(), inputIndices,
394                                            /*Pad every dimension*/ {},
395                                            padValue);
396   }
397   IndexedValueType input(op.input());
398   return input(inputIndices);
399 }
400 
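/// Emits the scalar body shared by pooling-min and pooling-max: compares the
/// current output value with the (possibly padded) input value and stores the
/// smaller (PoolingMinOp) or larger (PoolingMaxOp) of the two.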
401 template <typename IndexedValueType, typename OpType>
402 void emitPoolingMinMaxScalarImplementation(ArrayRef<Value> allIvs, OpType op) {
403   InputAndOutputIndices indices = getInputAndOutputIndices(allIvs, op);
404   // Emit scalar form.
405   IndexedValueType output(op.output());
406   Value lhs = output(indices.outputs);
407   Value rhs = getPoolingInput<IndexedValueType>(op, indices.inputs);
408   using edsc::op::sgt;
409   using edsc::op::slt;
410   Value value = std::is_same<OpType, PoolingMinOp>()
411                     ? std_select(slt(lhs, rhs), lhs, rhs)
412                     : std_select(sgt(lhs, rhs), lhs, rhs);
413   output(indices.outputs) = value;
414 }
415 
416 template <typename IndexedValueType>
417 static void emitScalarImplementation(ArrayRef<Value> allIvs, PoolingMaxOp op) {
418   emitPoolingMinMaxScalarImplementation<IndexedValueType, PoolingMaxOp>(allIvs,
419                                                                         op);
420 }
421 
422 template <typename IndexedValueType>
423 static void emitScalarImplementation(ArrayRef<Value> allIvs, PoolingMinOp op) {
424   emitPoolingMinMaxScalarImplementation<IndexedValueType, PoolingMinOp>(allIvs,
425                                                                         op);
426 }
427 
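/// Emits the scalar body of a pooling-sum: accumulates the (possibly padded)
/// input value into the output.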
428 template <typename IndexedValueType>
429 static void emitScalarImplementation(ArrayRef<Value> allIvs, PoolingSumOp op) {
430   auto indices = getInputAndOutputIndices(allIvs, op);
431   IndexedValueType output(op.output());
432 
433   // Emit scalar form.
434   output(indices.outputs) +=
435       getPoolingInput<IndexedValueType>(op, indices.inputs);
436 }
437 
438 /// Emits the MLIR for the scalar part of the indexed generic op by:
439 ///   1. Emitting load ops for each input and output view in order. This is
440 ///      achieved by applying the appropriate input or output map to the
441 ///      enclosing induction variables.
442 ///   2. Emitting a call to `op.fun()` that takes as arguments the induction
443 ///      variables and the scalars from point 1. above.
444 ///   3. Emitting store ops to store the results of 2. to the output views.
445 ///
446 /// An example output may resemble:
447 ///
448 /// ```
449 ///    scf.for %i = %c0 to %0 step %c1 {
450 ///      scf.for %j = %c0 to %1 step %c1 {
451 ///        scf.for %k = %c0 to %4 step %c1 {
452 ///          %11 = load %arg0[%i, %j] :
453 ///            memref<?x?xf32, stride_specification>
454 ///          %12 = load %arg1[%i, %j, %k] :
455 ///            memref<?x?x?xf32, stride_specification>
456 ///          %13 = load %arg2[%i, %k, %j] :
457 ///            memref<?x?x?xf32, stride_specification>
458 ///          %14:2 = call @foo(%i, %j, %k, %11, %12, %13) :
459 ///            (index, index, index, f32, f32, f32) -> (f32, f32)
///          store %14#0, %arg1[%i, %j, %k] :
///            memref<?x?x?xf32, stride_specification>
///          store %14#1, %arg2[%i, %k, %j] :
///            memref<?x?x?xf32, stride_specification>
464 ///       }
465 ///      }
466 ///    }
467 /// ```
468 template <typename IndexedValueType>
469 static void emitScalarImplementation(ArrayRef<Value> allIvs,
470                                      IndexedGenericOp indexedGenericOp) {
471   assert(indexedGenericOp.hasBufferSemantics() &&
472          "expected linalg op with buffer semantics");
473   auto &b = ScopedContext::getBuilderRef();
474   auto loc = ScopedContext::getLocation();
475   unsigned nInputs = indexedGenericOp.getNumInputs();
476   unsigned nOutputs = indexedGenericOp.getNumOutputs();
477   unsigned nLoops = allIvs.size();
478   SmallVector<Value, 4> indexedValues;
479   indexedValues.reserve(nLoops + nInputs + nOutputs);
480   for (unsigned i = 0; i < nLoops; ++i)
481     indexedValues.push_back(allIvs[i]);
482 
483   // TODO: Avoid the loads if the corresponding argument of the
484   // region has no uses.
485   // 1.a. Emit load from input views.
486   for (unsigned i = 0; i < nInputs; ++i) {
487     auto indexing = makeCanonicalAffineApplies(
488         b, loc, indexedGenericOp.getInputIndexingMap(i), allIvs);
    // Passing input i through IndexedValueType emits the proper load
    // operation.
490     indexedValues.push_back(
491         IndexedValueType(indexedGenericOp.getInput(i))(indexing));
492   }
493   // 1.b. Emit load from output views.
494   for (unsigned i = 0; i < nOutputs; ++i) {
495     auto indexing = makeCanonicalAffineApplies(
496         b, loc, indexedGenericOp.getOutputIndexingMap(i), allIvs);
    // Passing output i through IndexedValueType emits the proper load
    // operation.
498     indexedValues.push_back(
499         IndexedValueType(indexedGenericOp.getOutputBuffer(i))(indexing));
500   }
501 
502   // TODO: When a region inliner exists, use it.
503   // 2. Inline region, currently only works for a single basic block.
504   // 3. Emit store.
505   SmallVector<SmallVector<Value, 8>, 8> indexing;
506   SmallVector<Value, 8> outputBuffers;
507   for (unsigned i = 0; i < nOutputs; ++i) {
508     indexing.push_back(makeCanonicalAffineApplies(
509         b, loc, indexedGenericOp.getOutputIndexingMap(i), allIvs));
510     outputBuffers.push_back(indexedGenericOp.getOutputBuffer(i));
511   }
512   inlineRegionAndEmitStore<IndexedValueType>(indexedGenericOp, indexedValues,
513                                              indexing, outputBuffers);
514 }
515 
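/// Generates a loop nest of type `LoopTy` for `op` and emits its scalar body
/// in the innermost loop. Returns the generated loop operations, or llvm::None
/// on failure.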
516 template <typename LoopTy>
517 static Optional<LinalgLoops> linalgOpToLoopsImpl(Operation *op,
518                                                  OpBuilder &builder) {
519   using IndexedValueTy = typename GenerateLoopNest<LoopTy>::IndexedValueTy;
520 
521   ScopedContext scope(builder, op->getLoc());
522 
523   // The flattened loopToOperandRangesMaps is expected to be an invertible
524   // permutation map (which is asserted in the inverse calculation).
525   auto linalgOp = cast<LinalgOp>(op);
526   assert(linalgOp.hasBufferSemantics() &&
527          "expected linalg op with buffer semantics");
528   auto mapsRange =
529       linalgOp.indexing_maps().template getAsRange<AffineMapAttr>();
530   auto maps = llvm::to_vector<8>(
531       llvm::map_range(mapsRange, [](AffineMapAttr a) { return a.getValue(); }));
532   SmallVector<Value, 8> sizes = getShape(builder, linalgOp);
533   AffineMap map = concatAffineMaps(maps);
534   auto loopRanges = emitLoopRanges(scope.getBuilderRef(), scope.getLocation(),
535                                    map, getShape(builder, linalgOp));
536   SmallVector<Value, 4> allIvs;
537   GenerateLoopNest<LoopTy>::doit(
538       loopRanges, /*iterInitArgs*/ {}, linalgOp.iterator_types().getValue(),
539       [&](ValueRange ivs, ValueRange iterArgs) -> scf::ValueVector {
540         assert(iterArgs.empty() && "unexpected iterArgs");
541         allIvs.append(ivs.begin(), ivs.end());
542         llvm::TypeSwitch<Operation *>(op)
543             .Case<CopyOp, FillOp, ConvOp, PoolingMaxOp, PoolingMinOp,
544                   PoolingSumOp, IndexedGenericOp, LinalgOp>([&](auto op) {
545               emitScalarImplementation<IndexedValueTy>(allIvs, op);
546             })
547             .Default([&](Operation *op) { assert(false && "unexpected op"); });
548         return scf::ValueVector{};
549       });
550   // Number of loop ops might be different from the number of ivs since some
551   // loops like affine.parallel and scf.parallel have multiple ivs.
552   llvm::SetVector<Operation *> loopSet;
553   for (Value iv : allIvs) {
554     if (!iv)
555       return {};
556     // The induction variable is a block argument of the entry block of the
557     // loop operation.
558     BlockArgument ivVal = iv.dyn_cast<BlockArgument>();
559     if (!ivVal)
560       return {};
561     loopSet.insert(ivVal.getOwner()->getParentOp());
562   }
563   LinalgLoops loops(loopSet.begin(), loopSet.end());
564   return loops;
565 }
566 
567 namespace {
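/// Rewrites any LinalgOp into a nest of `LoopType` loops and erases the
/// original op on success.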
568 template <typename LoopType>
569 class LinalgRewritePattern : public RewritePattern {
570 public:
571   LinalgRewritePattern() : RewritePattern(/*benefit=*/1, MatchAnyOpTypeTag()) {}
572 
573   LogicalResult matchAndRewrite(Operation *op,
574                                 PatternRewriter &rewriter) const override {
575     if (!isa<LinalgOp>(op))
576       return failure();
577     if (!linalgOpToLoopsImpl<LoopType>(op, rewriter))
578       return failure();
579     rewriter.eraseOp(op);
580     return success();
581   }
582 };
583 
584 struct FoldAffineOp;
585 } // namespace
586 
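/// Populates the patterns that lower linalg ops to `LoopType` loops, together
/// with local canonicalizations, and applies them greedily to `funcOp`.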
587 template <typename LoopType>
588 static void lowerLinalgToLoopsImpl(FuncOp funcOp, MLIRContext *context) {
589   OwningRewritePatternList patterns;
590   patterns.insert<LinalgRewritePattern<LoopType>>();
591   DimOp::getCanonicalizationPatterns(patterns, context);
592   AffineApplyOp::getCanonicalizationPatterns(patterns, context);
593   patterns.insert<FoldAffineOp>(context);
594   // Just apply the patterns greedily.
595   applyPatternsAndFoldGreedily(funcOp, patterns);
596 }
597 
598 namespace {
599 /// Local folding pattern for AffineApplyOp that we can apply greedily.
600 /// This replaces AffineApplyOp by the proper value in cases where the
601 /// associated map is trivial.
602 /// A trivial map here is defined as a map with a single result and either:
603 ///   1. Zero operand + returns a single AffineConstantExpr
604 ///   2. One operand + returns a single AffineDimExpr
605 ///   3. One operand + returns a single AffineSymbolExpr
///
607 /// In the first case, the AffineApplyOp is replaced by a new constant. In the
608 /// other cases, it is replaced by its unique operand.
609 struct FoldAffineOp : public RewritePattern {
610   FoldAffineOp(MLIRContext *context)
611       : RewritePattern(AffineApplyOp::getOperationName(), 0, context) {}
612 
613   LogicalResult matchAndRewrite(Operation *op,
614                                 PatternRewriter &rewriter) const override {
615     AffineApplyOp affineApplyOp = cast<AffineApplyOp>(op);
616     auto map = affineApplyOp.getAffineMap();
617     if (map.getNumResults() != 1 || map.getNumInputs() > 1)
618       return failure();
619 
620     AffineExpr expr = map.getResult(0);
621     if (map.getNumInputs() == 0) {
622       if (auto val = expr.dyn_cast<AffineConstantExpr>()) {
623         rewriter.replaceOpWithNewOp<ConstantIndexOp>(op, val.getValue());
624         return success();
625       }
626       return failure();
627     }
628     if (expr.dyn_cast<AffineDimExpr>() || expr.dyn_cast<AffineSymbolExpr>()) {
629       rewriter.replaceOp(op, op->getOperand(0));
630       return success();
631     }
632     return failure();
633   }
634 };
635 
636 struct LowerToAffineLoops
637     : public LinalgLowerToAffineLoopsBase<LowerToAffineLoops> {
638   void runOnFunction() override {
639     lowerLinalgToLoopsImpl<AffineForOp>(getFunction(), &getContext());
640   }
641 };
642 
643 struct LowerToLoops : public LinalgLowerToLoopsBase<LowerToLoops> {
644   void runOnFunction() override {
645     lowerLinalgToLoopsImpl<scf::ForOp>(getFunction(), &getContext());
646   }
647 };
648 
649 struct LowerToParallelLoops
650     : public LinalgLowerToParallelLoopsBase<LowerToParallelLoops> {
651   void runOnFunction() override {
652     lowerLinalgToLoopsImpl<scf::ParallelOp>(getFunction(), &getContext());
653   }
654 };
655 } // namespace
656 
657 std::unique_ptr<OperationPass<FuncOp>> mlir::createConvertLinalgToLoopsPass() {
658   return std::make_unique<LowerToLoops>();
659 }
660 
661 std::unique_ptr<OperationPass<FuncOp>>
662 mlir::createConvertLinalgToParallelLoopsPass() {
663   return std::make_unique<LowerToParallelLoops>();
664 }
665 
666 std::unique_ptr<OperationPass<FuncOp>>
667 mlir::createConvertLinalgToAffineLoopsPass() {
668   return std::make_unique<LowerToAffineLoops>();
669 }
670 
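/// Creates one Range per loop dimension of `map`: dimensions accessed through
/// a plain AffineDimExpr get a [0, size) range, while results that match the
/// `m + n - s floordiv 2` convolution pattern get the shifted bounds derived
/// below.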
671 SmallVector<Range, 4> mlir::linalg::emitLoopRanges(OpBuilder &b, Location loc,
672                                                    AffineMap map,
673                                                    ValueRange viewSizes) {
674   unsigned numDims = map.getNumDims(), numRes = map.getNumResults();
675   unsigned numSym = map.getNumSymbols();
676   assert(viewSizes.size() == numRes + numSym &&
677          "viewSizes must contain sizes of all views and values for symbols");
678   SmallVector<Range, 4> res(numDims);
679   for (unsigned idx = 0; idx < numRes; ++idx) {
680     auto result = map.getResult(idx);
681     if (auto d = result.dyn_cast<AffineDimExpr>()) {
682       if (res[d.getPosition()].offset)
683         continue;
684       res[d.getPosition()] =
685           Range{std_constant_index(0), viewSizes[idx], std_constant_index(1)};
686     }
687 
    // If the access pattern is of form (m, n)[s] -> (m + n - s floordiv 2),
    // then the bounds are:
    //   (s floordiv 2) <= m <= (size(m) + s floordiv 2 - s + 1),
    // where size(m) is the view size bound to the extra symbol introduced for
    // the upper bound. This is done statically for now.
693     if (auto binOp = result.dyn_cast<AffineBinaryOpExpr>()) {
694       auto lhs = binOp.getLHS().dyn_cast<AffineBinaryOpExpr>();
695       auto rhs = binOp.getRHS().dyn_cast<AffineBinaryOpExpr>();
696       if (!lhs || !rhs || binOp.getKind() != AffineExprKind::Add ||
697           lhs.getKind() != AffineExprKind::Add ||
698           rhs.getKind() != mlir::AffineExprKind::Mul)
699         continue;
700 
701       auto m = lhs.getLHS().dyn_cast<AffineDimExpr>();
702       auto n = lhs.getRHS().dyn_cast<AffineDimExpr>();
703       auto fDiv = rhs.getLHS().dyn_cast<AffineBinaryOpExpr>();
704       auto minusOne = rhs.getRHS().dyn_cast<AffineConstantExpr>();
705       if (!m || !n || !fDiv || !minusOne ||
706           fDiv.getKind() != AffineExprKind::FloorDiv ||
707           fDiv.getLHS().getKind() != AffineExprKind::SymbolId ||
708           fDiv.getRHS().getKind() != AffineExprKind::Constant)
709         continue;
710 
711       auto s = fDiv.getLHS().dyn_cast<AffineSymbolExpr>();
712       if (minusOne.getValue() != -1)
713         continue;
714 
715       int mPos = m.getPosition();
716       AffineExpr one = getAffineConstantExpr(1, s.getContext());
717       AffineExpr sizeOfM = getAffineSymbolExpr(numSym, s.getContext());
718       // Construction of upper bound (size(m) + s floordiv 2 - s + 1).
719       AffineExpr upperOffsetExpr = sizeOfM + fDiv + one - s;
720       AffineMap fromMap = AffineMap::get(numDims, numSym + 1, fDiv);
721       AffineMap toMap = AffineMap::get(numDims, numSym + 1, upperOffsetExpr);
722       SmallVector<Value, 8> values(viewSizes.begin(),
723                                    viewSizes.begin() + numDims);
724       values.insert(values.end(), viewSizes.begin() + numRes, viewSizes.end());
725       values.push_back(viewSizes[mPos]);
726       // Construction of the lower bound (s floordiv 2).
727       Value from = applyMapToValues(b, loc, fromMap, values).front();
728       Value to = applyMapToValues(b, loc, toMap, values).front();
729       res[mPos] = Range{from, to, std_constant_index(1)};
730     }
731   }
732   return res;
733 }
734 
735 /// Emits a loop nest with the proper body for `op`.
736 template <typename LoopTy>
737 Optional<LinalgLoops> mlir::linalg::linalgLowerOpToLoops(OpBuilder &builder,
738                                                          Operation *op) {
739   return linalgOpToLoopsImpl<LoopTy>(op, builder);
740 }
741 
742 template Optional<LinalgLoops>
743 mlir::linalg::linalgLowerOpToLoops<AffineForOp>(OpBuilder &builder,
744                                                 Operation *op);
745 template Optional<LinalgLoops>
746 mlir::linalg::linalgLowerOpToLoops<scf::ForOp>(OpBuilder &builder,
747                                                Operation *op);
748 template Optional<LinalgLoops>
749 mlir::linalg::linalgLowerOpToLoops<scf::ParallelOp>(OpBuilder &builder,
750                                                     Operation *op);
751 
752 /// Emits a loop nest of `affine.for` with the proper body for `op`.
753 LogicalResult mlir::linalg::linalgOpToAffineLoops(OpBuilder &builder,
754                                                   Operation *op) {
755   Optional<LinalgLoops> loops = linalgLowerOpToLoops<AffineForOp>(builder, op);
756   return loops ? success() : failure();
757 }
758 
759 /// Emits a loop nest of `scf.for` with the proper body for `op`.
760 LogicalResult mlir::linalg::linalgOpToLoops(OpBuilder &builder, Operation *op) {
761   Optional<LinalgLoops> loops = linalgLowerOpToLoops<scf::ForOp>(builder, op);
762   return loops ? success() : failure();
763 }
764 
765 /// Emits a loop nest of `scf.parallel` with the proper body for `op`.
766 LogicalResult mlir::linalg::linalgOpToParallelLoops(OpBuilder &builder,
767                                                     Operation *op) {
768   Optional<LinalgLoops> loops =
769       linalgLowerOpToLoops<scf::ParallelOp>(builder, op);
770   return loops ? success() : failure();
771 }
772