//===- Loops.cpp - conversion from Linalg named and generic ops to loops --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Dialect/Affine/EDSC/Intrinsics.h"
#include "mlir/Dialect/Linalg/EDSC/FoldedIntrinsics.h"
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Linalg/IR/LinalgTypes.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/SCF/EDSC/Builders.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/FoldUtils.h"

using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
using namespace mlir::linalg;

using edsc::op::operator+;

static SmallVector<Value, 8> makeCanonicalAffineApplies(OpBuilder &b,
                                                        Location loc,
                                                        AffineMap map,
                                                        ArrayRef<Value> vals) {
  if (map.isEmpty())
    return {};

  assert(map.getNumInputs() == vals.size());
  SmallVector<Value, 8> res;
  res.reserve(map.getNumResults());
  auto dims = map.getNumDims();
  for (auto e : map.getResults()) {
    auto exprMap = AffineMap::get(dims, map.getNumSymbols(), e);
    SmallVector<Value, 4> operands(vals.begin(), vals.end());
    canonicalizeMapAndOperands(&exprMap, &operands);
    res.push_back(affine_apply(exprMap, operands));
  }
  return res;
}

static SmallVector<Value, 4> permuteIvs(ArrayRef<Value> ivs,
                                        Optional<AffineMap> permutation) {
  return permutation ? applyMapToValues(ScopedContext::getBuilderRef(),
                                        ScopedContext::getLocation(),
                                        permutation.getValue(), ivs)
                     : SmallVector<Value, 4>(ivs.begin(), ivs.end());
}

/// Creates a number of ranges equal to the number of dimensions in the `map`.
/// The returned ranges correspond to the loop ranges, in the proper order, for
/// which new loops will be created.
/// The function supports only maps that are invertible and have results of type
/// DimExpr or (DimExpr + DimExpr - SymbolExpr floordiv ConstExpr).
/// It expects a non-inverted, concatenated map; if the map contains any
/// symbols, the trailing values in `viewSizes` are bound to them.
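/// As an illustrative sketch only (not taken from an actual test), for the map
///   (d0, d1, d2) -> (d0, d1, d2)
/// with viewSizes = [%s0, %s1, %s2], every result is a DimExpr and the
/// returned ranges are simply {offset = 0, size = %si, stride = 1} for each
/// dimension i; the (DimExpr + DimExpr - SymbolExpr floordiv ConstExpr) form
/// is handled by the special case in the loop body below.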
static SmallVector<SubViewOp::Range, 4> emitLoopRanges(OpBuilder &b,
                                                       Location loc,
                                                       AffineMap map,
                                                       ValueRange viewSizes) {
  unsigned numDims = map.getNumDims(), numRes = map.getNumResults();
  unsigned numSym = map.getNumSymbols();
  assert(viewSizes.size() == numRes + numSym &&
         "viewSizes must contain sizes of all views and values for symbols");
  SmallVector<SubViewOp::Range, 4> res(numDims);
  for (unsigned idx = 0; idx < numRes; ++idx) {
    auto result = map.getResult(idx);
    if (auto d = result.dyn_cast<AffineDimExpr>()) {
      if (res[d.getPosition()].offset)
        continue;
      res[d.getPosition()] = SubViewOp::Range{
          std_constant_index(0), viewSizes[idx], std_constant_index(1)};
    }

    // If the access pattern is of the form (m, n)[s] -> (m + n - s floordiv 2),
    // then the bounds are:
    //   (s floordiv 2) <= m <= (size(m) + s floordiv 2 - s + 1),
    // where the symbol s is bound to size(n). The bounds are constructed
    // statically below.
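    // As a purely illustrative example, for s = 5 this yields the range
    // [2, size(m) - 2), so that the access m + n - 2 stays within
    // [0, size(m)) for all n in [0, 5).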
    if (auto binOp = result.dyn_cast<AffineBinaryOpExpr>()) {
      auto lhs = binOp.getLHS().dyn_cast<AffineBinaryOpExpr>();
      auto rhs = binOp.getRHS().dyn_cast<AffineBinaryOpExpr>();
      if (!lhs || !rhs || binOp.getKind() != AffineExprKind::Add ||
          lhs.getKind() != AffineExprKind::Add ||
          rhs.getKind() != mlir::AffineExprKind::Mul)
        continue;

      auto m = lhs.getLHS().dyn_cast<AffineDimExpr>();
      auto n = lhs.getRHS().dyn_cast<AffineDimExpr>();
      auto fDiv = rhs.getLHS().dyn_cast<AffineBinaryOpExpr>();
      auto minusOne = rhs.getRHS().dyn_cast<AffineConstantExpr>();
      if (!m || !n || !fDiv || !minusOne ||
          fDiv.getKind() != AffineExprKind::FloorDiv ||
          fDiv.getLHS().getKind() != AffineExprKind::SymbolId ||
          fDiv.getRHS().getKind() != AffineExprKind::Constant)
        continue;

      auto s = fDiv.getLHS().dyn_cast<AffineSymbolExpr>();
      if (minusOne.getValue() != -1)
        continue;

      int mPos = m.getPosition();
      AffineExpr one = getAffineConstantExpr(1, s.getContext());
      AffineExpr sizeOfM = getAffineSymbolExpr(numSym, s.getContext());
      // Construction of upper bound (size(m) + s floordiv 2 - s + 1).
      AffineExpr upperOffsetExpr = sizeOfM + fDiv + one - s;
      AffineMap fromMap = AffineMap::get(numDims, numSym + 1, fDiv);
      AffineMap toMap = AffineMap::get(numDims, numSym + 1, upperOffsetExpr);
      SmallVector<Value, 8> values(viewSizes.begin(),
                                   viewSizes.begin() + numDims);
      values.insert(values.end(), viewSizes.begin() + numRes, viewSizes.end());
      values.push_back(viewSizes[mPos]);
      // Construction of the lower bound (s floordiv 2).
      Value from = applyMapToValues(b, loc, fromMap, values).front();
      Value to = applyMapToValues(b, loc, toMap, values).front();
      res[mPos] = SubViewOp::Range{from, to, std_constant_index(1)};
    }
  }
  return res;
}

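/// Inlines the single-block region of `op` at the current insertion point:
/// block arguments are replaced by `indexedValues`, the body ops are cloned,
/// and each operand of the terminating linalg.yield is stored into the
/// corresponding `outputBuffers` entry at the indices given by `indexing`.
/// `IndexedValueType` selects the flavor of the emitted stores (std vs.
/// affine).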
template <typename IndexedValueType, typename OpType>
static void inlineRegionAndEmitStore(OpType op, ArrayRef<Value> indexedValues,
                                     ArrayRef<SmallVector<Value, 8>> indexing,
                                     ArrayRef<Value> outputBuffers) {
  assert(op.getOperation()->getNumRegions() == 1 &&
         "Expected single region op");
  auto &b = ScopedContext::getBuilderRef();
  auto &block = op.region().front();
  BlockAndValueMapping map;
  map.map(block.getArguments(), indexedValues);
  for (auto &op : block.without_terminator()) {
    assert(op.getNumRegions() == 0 && "expected a non-nested region");
    auto *newOp = b.clone(op, map);
    map.map(op.getResults(), newOp->getResults());
  }

  Operation &terminator = block.back();
  assert(isa<YieldOp>(terminator) &&
         "expected a yield op at the end of the region");
  for (unsigned i = 0, e = terminator.getNumOperands(); i < e; ++i) {
    IndexedValueType O(outputBuffers[i]);
    O(indexing[i]) = map.lookupOrDefault(terminator.getOperand(i));
  }
}

/// Holds the input indices and output indices of a SingleInputPoolingOp `op`,
/// as computed by `getInputAndOutputIndices` below.
struct InputAndOutputIndices {
  SmallVector<Value, 8> inputs;
  SmallVector<Value, 8> outputs;
};
template <typename SingleInputPoolingOp>
static InputAndOutputIndices getInputAndOutputIndices(ArrayRef<Value> allIvs,
                                                      SingleInputPoolingOp op) {
  auto &b = ScopedContext::getBuilderRef();
  auto loc = ScopedContext::getLocation();
  auto mapsRange = op.indexing_maps().template getAsRange<AffineMapAttr>();
  auto maps = llvm::to_vector<8>(
      llvm::map_range(mapsRange, [](AffineMapAttr a) { return a.getValue(); }));
  return InputAndOutputIndices{
      makeCanonicalAffineApplies(b, loc, maps[0], allIvs),
      makeCanonicalAffineApplies(b, loc, maps[2], allIvs)};
}

namespace {

/// Emits the MLIR for the scalar part of the generic op by:
///   1. Emitting load ops for each input and output view in order. This is
///      achieved by applying the appropriate input or output map to the
///      enclosing induction variables.
///   2. Emitting a call to `op.fun()` that takes as arguments the scalars
///      from point 1. above.
///   3. Emitting store ops to store the results of 2. to the output
///      views.
///
/// An example output may resemble:
///
/// ```
///    scf.for %i = %c0 to %0 step %c1 {
///      scf.for %j = %c0 to %1 step %c1 {
///        scf.for %k = %c0 to %4 step %c1 {
///          %11 = load %arg0[%i, %j] :
///            memref<?x?xf32, stride_specification>
///          %12 = load %arg1[%i, %j, %k] :
///            memref<?x?x?xf32, stride_specification>
///          %13 = load %arg2[%i, %k, %j] :
///            memref<?x?x?xf32, stride_specification>
///          %14:2 = call @foo(%11, %12, %13) : (f32, f32, f32) -> (f32, f32)
///          store %14#0, %arg1[%i, %j, %k] :
///            memref<?x?x?xf32, stride_specification>
///          store %14#1, %arg2[%i, %k, %j] :
///            memref<?x?x?xf32, stride_specification>
///       }
///      }
///    }
/// ```
// TODO: need a LinalgStructuredOpInterface.
template <typename IndexedValueType, typename LinalgStructuredOpType>
void emitScalarImplementation(ArrayRef<Value> allIvs,
                              LinalgStructuredOpType linalgOp) {
  assert(linalgOp.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  auto &b = ScopedContext::getBuilderRef();
  auto loc = ScopedContext::getLocation();
  unsigned nInputs = linalgOp.getNumInputs();
  unsigned nOutputs = linalgOp.getNumOutputs();
  SmallVector<Value, 4> indexedValues;
  indexedValues.reserve(nInputs + nOutputs);

  auto attr = linalgOp.template getAttrOfType<IntegerAttr>("symbol_source");
  auto allIvsPlusDims = SmallVector<Value, 4>(allIvs.begin(), allIvs.end());
  if (attr) {
    auto operand = linalgOp.getOperand(attr.getInt());
    auto shapedType = operand.getType().template cast<ShapedType>();
    allIvsPlusDims.reserve(allIvs.size() + shapedType.getRank());
    for (unsigned idx = 0, e = shapedType.getRank(); idx < e; ++idx)
      allIvsPlusDims.push_back(b.create<DimOp>(loc, operand, idx));
  }

  // TODO: Avoid the loads if the corresponding argument of the
  // region has no uses.
  // 1.a. Emit load from input views.
  for (unsigned i = 0; i < nInputs; ++i) {
    auto indexing = makeCanonicalAffineApplies(
        b, loc, linalgOp.getInputIndexingMap(i), allIvsPlusDims);
    // Passing through IndexedValueType emits the proper load operation.
    indexedValues.push_back(IndexedValueType(linalgOp.getInput(i))(indexing));
  }
  // 1.b. Emit load from output views.
  for (unsigned i = 0; i < nOutputs; ++i) {
    auto indexing = makeCanonicalAffineApplies(
        b, loc, linalgOp.getOutputIndexingMap(i), allIvsPlusDims);
    // Passing through IndexedValueType emits the proper load operation.
    indexedValues.push_back(
        IndexedValueType(linalgOp.getOutputBuffer(i))(indexing));
  }

  // TODO: When a region inliner exists, use it.
  // 2. Inline region, currently only works for a single basic block.
  // 3. Emit store.
  SmallVector<SmallVector<Value, 8>, 8> indexing;
  SmallVector<Value, 8> outputBuffers;
  for (unsigned i = 0; i < nOutputs; ++i) {
    indexing.push_back(makeCanonicalAffineApplies(
        b, loc, linalgOp.getOutputIndexingMap(i), allIvsPlusDims));
    outputBuffers.push_back(linalgOp.getOutputBuffer(i));
  }
  inlineRegionAndEmitStore<IndexedValueType>(linalgOp, indexedValues, indexing,
                                             outputBuffers);
}

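/// Emits the scalar part of linalg.copy: a single assignment from the
/// (optionally permuted) input indices to the (optionally permuted) output
/// indices. As an illustrative sketch only, a 1-D copy without permutations
/// may lower to something resembling:
///
/// ```
///    scf.for %i = %c0 to %0 step %c1 {
///      %1 = load %arg0[%i] : memref<?xf32, stride_specification>
///      store %1, %arg1[%i] : memref<?xf32, stride_specification>
///    }
/// ```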
template <typename IndexedValueType>
void emitScalarImplementation(ArrayRef<Value> allIvs, CopyOp copyOp) {
  assert(copyOp.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  auto nPar = copyOp.getNumParallelLoops();
  assert(nPar == allIvs.size());
  auto inputIvs =
      permuteIvs(allIvs.take_front(nPar), copyOp.inputPermutation());
  auto outputIvs =
      permuteIvs(allIvs.take_front(nPar), copyOp.outputPermutation());
  SmallVector<Value, 8> iivs(inputIvs.begin(), inputIvs.end());
  SmallVector<Value, 8> oivs(outputIvs.begin(), outputIvs.end());
  IndexedValueType O(copyOp.getOutputBuffer(0)), I(copyOp.getInput(0));
  // Emit the proper scalar assignment, whether we are dealing with a 0-D or
  // an n-D loop nest; with or without permutations.
  // clang-format off
    nPar > 0 ? O(oivs) = I(iivs) :
               O() = I();
  // clang-format on
}

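/// Emits the scalar part of linalg.fill: a single assignment of the fill
/// value to the output buffer at the current induction variables (or a 0-D
/// assignment when there are no parallel loops).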
template <typename IndexedValueType>
void emitScalarImplementation(ArrayRef<Value> allIvs, FillOp fillOp) {
  assert(fillOp.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  auto nPar = fillOp.getNumParallelLoops();
  assert(nPar == allIvs.size());
  auto ivs = SmallVector<Value, 4>(allIvs.begin(), allIvs.begin() + nPar);
  IndexedValueType O(fillOp.getOutputBuffer(0));
  // Emit the proper scalar assignment, whether we are dealing with a 0-D or
  // an n-D loop nest; with or without permutations.
  nPar > 0 ? O(ivs) = fillOp.value() : O() = fillOp.value();
}

/// The following functions emit the scalar part of an N-D convolution op.
/// An N-D convolution has 2N loops:
///   1..N:    iterate over the output array *O* with iterators *m1, ..., mN*;
///   N+1..2N: iterate over the kernel *K* with iterators *n1, ..., nN*.
///
/// The scalar part accumulates products of input array *I* values with kernel
/// values. The accumulation expression therefore looks like:
///   O[m1, ..., mN] += I[m1 + n1, ..., mN + nN] * K[n1, ..., nN].
/// Note that the input array has to be padded in order to prevent
/// out-of-bounds accesses.
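///
/// As an illustrative sketch only (assuming an f32 element type and std
/// loads/stores), the 1-D case roughly lowers to:
///
/// ```
///    scf.for %m1 = %c0 to %0 step %c1 {
///      scf.for %n1 = %c0 to %1 step %c1 {
///        %i1 = affine.apply affine_map<(d0, d1) -> (d0 + d1)>(%m1, %n1)
///        %in = load %I[%i1] : memref<?xf32, stride_specification>
///        %k = load %K[%n1] : memref<?xf32, stride_specification>
///        %o = load %O[%m1] : memref<?xf32, stride_specification>
///        %p = mulf %in, %k : f32
///        %s = addf %o, %p : f32
///        store %s, %O[%m1] : memref<?xf32, stride_specification>
///      }
///    }
/// ```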
template <typename IndexedValueType>
void emitScalarImplementation(ArrayRef<Value> allIvs, Conv1DOp convOp) {
  assert(convOp.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  assert(allIvs.size() == 2);
  Value m1(allIvs[0]);
  Value n1(allIvs[1]);
  IndexedValueType I(convOp.getInput(0)), K(convOp.getInput(1)),
      O(convOp.getOutputBuffer(0));
  // Emit scalar form for the 1D conv case.
  Value i1 = m1 + n1;
  O(m1) = O(m1) + I(i1) * K(n1);
}

template <typename IndexedValueType>
void emitScalarImplementation(ArrayRef<Value> allIvs, Conv2DOp convOp) {
  assert(convOp.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  assert(allIvs.size() == 4);
  Value m1(allIvs[0]), m2(allIvs[1]);
  Value n1(allIvs[2]), n2(allIvs[3]);
  IndexedValueType I(convOp.getInput(0)), K(convOp.getInput(1)),
      O(convOp.getOutputBuffer(0));
  // Emit scalar form for the 2D conv case.
  Value i1 = m1 + n1;
  Value i2 = m2 + n2;
  O(m1, m2) = O(m1, m2) + I(i1, i2) * K(n1, n2);
}

template <typename IndexedValueType>
void emitScalarImplementation(ArrayRef<Value> allIvs, Conv3DOp convOp) {
  assert(convOp.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  assert(allIvs.size() == 6);
  Value m1(allIvs[0]), m2(allIvs[1]), m3(allIvs[2]);
  Value n1(allIvs[3]), n2(allIvs[4]), n3(allIvs[5]);
  IndexedValueType I(convOp.getInput(0)), K(convOp.getInput(1)),
      O(convOp.getOutputBuffer(0));
  // Emit scalar form for the 3D conv case.
  Value i1 = m1 + n1;
  Value i2 = m2 + n2;
  Value i3 = m3 + n3;
  O(m1, m2, m3) = O(m1, m2, m3) + I(i1, i2, i3) * K(n1, n2, n3);
}

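/// Returns the input value to use for the convolution. Without padding the
/// value is loaded directly from `im`. With padding, the window indices are
/// clamped to be non-negative (via an affine.max with zero), and the loaded
/// value is replaced by a zero constant whenever the accumulated
/// out-of-bounds conditions hold, using a select.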
template <typename IndexedValueType>
Value getConvOpInput(ConvOp convOp, StdIndexedValue im,
                     MutableArrayRef<Value> imIdx) {
  // TODO: add a level of indirection to linalg.generic.
  if (!convOp.padding())
    return im(imIdx);

  auto *context = ScopedContext::getContext();
  Value zeroIndex = std_constant_index(0);
  SmallVector<Value, 8> conds;
  SmallVector<Value, 8> clampedImIdx;
  for (auto iter : llvm::enumerate(imIdx)) {
    int idx = iter.index();
    auto dim = iter.value();
    // Only need to iterate over the window dimensions.
    if (idx == 0 || idx == static_cast<int>(imIdx.size()) - 1) {
      clampedImIdx.push_back(dim);
      continue;
    }

    using edsc::op::sge;
    using edsc::op::slt;
    using edsc::op::operator||;
    Value leftOutOfBound = slt(dim, zeroIndex);
    if (conds.empty())
      conds.push_back(leftOutOfBound);
    else
      conds.push_back(conds.back() || leftOutOfBound);
    Value rightBound = std_dim(convOp.input(), idx);
    conds.push_back(conds.back() || (sge(dim, rightBound)));

    // When padding is involved, the indices can only be shifted in the
    // negative direction, so clamping against zero with a max op is enough;
    // out-of-bounds accesses on the high side are masked by the select below.
    auto maxMap = AffineMap::get(/*dimCount=*/1, 0,
                                 {getAffineDimExpr(/*position=*/0, context),
                                  getAffineConstantExpr(0, context)},
                                 context);
    clampedImIdx.push_back(affine_max(dim.getType(), maxMap, ValueRange{dim}));
  }

  auto &b = ScopedContext::getBuilderRef();
  Type type = convOp.input().getType().cast<MemRefType>().getElementType();
  Value zero = std_constant(type, b.getZeroAttr(type));
  Value readInput = im(clampedImIdx);
  return conds.empty() ? readInput
                       : (Value)std_select(conds.back(), zero, readInput);
}

/// Returns true if `convOp` has non-zero padding.
static bool hasPadding(ConvOp convOp) {
  for (unsigned i = 0, e = convOp.getNumSpatialDimensions(); i < e; ++i) {
    if (convOp.getLowPad(i) > 0 || convOp.getHighPad(i) > 0)
      return true;
  }
  return false;
}

template <typename IndexedValueType>
static void emitScalarImplementation(ArrayRef<Value> allIvs, ConvOp convOp) {
  assert(convOp.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  auto &b = ScopedContext::getBuilderRef();
  auto loc = ScopedContext::getLocation();
  auto mapsRange = convOp.indexing_maps().getAsRange<AffineMapAttr>();
  auto maps = llvm::to_vector<8>(
      llvm::map_range(mapsRange, [](AffineMapAttr a) { return a.getValue(); }));
  SmallVector<Value, 8> fIdx(
      makeCanonicalAffineApplies(b, loc, maps[0], allIvs));
  SmallVector<Value, 8> imIdx(
      makeCanonicalAffineApplies(b, loc, maps[1], allIvs));
  SmallVector<Value, 8> oIdx(
      makeCanonicalAffineApplies(b, loc, maps[2], allIvs));

  IndexedValueType F(convOp.filter()), O(convOp.output());

  // Emit scalar form. A padded conv involves an affine.max in the memory
  // access, which is not allowed by affine.load. Override to use a
  // StdIndexedValue when there is non-zero padding.
  if (hasPadding(convOp)) {
    StdIndexedValue I(convOp.input());
    Value paddedInput = getConvOpInput<IndexedValueType>(convOp, I, imIdx);
    O(oIdx) += F(fIdx) * paddedInput;
  } else {
    IndexedValueType I(convOp.input());
    O(oIdx) += F(fIdx) * I(imIdx);
  }
}

template <typename IndexedValueType>
void emitScalarImplementation(ArrayRef<Value> allIvs, PoolingMaxOp op) {
  InputAndOutputIndices indices = getInputAndOutputIndices(allIvs, op);
  // Emit scalar form.
  IndexedValueType output(op.output());
  IndexedValueType input(op.input());
  Value lhs = output(indices.outputs);
  Value rhs = input(indices.inputs);
  using edsc::op::sgt;
  Value maxValue = std_select(sgt(lhs, rhs), lhs, rhs);
  output(indices.outputs) = maxValue;
}

template <typename IndexedValueType>
void emitScalarImplementation(ArrayRef<Value> allIvs, PoolingMinOp op) {
  InputAndOutputIndices indices = getInputAndOutputIndices(allIvs, op);
  // Emit scalar form.
  IndexedValueType output(op.output());
  IndexedValueType input(op.input());
  Value lhs = output(indices.outputs);
  Value rhs = input(indices.inputs);
  using edsc::op::slt;
  Value minValue = std_select(slt(lhs, rhs), lhs, rhs);
  output(indices.outputs) = minValue;
}
template <typename IndexedValueType>
void emitScalarImplementation(ArrayRef<Value> allIvs, PoolingSumOp op) {
  auto indices = getInputAndOutputIndices(allIvs, op);
  IndexedValueType input(op.input()), output(op.output());

  // Emit scalar form.
  output(indices.outputs) += input(indices.inputs);
}
/// Emits the MLIR for the scalar part of the indexed generic op by:
///   1. Emitting load ops for each input and output view in order. This is
///      achieved by applying the appropriate input or output map to the
///      enclosing induction variables.
///   2. Emitting a call to `op.fun()` that takes as arguments the induction
///      variables and the scalars from point 1. above.
///   3. Emitting store ops to store the results of 2. to the output views.
///
/// An example output may resemble:
///
/// ```
///    scf.for %i = %c0 to %0 step %c1 {
///      scf.for %j = %c0 to %1 step %c1 {
///        scf.for %k = %c0 to %4 step %c1 {
///          %11 = load %arg0[%i, %j] :
///            memref<?x?xf32, stride_specification>
///          %12 = load %arg1[%i, %j, %k] :
///            memref<?x?x?xf32, stride_specification>
///          %13 = load %arg2[%i, %k, %j] :
///            memref<?x?x?xf32, stride_specification>
///          %14:2 = call @foo(%i, %j, %k, %11, %12, %13) :
///            (index, index, index, f32, f32, f32) -> (f32, f32)
///          store %14#0, %arg1[%i, %j, %k] :
///            memref<?x?x?xf32, stride_specification>
///          store %14#1, %arg2[%i, %k, %j] :
///            memref<?x?x?xf32, stride_specification>
///       }
///      }
///    }
/// ```
template <typename IndexedValueType>
static void emitScalarImplementation(ArrayRef<Value> allIvs,
                                     IndexedGenericOp indexedGenericOp) {
  assert(indexedGenericOp.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  auto &b = ScopedContext::getBuilderRef();
  auto loc = ScopedContext::getLocation();
  unsigned nInputs = indexedGenericOp.getNumInputs();
  unsigned nOutputs = indexedGenericOp.getNumOutputs();
  unsigned nLoops = allIvs.size();
  SmallVector<Value, 4> indexedValues;
  indexedValues.reserve(nLoops + nInputs + nOutputs);
  for (unsigned i = 0; i < nLoops; ++i)
    indexedValues.push_back(allIvs[i]);

  // TODO: Avoid the loads if the corresponding argument of the
  // region has no uses.
  // 1.a. Emit load from input views.
  for (unsigned i = 0; i < nInputs; ++i) {
    auto indexing = makeCanonicalAffineApplies(
        b, loc, indexedGenericOp.getInputIndexingMap(i), allIvs);
    // Passing through IndexedValueType emits the proper load operation.
    indexedValues.push_back(
        IndexedValueType(indexedGenericOp.getInput(i))(indexing));
  }
  // 1.b. Emit load from output views.
  for (unsigned i = 0; i < nOutputs; ++i) {
    auto indexing = makeCanonicalAffineApplies(
        b, loc, indexedGenericOp.getOutputIndexingMap(i), allIvs);
    // Passing through IndexedValueType emits the proper load operation.
    indexedValues.push_back(
        IndexedValueType(indexedGenericOp.getOutputBuffer(i))(indexing));
  }

  // TODO: When a region inliner exists, use it.
  // 2. Inline region, currently only works for a single basic block.
  // 3. Emit store.
  SmallVector<SmallVector<Value, 8>, 8> indexing;
  SmallVector<Value, 8> outputBuffers;
  for (unsigned i = 0; i < nOutputs; ++i) {
    indexing.push_back(makeCanonicalAffineApplies(
        b, loc, indexedGenericOp.getOutputIndexingMap(i), allIvs));
    outputBuffers.push_back(indexedGenericOp.getOutputBuffer(i));
  }
  inlineRegionAndEmitStore<IndexedValueType>(indexedGenericOp, indexedValues,
                                             indexing, outputBuffers);
}

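/// Generic loop-nest generation for a linalg op with buffer semantics:
/// computes the loop ranges from the concatenated indexing maps and view
/// sizes, generates a loop nest of `LoopTy`, and emits the scalar
/// implementation of `ConcreteOpTy` at the innermost insertion point.
/// Returns the generated loop operations, or llvm::None if the enclosing
/// loops could not be recovered from the induction variables.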
template <typename LoopTy, typename ConcreteOpTy>
Optional<LinalgLoops> linalgOpToLoopsImpl(Operation *op, OpBuilder &builder) {
  using IndexedValueTy = typename GenerateLoopNest<LoopTy>::IndexedValueTy;

  ScopedContext scope(builder, op->getLoc());

  // The flattened loopToOperandRangesMaps is expected to be an invertible
  // permutation map (which is asserted in the inverse calculation).
  auto linalgOp = cast<ConcreteOpTy>(op);
  assert(linalgOp.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  auto mapsRange =
      linalgOp.indexing_maps().template getAsRange<AffineMapAttr>();
  auto maps = llvm::to_vector<8>(
      llvm::map_range(mapsRange, [](AffineMapAttr a) { return a.getValue(); }));
  SmallVector<Value, 8> sizes = getViewSizes(builder, linalgOp);
  AffineMap map = concatAffineMaps(maps);
  auto loopRanges =
      emitLoopRanges(scope.getBuilderRef(), scope.getLocation(), map, sizes);
  SmallVector<Value, 4> allIvs;
  GenerateLoopNest<LoopTy>::doit(
      loopRanges, linalgOp.iterator_types().getValue(), [&](ValueRange ivs) {
        allIvs.append(ivs.begin(), ivs.end());
        emitScalarImplementation<IndexedValueTy>(allIvs, linalgOp);
      });
  // Number of loop ops might be different from the number of ivs since some
  // loops like affine.parallel and scf.parallel have multiple ivs.
  llvm::SetVector<Operation *> loopSet;
  for (Value iv : allIvs) {
    if (!iv)
      return {};
    // The induction variable is a block argument of the entry block of the
    // loop operation.
    BlockArgument ivVal = iv.dyn_cast<BlockArgument>();
    if (!ivVal)
      return {};
    loopSet.insert(ivVal.getOwner()->getParentOp());
  }
  LinalgLoops loops(loopSet.begin(), loopSet.end());
  return loops;
}

template <typename LoopType, typename ConcreteOp>
class LinalgRewritePattern : public RewritePattern {
public:
  explicit LinalgRewritePattern(MLIRContext *context)
      : RewritePattern(ConcreteOp::getOperationName(), 1, context) {}

  LogicalResult matchAndRewrite(Operation *op,
                                PatternRewriter &rewriter) const override {
    if (!linalgOpToLoopsImpl<LoopType, ConcreteOp>(op, rewriter))
      return failure();
    rewriter.eraseOp(op);
    return success();
  }
};

template <typename LoopType, typename ConcreteOp>
void insertOnePattern(OwningRewritePatternList &patterns, MLIRContext *ctx) {
  patterns.insert<LinalgRewritePattern<LoopType, ConcreteOp>>(ctx);
}

template <typename LoopType, typename... Args>
void insertPatterns(OwningRewritePatternList &patterns, MLIRContext *ctx) {
  (void)std::initializer_list<int>{
      0, (insertOnePattern<LoopType, Args>(patterns, ctx), 0)...};
}

/// Local folding pattern for AffineApplyOp that we can apply greedily.
/// This replaces AffineApplyOp by the proper value in cases where the
/// associated map is trivial.
/// A trivial map here is defined as a map with a single result and either:
///   1. Zero operands, returning a single AffineConstantExpr.
///   2. One operand, returning a single AffineDimExpr.
///   3. One operand, returning a single AffineSymbolExpr.
///
/// In the first case, the AffineApplyOp is replaced by a new constant. In the
/// other cases, it is replaced by its unique operand.
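/// As illustrative examples only:
///   %0 = affine.apply affine_map<() -> (42)>()     folds to a
///        `constant 42 : index`, and
///   %1 = affine.apply affine_map<(d0) -> (d0)>(%a) folds to %a.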
struct FoldAffineOp : public RewritePattern {
  FoldAffineOp(MLIRContext *context)
      : RewritePattern(AffineApplyOp::getOperationName(), 0, context) {}

  LogicalResult matchAndRewrite(Operation *op,
                                PatternRewriter &rewriter) const override {
    AffineApplyOp affineApplyOp = cast<AffineApplyOp>(op);
    auto map = affineApplyOp.getAffineMap();
    if (map.getNumResults() != 1 || map.getNumInputs() > 1)
      return failure();

    AffineExpr expr = map.getResult(0);
    if (map.getNumInputs() == 0) {
      if (auto val = expr.dyn_cast<AffineConstantExpr>()) {
        rewriter.replaceOpWithNewOp<ConstantIndexOp>(op, val.getValue());
        return success();
      }
      return failure();
    }
    if (expr.dyn_cast<AffineDimExpr>() || expr.dyn_cast<AffineSymbolExpr>()) {
      rewriter.replaceOp(op, op->getOperand(0));
      return success();
    }
    return failure();
  }
};
} // namespace

template <typename LoopType>
static void lowerLinalgToLoopsImpl(FuncOp funcOp, MLIRContext *context) {
  OwningRewritePatternList patterns;
  // Canonicalization and folding patterns applied greedily allow cleaning up
  // the emitted IR on the fly.
  // TODO: fold view and subview ops?
  insertPatterns<LoopType,
#define GET_OP_LIST
#include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.cpp.inc"
                 >(patterns, context);

  DimOp::getCanonicalizationPatterns(patterns, context);
  AffineApplyOp::getCanonicalizationPatterns(patterns, context);
  patterns.insert<FoldAffineOp>(context);
  // Just apply the patterns greedily.
  applyPatternsAndFoldGreedily(funcOp, patterns);
}

namespace {
struct LowerToAffineLoops
    : public LinalgLowerToAffineLoopsBase<LowerToAffineLoops> {
  void runOnFunction() override {
    lowerLinalgToLoopsImpl<AffineForOp>(getFunction(), &getContext());
  }
};
struct LowerToLoops : public LinalgLowerToLoopsBase<LowerToLoops> {
  void runOnFunction() override {
    lowerLinalgToLoopsImpl<scf::ForOp>(getFunction(), &getContext());
  }
};
struct LowerToParallelLoops
    : public LinalgLowerToParallelLoopsBase<LowerToParallelLoops> {
  void runOnFunction() override {
    lowerLinalgToLoopsImpl<scf::ParallelOp>(getFunction(), &getContext());
  }
};
} // namespace

std::unique_ptr<OperationPass<FuncOp>> mlir::createConvertLinalgToLoopsPass() {
  return std::make_unique<LowerToLoops>();
}

std::unique_ptr<OperationPass<FuncOp>>
mlir::createConvertLinalgToParallelLoopsPass() {
  return std::make_unique<LowerToParallelLoops>();
}

std::unique_ptr<OperationPass<FuncOp>>
mlir::createConvertLinalgToAffineLoopsPass() {
  return std::make_unique<LowerToAffineLoops>();
}

// TODO: gradually remove this layer as more ops become "named".
template <typename LoopTy>
static Optional<LinalgLoops> linalgOpToLoopsImplSwitch(Operation *op,
                                                       OpBuilder &builder) {
  assert(isa<LinalgOp>(op) && "LinalgOp expected");
  if (isa<CopyOp>(op))
    return linalgOpToLoopsImpl<LoopTy, CopyOp>(op, builder);
  if (isa<FillOp>(op))
    return linalgOpToLoopsImpl<LoopTy, FillOp>(op, builder);
  if (isa<ConvOp>(op))
    return linalgOpToLoopsImpl<LoopTy, ConvOp>(op, builder);
  if (isa<PoolingMaxOp>(op))
    return linalgOpToLoopsImpl<LoopTy, PoolingMaxOp>(op, builder);
  if (isa<PoolingMinOp>(op))
    return linalgOpToLoopsImpl<LoopTy, PoolingMinOp>(op, builder);
  if (isa<PoolingSumOp>(op))
    return linalgOpToLoopsImpl<LoopTy, PoolingSumOp>(op, builder);
  if (isa<IndexedGenericOp>(op))
    return linalgOpToLoopsImpl<LoopTy, IndexedGenericOp>(op, builder);

  // TODO: Cases below are generic and need a LinalgStructuredOpInterface.
  if (isa<GenericOp>(op))
    return linalgOpToLoopsImpl<LoopTy, GenericOp>(op, builder);
  if (isa<MatmulOp>(op))
    return linalgOpToLoopsImpl<LoopTy, MatmulOp>(op, builder);
  if (isa<MatvecOp>(op))
    return linalgOpToLoopsImpl<LoopTy, MatvecOp>(op, builder);
  if (isa<DotOp>(op))
    return linalgOpToLoopsImpl<LoopTy, DotOp>(op, builder);
  if (isa<BatchMatmulOp>(op))
    return linalgOpToLoopsImpl<LoopTy, BatchMatmulOp>(op, builder);
  llvm_unreachable("Unexpected op in linalgOpToLoopsImpl");
}

/// Emits a loop nest with the proper body for `op`.
template <typename LoopTy>
Optional<LinalgLoops> mlir::linalg::linalgLowerOpToLoops(OpBuilder &builder,
                                                         Operation *op) {
  return linalgOpToLoopsImplSwitch<LoopTy>(op, builder);
}

template Optional<LinalgLoops>
mlir::linalg::linalgLowerOpToLoops<AffineForOp>(OpBuilder &builder,
                                                Operation *op);
template Optional<LinalgLoops>
mlir::linalg::linalgLowerOpToLoops<scf::ForOp>(OpBuilder &builder,
                                               Operation *op);
template Optional<LinalgLoops>
mlir::linalg::linalgLowerOpToLoops<scf::ParallelOp>(OpBuilder &builder,
                                                    Operation *op);

/// Emits a loop nest of `affine.for` with the proper body for `op`.
LogicalResult mlir::linalg::linalgOpToAffineLoops(OpBuilder &builder,
                                                  Operation *op) {
  Optional<LinalgLoops> loops = linalgLowerOpToLoops<AffineForOp>(builder, op);
  return loops ? success() : failure();
}

/// Emits a loop nest of `scf.for` with the proper body for `op`.
LogicalResult mlir::linalg::linalgOpToLoops(OpBuilder &builder, Operation *op) {
  Optional<LinalgLoops> loops = linalgLowerOpToLoops<scf::ForOp>(builder, op);
  return loops ? success() : failure();
}

/// Emits a loop nest of `scf.parallel` with the proper body for `op`.
LogicalResult mlir::linalg::linalgOpToParallelLoops(OpBuilder &builder,
                                                    Operation *op) {
  Optional<LinalgLoops> loops =
      linalgLowerOpToLoops<scf::ParallelOp>(builder, op);
  return loops ? success() : failure();
}