//===- Loops.cpp - conversion from Linalg named and generic ops to loops -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Dialect/Affine/EDSC/Intrinsics.h"
#include "mlir/Dialect/Linalg/EDSC/FoldedIntrinsics.h"
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Linalg/IR/LinalgTypes.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/SCF/EDSC/Builders.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/FoldUtils.h"

using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
using namespace mlir::linalg;

using edsc::op::operator+;

static SmallVector<Value, 8> makeCanonicalAffineApplies(OpBuilder &b,
                                                        Location loc,
                                                        AffineMap map,
                                                        ArrayRef<Value> vals) {
  if (map.isEmpty())
    return {};
  assert(map.getNumSymbols() == 0);
  assert(map.getNumInputs() == vals.size());
  SmallVector<Value, 8> res;
  res.reserve(map.getNumResults());
  auto dims = map.getNumDims();
  for (auto e : map.getResults()) {
    auto exprMap = AffineMap::get(dims, 0, e);
    SmallVector<Value, 4> operands(vals.begin(), vals.end());
    canonicalizeMapAndOperands(&exprMap, &operands);
    res.push_back(affine_apply(exprMap, operands));
  }
  return res;
}

static SmallVector<Value, 4> permuteIvs(ArrayRef<Value> ivs,
                                        Optional<AffineMap> permutation) {
  return permutation ? applyMapToValues(ScopedContext::getBuilderRef(),
                                        ScopedContext::getLocation(),
                                        permutation.getValue(), ivs)
                     : SmallVector<Value, 4>(ivs.begin(), ivs.end());
}

// Creates a number of ranges equal to the number of results in `map`.
// The returned ranges correspond to the loop ranges, in the proper order, for
// which new loops will be created.
static SmallVector<Value, 4> emitLoopRanges(OpBuilder &b, Location loc,
                                            AffineMap map,
                                            ArrayRef<Value> allViewSizes);
SmallVector<Value, 4> emitLoopRanges(OpBuilder &b, Location loc, AffineMap map,
                                     ArrayRef<Value> allViewSizes) {
  // Apply `map` to get view sizes in loop order.
  auto sizes = applyMapToValues(b, loc, map, allViewSizes);
  // Create a new range with the applied tile sizes.
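  // Illustrative note: each value produced below is a `linalg.range` covering
  // the half-open interval [0, size) with unit step, one per loop dimension in
  // loop order. Roughly (SSA names are only an example):
  //
  //   %c0 = constant 0 : index
  //   %c1 = constant 1 : index
  //   %r  = linalg.range %c0 : %size : %c1 : !linalg.range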
  ScopedContext scope(b, loc);
  SmallVector<Value, 4> res;
  for (unsigned idx = 0, e = map.getNumResults(); idx < e; ++idx) {
    res.push_back(
        linalg_range(std_constant_index(0), sizes[idx], std_constant_index(1)));
  }
  return res;
}

template <typename IndexedValueType, typename OpType>
static void inlineRegionAndEmitStore(OpType op, ArrayRef<Value> indexedValues,
                                     ArrayRef<SmallVector<Value, 8>> indexing,
                                     ArrayRef<Value> outputBuffers) {
  auto &b = ScopedContext::getBuilderRef();
  auto &block = op.region().front();
  BlockAndValueMapping map;
  map.map(block.getArguments(), indexedValues);
  for (auto &op : block.without_terminator()) {
    assert(op.getNumRegions() == 0 && "expected a non-nested region");
    auto *newOp = b.clone(op, map);
    map.map(op.getResults(), newOp->getResults());
  }

  Operation &terminator = block.back();
  assert(isa<YieldOp>(terminator) &&
         "expected a yield op at the end of the region");
  for (unsigned i = 0, e = terminator.getNumOperands(); i < e; ++i) {
    IndexedValueType O(outputBuffers[i]);
    O(indexing[i]) = map.lookupOrDefault(terminator.getOperand(i));
  }
}

// Returns a pair that contains input indices and output indices of a
// SingleInputPoolingOp `op`.
struct InputAndOutputIndices {
  SmallVector<Value, 8> inputs;
  SmallVector<Value, 8> outputs;
};
template <typename SingleInputPoolingOp>
static InputAndOutputIndices getInputAndOutputIndices(ArrayRef<Value> allIvs,
                                                      SingleInputPoolingOp op) {
  auto &b = ScopedContext::getBuilderRef();
  auto loc = ScopedContext::getLocation();
  auto mapsRange = op.indexing_maps().template getAsRange<AffineMapAttr>();
  auto maps = llvm::to_vector<8>(
      llvm::map_range(mapsRange, [](AffineMapAttr a) { return a.getValue(); }));
  return InputAndOutputIndices{
      makeCanonicalAffineApplies(b, loc, maps[0], allIvs),
      makeCanonicalAffineApplies(b, loc, maps[2], allIvs)};
}

namespace {

/// Emits the MLIR for the scalar part of the generic op by:
///   1. Emitting load ops for each input and output view in order. This is
///      achieved by applying the appropriate input or output map to the
///      enclosing induction variables.
///   2. Emitting a call to `op.fun()` that takes as arguments the scalars
///      from point 1. above.
///   3. Emitting store ops to store the results of 2. to the output
///      views.
///
/// An example output may resemble:
///
/// ```
///    scf.for %i = %c0 to %0 step %c1 {
///      scf.for %j = %c0 to %1 step %c1 {
///        scf.for %k = %c0 to %4 step %c1 {
///          %11 = load %arg0[%i, %j] :
///            memref<?x?xf32, stride_specification>
///          %12 = load %arg1[%i, %j, %k] :
///            memref<?x?x?xf32, stride_specification>
///          %13 = load %arg2[%i, %k, %j] :
///            memref<?x?x?xf32, stride_specification>
///          %14:2 = call @foo(%11, %12, %13) : (f32, f32, f32) -> (f32, f32)
///          store %14#0, %arg1[%i, %j, %k] :
///            memref<?x?x?xf32, stride_specification>
///          store %14#1, %arg2[%i, %k, %j] :
///            memref<?x?x?xf32, stride_specification>
///        }
///      }
///    }
/// ```
template <typename IndexedValueType, typename LinalgOpType>
class LinalgScopedEmitter {
public:
  static void emitScalarImplementation(ArrayRef<Value> allIvs,
                                       LinalgOpType linalgOp) {
    assert(linalgOp.hasBufferSemantics() &&
           "expected linalg op with buffer semantics");
    auto &b = ScopedContext::getBuilderRef();
    auto loc = ScopedContext::getLocation();
    unsigned nInputs = linalgOp.getNumInputs();
    unsigned nOutputs = linalgOp.getNumOutputs();
    SmallVector<Value, 4> indexedValues;
    indexedValues.reserve(nInputs + nOutputs);

    // TODO(mravishankar): Avoid the loads if the corresponding argument of the
    // region has no uses.
    // 1.a. Emit load from input views.
    for (unsigned i = 0; i < nInputs; ++i) {
      auto indexing = makeCanonicalAffineApplies(
          b, loc, linalgOp.getInputIndexingMap(i), allIvs);
      // Passing through IndexedValueType emits the proper load operation.
      indexedValues.push_back(IndexedValueType(linalgOp.getInput(i))(indexing));
    }
    // 1.b. Emit load from output views.
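    // Output views are loaded as well so that the region can read the current
    // value of each output element (e.g. when accumulating into it).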
    for (unsigned i = 0; i < nOutputs; ++i) {
      auto indexing = makeCanonicalAffineApplies(
          b, loc, linalgOp.getOutputIndexingMap(i), allIvs);
      // Passing through IndexedValueType emits the proper load operation.
      indexedValues.push_back(
          IndexedValueType(linalgOp.getOutputBuffer(i))(indexing));
    }

    // TODO(ntv): When a region inliner exists, use it.
    // 2. Inline region, currently only works for a single basic block.
    // 3. Emit store.
    SmallVector<SmallVector<Value, 8>, 8> indexing;
    SmallVector<Value, 8> outputBuffers;
    for (unsigned i = 0; i < nOutputs; ++i) {
      indexing.push_back(makeCanonicalAffineApplies(
          b, loc, linalgOp.getOutputIndexingMap(i), allIvs));
      outputBuffers.push_back(linalgOp.getOutputBuffer(i));
    }
    inlineRegionAndEmitStore<IndexedValueType>(linalgOp, indexedValues,
                                               indexing, outputBuffers);
  }
};

template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, CopyOp> {
public:
  static void emitScalarImplementation(ArrayRef<Value> allIvs, CopyOp copyOp) {
    assert(copyOp.hasBufferSemantics() &&
           "expected linalg op with buffer semantics");
    auto nPar = copyOp.getNumParallelLoops();
    assert(nPar == allIvs.size());
    auto inputIvs =
        permuteIvs(allIvs.take_front(nPar), copyOp.inputPermutation());
    auto outputIvs =
        permuteIvs(allIvs.take_front(nPar), copyOp.outputPermutation());
    SmallVector<Value, 8> iivs(inputIvs.begin(), inputIvs.end());
    SmallVector<Value, 8> oivs(outputIvs.begin(), outputIvs.end());
    IndexedValueType O(copyOp.getOutputBuffer(0)), I(copyOp.getInput(0));
    // Emit the proper scalar assignment, whether we are dealing with a 0-D or
    // an n-D loop nest; with or without permutations.
    // clang-format off
    nPar > 0 ? O(oivs) = I(iivs)
             : O() = I();
    // clang-format on
  }
};

template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, FillOp> {
public:
  static void emitScalarImplementation(ArrayRef<Value> allIvs, FillOp fillOp) {
    assert(fillOp.hasBufferSemantics() &&
           "expected linalg op with buffer semantics");
    auto nPar = fillOp.getNumParallelLoops();
    assert(nPar == allIvs.size());
    auto ivs = SmallVector<Value, 4>(allIvs.begin(), allIvs.begin() + nPar);
    IndexedValueType O(fillOp.getOutputBuffer(0));
    // Emit the proper scalar assignment, whether we are dealing with a 0-D or
    // an n-D loop nest; with or without permutations.
    nPar > 0 ? O(ivs) = fillOp.value() : O() = fillOp.value();
  }
};

template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, DotOp> {
public:
  static void emitScalarImplementation(ArrayRef<Value> allIvs, DotOp dotOp) {
    assert(dotOp.hasBufferSemantics() &&
           "expected linalg op with buffer semantics");
    assert(allIvs.size() == 1);
    Value r_i(allIvs[0]);
    IndexedValueType A(dotOp.getInput(0)), B(dotOp.getInput(1)),
        C(dotOp.getOutputBuffer(0));
    // Emit scalar form.
    C() = C() + A(r_i) * B(r_i);
  }
};

template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, MatvecOp> {
public:
  static void emitScalarImplementation(ArrayRef<Value> allIvs,
                                       MatvecOp matvecOp) {
    assert(matvecOp.hasBufferSemantics() &&
           "expected linalg op with buffer semantics");
    assert(allIvs.size() == 2);
    Value i(allIvs[0]), r_j(allIvs[1]);
    IndexedValueType A(matvecOp.getInput(0)), B(matvecOp.getInput(1)),
        C(matvecOp.getOutputBuffer(0));
    // Emit scalar form.
    C(i) = C(i) + A(i, r_j) * B(r_j);
  }
};

template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, MatmulOp> {
public:
  static void emitScalarImplementation(ArrayRef<Value> allIvs,
                                       MatmulOp matmulOp) {
    assert(matmulOp.hasBufferSemantics() &&
           "expected linalg op with buffer semantics");
    assert(allIvs.size() == 3);
    Value i(allIvs[0]), j(allIvs[1]), r_k(allIvs[2]);
    IndexedValueType A(matmulOp.getInput(0)), B(matmulOp.getInput(1)),
        C(matmulOp.getOutputBuffer(0));
    // Emit scalar form.
    C(i, j) = C(i, j) + A(i, r_k) * B(r_k, j);
  }
};

template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, ConvOp> {
public:
  /// Returns the input value of convOp. If the indices in `imIdx` are out of
  /// bounds, returns 0 instead.
  static Value getConvOpInput(ConvOp convOp, StdIndexedValue im,
                              MutableArrayRef<Value> imIdx) {
    // TODO(ntv): add a level of indirection to linalg.generic.
    if (!convOp.padding())
      return im(imIdx);

    auto *context = ScopedContext::getContext();
    Value zeroIndex = std_constant_index(0);
    SmallVector<Value, 8> conds;
    SmallVector<Value, 8> clampedImIdx;
    for (auto iter : llvm::enumerate(imIdx)) {
      int idx = iter.index();
      auto dim = iter.value();
      // Only need to iterate over the window dimensions.
      if (idx == 0 || idx == static_cast<int>(imIdx.size()) - 1) {
        clampedImIdx.push_back(dim);
        continue;
      }

      using edsc::op::operator<;
      using edsc::op::operator>=;
      using edsc::op::operator||;
      Value leftOutOfBound = dim < zeroIndex;
      if (conds.empty())
        conds.push_back(leftOutOfBound);
      else
        conds.push_back(conds.back() || leftOutOfBound);
      Value rightBound = std_dim(convOp.input(), idx);
      conds.push_back(conds.back() || (dim >= rightBound));

      // When padding is involved, the indices will only be shifted to
      // negative, so having a max op is enough.
      auto maxMap = AffineMap::get(/*dimCount=*/1, 0,
                                   {getAffineDimExpr(/*position=*/0, context),
                                    getAffineConstantExpr(0, context)},
                                   context);
      clampedImIdx.push_back(
          affine_max(dim.getType(), maxMap, ValueRange{dim}));
    }

    auto &b = ScopedContext::getBuilderRef();
    Type type = convOp.input().getType().cast<MemRefType>().getElementType();
    Value zero = std_constant(type, b.getZeroAttr(type));
    Value readInput = im(clampedImIdx);
    return conds.empty() ? readInput
                         : (Value)std_select(conds.back(), zero, readInput);
  }

  static void emitScalarImplementation(ArrayRef<Value> allIvs, ConvOp convOp) {
    assert(convOp.hasBufferSemantics() &&
           "expected linalg op with buffer semantics");
    auto &b = ScopedContext::getBuilderRef();
    auto loc = ScopedContext::getLocation();
    auto mapsRange = convOp.indexing_maps().getAsRange<AffineMapAttr>();
    auto maps = llvm::to_vector<8>(llvm::map_range(
        mapsRange, [](AffineMapAttr a) { return a.getValue(); }));
    SmallVector<Value, 8> fIdx(
        makeCanonicalAffineApplies(b, loc, maps[0], allIvs));
    SmallVector<Value, 8> imIdx(
        makeCanonicalAffineApplies(b, loc, maps[1], allIvs));
    SmallVector<Value, 8> oIdx(
        makeCanonicalAffineApplies(b, loc, maps[2], allIvs));

    // Padded conv involves an affine.max in the memory access which is not
    // allowed by affine.load. Override to always use an StdIndexedValue.
    StdIndexedValue I(convOp.input());
    IndexedValueType F(convOp.filter()), O(convOp.output());

    // Emit scalar form.
    Value paddedInput = getConvOpInput(convOp, I, imIdx);
    O(oIdx) += F(fIdx) * paddedInput;
  }
};

template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, PoolingMaxOp> {
public:
  static void emitScalarImplementation(ArrayRef<Value> allIvs,
                                       PoolingMaxOp op) {
    auto indices = getInputAndOutputIndices(allIvs, op);
    // Emit scalar form.
    Value lhs = std_load(op.output(), indices.outputs);
    Value rhs = std_load(op.input(), indices.inputs);
    using edsc::op::operator>;
    Value maxValue = std_select(lhs > rhs, lhs, rhs);
    std_store(maxValue, op.output(), indices.outputs);
  }
};

template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, PoolingMinOp> {
public:
  static void emitScalarImplementation(ArrayRef<Value> allIvs,
                                       PoolingMinOp op) {
    auto indices = getInputAndOutputIndices(allIvs, op);
    // Emit scalar form.
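    // Pooling-min update: keep the smaller of the currently stored output
    // element and the loaded input sample; the min is spelled as a
    // compare-and-select below.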
    Value lhs = std_load(op.output(), indices.outputs);
    Value rhs = std_load(op.input(), indices.inputs);
    using edsc::op::operator<;
    Value minValue = std_select(lhs < rhs, lhs, rhs);
    std_store(minValue, op.output(), indices.outputs);
  }
};

template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, PoolingSumOp> {
public:
  static void emitScalarImplementation(ArrayRef<Value> allIvs,
                                       PoolingSumOp op) {
    auto indices = getInputAndOutputIndices(allIvs, op);
    IndexedValueType input(op.input()), output(op.output());
    // Emit scalar form.
    output(indices.outputs) += input(indices.inputs);
  }
};

/// Emits the MLIR for the scalar part of the indexed generic op by:
///   1. Emitting load ops for each input and output view in order. This is
///      achieved by applying the appropriate input or output map to the
///      enclosing induction variables.
///   2. Emitting a call to `op.fun()` that takes as arguments the induction
///      variables and the scalars from point 1. above.
///   3. Emitting store ops to store the results of 2. to the output views.
///
/// An example output may resemble:
///
/// ```
///    scf.for %i = %c0 to %0 step %c1 {
///      scf.for %j = %c0 to %1 step %c1 {
///        scf.for %k = %c0 to %4 step %c1 {
///          %11 = load %arg0[%i, %j] :
///            memref<?x?xf32, stride_specification>
///          %12 = load %arg1[%i, %j, %k] :
///            memref<?x?x?xf32, stride_specification>
///          %13 = load %arg2[%i, %k, %j] :
///            memref<?x?x?xf32, stride_specification>
///          %14:2 = call @foo(%i, %j, %k, %11, %12, %13) :
///            (index, index, index, f32, f32, f32) -> (f32, f32)
///          store %14#0, %arg1[%i, %j, %k] :
///            memref<?x?x?xf32, stride_specification>
///          store %14#1, %arg2[%i, %k, %j] :
///            memref<?x?x?xf32, stride_specification>
///        }
///      }
///    }
/// ```
template <typename IndexedValueType>
class LinalgScopedEmitter<IndexedValueType, IndexedGenericOp> {
public:
  static void emitScalarImplementation(ArrayRef<Value> allIvs,
                                       IndexedGenericOp indexedGenericOp) {
    assert(indexedGenericOp.hasBufferSemantics() &&
           "expected linalg op with buffer semantics");
    auto &b = ScopedContext::getBuilderRef();
    auto loc = ScopedContext::getLocation();
    unsigned nInputs = indexedGenericOp.getNumInputs();
    unsigned nOutputs = indexedGenericOp.getNumOutputs();
    unsigned nLoops = allIvs.size();
    SmallVector<Value, 4> indexedValues;
    indexedValues.reserve(nLoops + nInputs + nOutputs);
    for (unsigned i = 0; i < nLoops; ++i)
      indexedValues.push_back(allIvs[i]);

    // TODO(mravishankar): Avoid the loads if the corresponding argument of the
    // region has no uses.
    // 1.a. Emit load from input views.
    for (unsigned i = 0; i < nInputs; ++i) {
      auto indexing = makeCanonicalAffineApplies(
          b, loc, indexedGenericOp.getInputIndexingMap(i), allIvs);
      // Passing input i through IndexedValueType emits the proper load op.
      indexedValues.push_back(
          IndexedValueType(indexedGenericOp.getInput(i))(indexing));
    }
    // 1.b. Emit load from output views.
    for (unsigned i = 0; i < nOutputs; ++i) {
      auto indexing = makeCanonicalAffineApplies(
          b, loc, indexedGenericOp.getOutputIndexingMap(i), allIvs);
      // Passing output i through IndexedValueType emits the proper load op.
      indexedValues.push_back(
          IndexedValueType(indexedGenericOp.getOutputBuffer(i))(indexing));
    }

    // TODO(ntv): When a region inliner exists, use it.
    // 2. Inline region, currently only works for a single basic block.
    // 3. Emit store.
    SmallVector<SmallVector<Value, 8>, 8> indexing;
    SmallVector<Value, 8> outputBuffers;
    for (unsigned i = 0; i < nOutputs; ++i) {
      indexing.push_back(makeCanonicalAffineApplies(
          b, loc, indexedGenericOp.getOutputIndexingMap(i), allIvs));
      outputBuffers.push_back(indexedGenericOp.getOutputBuffer(i));
    }
    inlineRegionAndEmitStore<IndexedValueType>(indexedGenericOp, indexedValues,
                                               indexing, outputBuffers);
  }
};

namespace {
/// Helper struct to generate the loop nest for the op. This is factored out
/// here to be able to partially specialize it for different LoopTy.
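/// For example (illustrative only; SSA names and bounds are placeholders),
/// lowering a linalg.matmul with LoopTy = scf::ForOp produces a nest of the
/// form:
///
///   scf.for %i = %c0 to %M step %c1 {
///     scf.for %j = %c0 to %N step %c1 {
///       scf.for %k = %c0 to %K step %c1 {
///         ... scalar body emitted by LinalgScopedEmitter ...
///       }
///     }
///   }
///
/// while LoopTy = AffineForOp emits the same nest with affine.for loops and
/// LoopTy = scf::ParallelOp maps the outer parallel dimensions to a single
/// scf.parallel (see the specialization below).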
template <typename LoopTy, typename ConcreteOpTy>
class GenerateLoopNest {
public:
  using IndexedValueTy =
      typename std::conditional<std::is_same<LoopTy, AffineForOp>::value,
                                AffineIndexedValue, StdIndexedValue>::type;

  static void doit(ConcreteOpTy linalgOp, ArrayRef<Value> loopRanges,
                   MutableArrayRef<Value> allIvs) {
    GenericLoopNestRangeBuilder<LoopTy>(allIvs, loopRanges)([&] {
      SmallVector<Value, 4> allIvValues(allIvs.begin(), allIvs.end());
      LinalgScopedEmitter<IndexedValueTy,
                          ConcreteOpTy>::emitScalarImplementation(allIvValues,
                                                                  linalgOp);
    });
  }
};

/// Generates loop nest using scf.parallel. scf.parallel is only used for the
/// outer parallel loops. All other loops are generated using the scf.for
/// operation.
template <typename ConcreteOpTy>
class GenerateLoopNest<scf::ParallelOp, ConcreteOpTy> {
public:
  using IndexedValueTy = StdIndexedValue;

  static void doit(ConcreteOpTy linalgOp, ArrayRef<Value> loopRanges,
                   MutableArrayRef<Value> allIvs) {
    // Only generate scf.parallel for outer consecutive "parallel"
    // iterator_types.
    // TODO(ravishankarm): Generate scf.parallel for all "parallel" iterator
    // types, not just the outer most ones. Also handle "reduction" iterator
    // types.
    auto nOuterPar = linalgOp.iterator_types()
                         .getValue()
                         .take_while([](Attribute attr) {
                           return attr.cast<StringAttr>().getValue() ==
                                  getParallelIteratorTypeName();
                         })
                         .size();
    // If there are no outer parallel loops, then the number of loop ops is the
    // same as the number of loops, and they are all scf.for ops.
    if (nOuterPar) {
      GenericLoopNestRangeBuilder<scf::ParallelOp>(
          allIvs.take_front(nOuterPar), loopRanges.take_front(nOuterPar))([&] {
        GenericLoopNestRangeBuilder<scf::ForOp>(
            allIvs.drop_front(nOuterPar),
            loopRanges.drop_front(nOuterPar))([&] {
          SmallVector<Value, 4> allIvValues(allIvs.begin(), allIvs.end());
          LinalgScopedEmitter<StdIndexedValue, ConcreteOpTy>::
              emitScalarImplementation(allIvValues, linalgOp);
        });
      });
    } else {
      // If there are no parallel loops then fall back to generating all
      // scf.for operations.
      GenericLoopNestRangeBuilder<scf::ForOp>(allIvs, loopRanges)([&] {
        SmallVector<Value, 4> allIvValues(allIvs.begin(), allIvs.end());
        LinalgScopedEmitter<StdIndexedValue,
                            ConcreteOpTy>::emitScalarImplementation(allIvValues,
                                                                    linalgOp);
      });
    }
  }
};
} // namespace

template <typename LoopTy, typename ConcreteOpTy>
Optional<LinalgLoops> linalgOpToLoopsImpl(Operation *op, OpBuilder &builder) {
  using Impl = GenerateLoopNest<LoopTy, ConcreteOpTy>;
  using IndexedValueTy =
      typename GenerateLoopNest<LoopTy, ConcreteOpTy>::IndexedValueTy;

  ScopedContext scope(builder, op->getLoc());

  // The flattened loopToOperandRangesMaps is expected to be an invertible
  // permutation map (which is asserted in the inverse calculation).
  auto linalgOp = cast<ConcreteOpTy>(op);
  assert(linalgOp.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  auto nPar = linalgOp.getNumParallelLoops();
  auto nRed = linalgOp.getNumReductionLoops();
  auto nWin = linalgOp.getNumWindowLoops();
  auto nLoops = nPar + nRed + nWin;
  auto mapsRange =
      linalgOp.indexing_maps().template getAsRange<AffineMapAttr>();
  auto maps = llvm::to_vector<8>(
      llvm::map_range(mapsRange, [](AffineMapAttr a) { return a.getValue(); }));
  AffineMap invertedMap = inversePermutation(concatAffineMaps(maps));
  if (!invertedMap)
    return {};
  if (invertedMap.isEmpty()) {
    LinalgScopedEmitter<IndexedValueTy, ConcreteOpTy>::emitScalarImplementation(
        {}, linalgOp);
    return LinalgLoops();
  }

  SmallVector<Value, 4> allIvs(nLoops);
  auto loopRanges =
      emitLoopRanges(scope.getBuilderRef(), scope.getLocation(), invertedMap,
                     getViewSizes(builder, linalgOp));
  assert(loopRanges.size() == allIvs.size());
  Impl::doit(linalgOp, loopRanges, allIvs);
  // Number of loop ops might be different from the number of ivs since some
  // loops like affine.parallel and scf.parallel have multiple ivs.
  llvm::SetVector<Operation *> loopSet;
  for (Value iv : allIvs) {
    if (!iv)
      return {};
    // The induction variable is a block argument of the entry block of the
    // loop operation.
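    // For instance, an scf.for induction variable is an argument of the loop
    // body block, so the block's parent op is the loop op collected below.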
    BlockArgument ivVal = iv.dyn_cast<BlockArgument>();
    if (!ivVal)
      return {};
    loopSet.insert(ivVal.getOwner()->getParentOp());
  }
  LinalgLoops loops(loopSet.begin(), loopSet.end());
  return loops;
}

template <typename LoopType, typename ConcreteOp>
class LinalgRewritePattern : public RewritePattern {
public:
  explicit LinalgRewritePattern(MLIRContext *context)
      : RewritePattern(ConcreteOp::getOperationName(), 1, context) {}

  LogicalResult matchAndRewrite(Operation *op,
                                PatternRewriter &rewriter) const override {
    if (!linalgOpToLoopsImpl<LoopType, ConcreteOp>(op, rewriter))
      return failure();
    rewriter.eraseOp(op);
    return success();
  }
};

/// Helper classes for type list expansion.
template <typename LoopType, typename... LinalgOps>
class RewritePatternList;

template <typename LoopType>
class RewritePatternList<LoopType> {
public:
  static void build(OwningRewritePatternList &patterns, MLIRContext *ctx) {}
};

template <typename LoopType, typename ConcreteOp, typename... LinalgOps>
class RewritePatternList<LoopType, ConcreteOp, LinalgOps...> {
public:
  static void build(OwningRewritePatternList &patterns, MLIRContext *ctx) {
    patterns.insert<LinalgRewritePattern<LoopType, ConcreteOp>>(ctx);
    RewritePatternList<LoopType, LinalgOps...>::build(patterns, ctx);
  }
};

/// Populate the given list with patterns that convert from Linalg to loops.
template <typename LoopType, typename... LinalgOps>
void FillRewritePatterns(OwningRewritePatternList &patterns, MLIRContext *ctx) {
  RewritePatternList<LoopType, LinalgOps...>::build(patterns, ctx);
}

/// Local folding pattern for AffineApplyOp that we can apply greedily.
/// This replaces AffineApplyOp by the proper value in cases where the
/// associated map is trivial.
/// A trivial map here is defined as a map with a single result and either:
///   1. Zero operand + returns a single AffineConstantExpr
///   2. One operand + returns a single AffineDimExpr
///   3. One operand + returns a single AffineSymbolExpr
//
/// In the first case, the AffineApplyOp is replaced by a new constant. In the
/// other cases, it is replaced by its unique operand.
struct FoldAffineOp : public RewritePattern {
  FoldAffineOp(MLIRContext *context)
      : RewritePattern(AffineApplyOp::getOperationName(), 0, context) {}

  LogicalResult matchAndRewrite(Operation *op,
                                PatternRewriter &rewriter) const override {
    AffineApplyOp affineApplyOp = cast<AffineApplyOp>(op);
    auto map = affineApplyOp.getAffineMap();
    if (map.getNumResults() != 1 || map.getNumInputs() > 1)
      return failure();

    AffineExpr expr = map.getResult(0);
    if (map.getNumInputs() == 0) {
      if (auto val = expr.dyn_cast<AffineConstantExpr>()) {
        rewriter.replaceOpWithNewOp<ConstantIndexOp>(op, val.getValue());
        return success();
      }
      return failure();
    }
    if (expr.dyn_cast<AffineDimExpr>() || expr.dyn_cast<AffineSymbolExpr>()) {
      rewriter.replaceOp(op, op->getOperand(0));
      return success();
    }
    return failure();
  }
};
} // namespace

template <typename LoopType>
static void lowerLinalgToLoopsImpl(Operation *op, MLIRContext *context) {
  OwningRewritePatternList patterns;
  // Canonicalization and folding patterns applied greedily allow cleaning up
  // the emitted IR on the fly.
  // TODO(ntv): fold view and subview ops?
  // The op list mirrors the INSTANTIATE_LINALG_OP_TO_LOOPS list at the end of
  // this file.
  FillRewritePatterns<LoopType, CopyOp, FillOp, DotOp, MatvecOp, MatmulOp,
                      ConvOp, PoolingMaxOp, PoolingMinOp, PoolingSumOp,
                      GenericOp, IndexedGenericOp>(patterns, context);
  DimOp::getCanonicalizationPatterns(patterns, context);
  AffineApplyOp::getCanonicalizationPatterns(patterns, context);
  patterns.insert<FoldAffineOp>(context);
  // Just apply the patterns greedily.
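  // For example (illustrative), an identity apply such as
  // `%i = affine.apply affine_map<(d0) -> (d0)>(%iv)` is folded by
  // FoldAffineOp into a direct use of %iv, and zero-input constant maps become
  // constant ops, so the emitted loop bodies index memrefs directly with the
  // induction variables.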
  applyPatternsAndFoldGreedily(op, patterns);
}

namespace {
struct LowerToAffineLoops
    : public LinalgLowerToAffineLoopsBase<LowerToAffineLoops> {
  void runOnFunction() override {
    lowerLinalgToLoopsImpl<AffineForOp>(getFunction(), &getContext());
  }
};
struct LowerToLoops : public LinalgLowerToLoopsBase<LowerToLoops> {
  void runOnFunction() override {
    lowerLinalgToLoopsImpl<scf::ForOp>(getFunction(), &getContext());
  }
};
struct LowerToParallelLoops
    : public LinalgLowerToParallelLoopsBase<LowerToParallelLoops> {
  void runOnFunction() override {
    lowerLinalgToLoopsImpl<scf::ParallelOp>(getFunction(), &getContext());
  }
};
} // namespace

std::unique_ptr<OperationPass<FuncOp>> mlir::createConvertLinalgToLoopsPass() {
  return std::make_unique<LowerToLoops>();
}

std::unique_ptr<OperationPass<FuncOp>>
mlir::createConvertLinalgToParallelLoopsPass() {
  return std::make_unique<LowerToParallelLoops>();
}

std::unique_ptr<OperationPass<FuncOp>>
mlir::createConvertLinalgToAffineLoopsPass() {
  return std::make_unique<LowerToAffineLoops>();
}

/// Emits a loop nest with the proper body for `op`.
template <typename LoopTy, typename ConcreteOp>
Optional<LinalgLoops> mlir::linalg::linalgLowerOpToLoops(OpBuilder &builder,
                                                         Operation *op) {
  return linalgOpToLoopsImpl<LoopTy, ConcreteOp>(op, builder);
}

/// Emits a loop nest of `scf.for` with the proper body for `op`.
template <typename ConcreteOp>
LogicalResult mlir::linalg::linalgOpToLoops(OpBuilder &builder, Operation *op) {
  Optional<LinalgLoops> loops =
      linalgLowerOpToLoops<scf::ForOp, ConcreteOp>(builder, op);
  return loops ? success() : failure();
}

/// Emits a loop nest of `affine.for` with the proper body for `op`.
template <typename ConcreteOp>
LogicalResult mlir::linalg::linalgOpToAffineLoops(OpBuilder &builder,
                                                  Operation *op) {
  Optional<LinalgLoops> loops =
      linalgLowerOpToLoops<AffineForOp, ConcreteOp>(builder, op);
  return loops ? success() : failure();
}

/// Emits a loop nest of `scf.parallel` with the proper body for `op`.
template <typename ConcreteOp>
LogicalResult mlir::linalg::linalgOpToParallelLoops(OpBuilder &builder,
                                                    Operation *op) {
  Optional<LinalgLoops> loops =
      linalgLowerOpToLoops<scf::ParallelOp, ConcreteOp>(builder, op);
  return loops ? success() : failure();
}

// TODO: Need to make these instantiations more future-proof to avoid the need
// to update as soon as we add new ops.
#define INSTANTIATE_LINALG_OP_TO_LOOPS(OP_TYPE)                                \
  template LogicalResult mlir::linalg::linalgOpToLoops<OP_TYPE>(               \
      OpBuilder & builder, Operation * op);                                    \
  template LogicalResult mlir::linalg::linalgOpToAffineLoops<OP_TYPE>(         \
      OpBuilder & builder, Operation * op);                                    \
  template LogicalResult mlir::linalg::linalgOpToParallelLoops<OP_TYPE>(       \
      OpBuilder & builder, Operation * op);                                    \
  template Optional<LinalgLoops>                                               \
      mlir::linalg::linalgLowerOpToLoops<scf::ParallelOp, OP_TYPE>(            \
          OpBuilder & builder, Operation * op);

INSTANTIATE_LINALG_OP_TO_LOOPS(CopyOp)
INSTANTIATE_LINALG_OP_TO_LOOPS(FillOp)
INSTANTIATE_LINALG_OP_TO_LOOPS(DotOp)
INSTANTIATE_LINALG_OP_TO_LOOPS(MatvecOp)
INSTANTIATE_LINALG_OP_TO_LOOPS(MatmulOp)
INSTANTIATE_LINALG_OP_TO_LOOPS(ConvOp)
INSTANTIATE_LINALG_OP_TO_LOOPS(PoolingMaxOp)
INSTANTIATE_LINALG_OP_TO_LOOPS(PoolingMinOp)
INSTANTIATE_LINALG_OP_TO_LOOPS(PoolingSumOp)
INSTANTIATE_LINALG_OP_TO_LOOPS(GenericOp)
INSTANTIATE_LINALG_OP_TO_LOOPS(IndexedGenericOp)