//===- Bufferize.cpp - Bufferization for `tensor` dialect ops -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements bufferization of `tensor` dialect ops.
//
//===----------------------------------------------------------------------===//

#include "mlir/Transforms/Bufferize.h"
#include "PassDetail.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Tensor/Transforms/Passes.h"
#include "mlir/Transforms/DialectConversion.h"

using namespace mlir;

namespace {
class BufferizeCastOp : public OpConversionPattern<tensor::CastOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(tensor::CastOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto resultType = getTypeConverter()->convertType(op.getType());
    rewriter.replaceOpWithNewOp<memref::CastOp>(op, resultType,
                                                adaptor.getOperands()[0]);
    return success();
  }
};
} // namespace

namespace {
class BufferizeDimOp : public OpConversionPattern<tensor::DimOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(tensor::DimOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<memref::DimOp>(op, adaptor.source(),
                                               adaptor.index());
    return success();
  }
};
} // namespace

namespace {
class BufferizeExtractOp : public OpConversionPattern<tensor::ExtractOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(tensor::ExtractOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<memref::LoadOp>(op, adaptor.tensor(),
                                                adaptor.indices());
    return success();
  }
};
} // namespace

namespace {
class BufferizeFromElementsOp
    : public OpConversionPattern<tensor::FromElementsOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(tensor::FromElementsOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    int numberOfElements = op.elements().size();
    auto resultType = MemRefType::get(
        {numberOfElements}, op.getType().cast<TensorType>().getElementType());
    Value result = rewriter.create<memref::AllocOp>(op.getLoc(), resultType);
    for (auto element : llvm::enumerate(op.elements())) {
      Value index = rewriter.create<arith::ConstantIndexOp>(op.getLoc(),
                                                            element.index());
      rewriter.create<memref::StoreOp>(op.getLoc(), element.value(), result,
                                       index);
    }
    rewriter.replaceOp(op, {result});
    return success();
  }
};
} // namespace

namespace {
class BufferizeGenerateOp : public OpConversionPattern<tensor::GenerateOp> {
public:
  using OpConversionPattern::OpConversionPattern;

  LogicalResult
  matchAndRewrite(tensor::GenerateOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const final {
    // Allocate memory.
    Location loc = op.getLoc();
    RankedTensorType tensorType = op.getType().cast<RankedTensorType>();
    MemRefType memrefType =
        MemRefType::get(tensorType.getShape(), tensorType.getElementType());
    Value result = rewriter.create<memref::AllocOp>(loc, memrefType,
                                                    adaptor.dynamicExtents());

    // Collect loop bounds.
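    // Every loop runs from zero with step one; the upper bound for each
    // dimension is either a constant (for a static extent) or the next
    // operand of `dynamicExtents()` (for a dynamic extent).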
    int64_t rank = tensorType.getRank();
    Value zero = rewriter.create<arith::ConstantIndexOp>(loc, 0);
    Value one = rewriter.create<arith::ConstantIndexOp>(loc, 1);
    SmallVector<Value, 4> lowerBounds(rank, zero);
    SmallVector<Value, 4> steps(rank, one);
    SmallVector<Value, 4> upperBounds;
    int nextDynamicIndex = 0;
    for (int i = 0; i < rank; i++) {
      Value upperBound = tensorType.isDynamicDim(i)
                             ? adaptor.dynamicExtents()[nextDynamicIndex++]
                             : rewriter.create<arith::ConstantIndexOp>(
                                   loc, memrefType.getDimSize(i));
      upperBounds.push_back(upperBound);
    }

    // Generate tensor elements with a parallel loop that stores into
    // each element of the resulting memref.
    //
    // This is a bit tricky. We cannot simply clone the ops because when an op
    // is cloned, it must be legalized. However, we want to allow arbitrary ops
    // in the body that we don't necessarily have legalization patterns for as
    // part of this dialect conversion invocation.
    //
    // To accomplish this, we use mergeBlockBefore to "move" this op's body
    // into the scf.parallel's body.
    auto parallel =
        rewriter.create<scf::ParallelOp>(loc, lowerBounds, upperBounds, steps);
    Block *parallelBody = parallel.getBody();
    rewriter.mergeBlockBefore(op.getBody(), parallelBody->getTerminator(),
                              parallelBody->getArguments());
    // Replace the inlined yield op with a store op. The scf.parallel's builder
    // already populated an scf.yield at the end, so we don't need to worry
    // about creating that.
    Operation *elementYield = parallelBody->getTerminator()->getPrevNode();
    rewriter.setInsertionPointAfter(elementYield);
    rewriter.replaceOpWithNewOp<memref::StoreOp>(
        elementYield, elementYield->getOperands()[0], result,
        parallelBody->getArguments());

    rewriter.replaceOp(op, {result});
    return success();
  }
};
} // namespace

void mlir::populateTensorBufferizePatterns(
    BufferizeTypeConverter &typeConverter, RewritePatternSet &patterns) {
  patterns.add<BufferizeCastOp, BufferizeDimOp, BufferizeExtractOp,
               BufferizeFromElementsOp, BufferizeGenerateOp>(
      typeConverter, patterns.getContext());
}

namespace {
struct TensorBufferizePass : public TensorBufferizeBase<TensorBufferizePass> {
  void runOnFunction() override {
    auto *context = &getContext();
    BufferizeTypeConverter typeConverter;
    RewritePatternSet patterns(context);
    ConversionTarget target(*context);

    populateBufferizeMaterializationLegality(target);
    populateTensorBufferizePatterns(typeConverter, patterns);
    target.addIllegalOp<tensor::CastOp, tensor::DimOp, tensor::ExtractOp,
                        tensor::FromElementsOp, tensor::GenerateOp>();
    target.addLegalDialect<memref::MemRefDialect>();
    target.addDynamicallyLegalDialect<arith::ArithmeticDialect>(
        [&](Operation *op) { return typeConverter.isLegal(op); });
    target.addLegalOp<CallOp>();
    target.addLegalOp<ReturnOp>();
    target.addLegalDialect<scf::SCFDialect>();

    if (failed(
            applyPartialConversion(getFunction(), target, std::move(patterns))))
      signalPassFailure();
  }
};
} // namespace

std::unique_ptr<Pass> mlir::createTensorBufferizePass() {
  return std::make_unique<TensorBufferizePass>();
}
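
// An illustrative sketch (not taken from the upstream tests) of what
// `mlir-opt -tensor-bufferize` does to `tensor.extract`, assuming the
// `memref.buffer_cast` source materialization that `BufferizeTypeConverter`
// inserts for unconverted tensor operands in this version of MLIR:
//
//   func @f(%t: tensor<?xf32>, %i: index) -> f32 {
//     %0 = tensor.extract %t[%i] : tensor<?xf32>
//     return %0 : f32
//   }
//
// is rewritten by BufferizeExtractOp (plus the materialization) into:
//
//   func @f(%t: tensor<?xf32>, %i: index) -> f32 {
//     %m = memref.buffer_cast %t : memref<?xf32>
//     %0 = memref.load %m[%i] : memref<?xf32>
//     return %0 : f32
//   }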