//===- Bufferize.cpp - Bufferization for `tensor` dialect ops ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements bufferization of `tensor` dialect ops.
//
//===----------------------------------------------------------------------===//

#include "mlir/Transforms/Bufferize.h"
#include "PassDetail.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Tensor/Transforms/Passes.h"
#include "mlir/Transforms/DialectConversion.h"

using namespace mlir;

namespace {
class BufferizeCastOp : public OpConversionPattern<tensor::CastOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(tensor::CastOp op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto resultType = getTypeConverter()->convertType(op.getType());
    rewriter.replaceOpWithNewOp<memref::CastOp>(op, resultType, operands[0]);
    return success();
  }
};
} // namespace

namespace {
class BufferizeDimOp : public OpConversionPattern<tensor::DimOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(tensor::DimOp op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    tensor::DimOp::Adaptor adaptor(operands);
    rewriter.replaceOpWithNewOp<memref::DimOp>(op, adaptor.source(),
                                               adaptor.index());
    return success();
  }
};
} // namespace

namespace {
class BufferizeExtractOp : public OpConversionPattern<tensor::ExtractOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(tensor::ExtractOp op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    tensor::ExtractOp::Adaptor adaptor(operands);
    rewriter.replaceOpWithNewOp<memref::LoadOp>(op, adaptor.tensor(),
                                                adaptor.indices());
    return success();
  }
};
} // namespace

namespace {
class BufferizeFromElementsOp
    : public OpConversionPattern<tensor::FromElementsOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(tensor::FromElementsOp op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    int numberOfElements = op.elements().size();
    auto resultType = MemRefType::get(
        {numberOfElements}, op.getType().cast<TensorType>().getElementType());
    Value result = rewriter.create<memref::AllocOp>(op.getLoc(), resultType);
    for (auto element : llvm::enumerate(op.elements())) {
      Value index =
          rewriter.create<ConstantIndexOp>(op.getLoc(), element.index());
      rewriter.create<memref::StoreOp>(op.getLoc(), element.value(), result,
                                       index);
    }
    rewriter.replaceOp(op, {result});
    return success();
  }
};
} // namespace

namespace {
class BufferizeGenerateOp : public OpConversionPattern<tensor::GenerateOp> {
public:
  using OpConversionPattern::OpConversionPattern;

  LogicalResult
  matchAndRewrite(tensor::GenerateOp op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const final {
    // Allocate memory.
    Location loc = op.getLoc();
    tensor::GenerateOp::Adaptor transformed(operands);
    RankedTensorType tensorType = op.getType().cast<RankedTensorType>();
    MemRefType memrefType =
        MemRefType::get(tensorType.getShape(), tensorType.getElementType());
    Value result = rewriter.create<memref::AllocOp>(
        loc, memrefType, transformed.dynamicExtents());

    // Collect loop bounds.
    int64_t rank = tensorType.getRank();
    Value zero = rewriter.create<ConstantIndexOp>(loc, 0);
    Value one = rewriter.create<ConstantIndexOp>(loc, 1);
    SmallVector<Value, 4> lowerBounds(rank, zero);
    SmallVector<Value, 4> steps(rank, one);
    SmallVector<Value, 4> upperBounds;
    int nextDynamicIndex = 0;
    for (int i = 0; i < rank; i++) {
      Value upperBound =
          tensorType.isDynamicDim(i)
              ? transformed.dynamicExtents()[nextDynamicIndex++]
              : rewriter.create<ConstantIndexOp>(loc,
                                                 memrefType.getDimSize(i));
      upperBounds.push_back(upperBound);
    }

    // Generate tensor elements with a parallel loop that stores into
    // each element of the resulting memref.
    //
    // This is a bit tricky. We cannot simply clone the ops because when an op
    // is cloned, it must be legalized. However, we want to allow arbitrary ops
    // in the body that we don't necessarily have legalization patterns for as
    // part of this dialect conversion invocation.
    //
    // To accomplish this, we use mergeBlockBefore to "move" this op's body
    // into the scf.parallel's body.
    auto parallel =
        rewriter.create<scf::ParallelOp>(loc, lowerBounds, upperBounds, steps);
    Block *parallelBody = parallel.getBody();
    rewriter.mergeBlockBefore(op.getBody(), parallelBody->getTerminator(),
                              parallelBody->getArguments());
    // Replace the inlined yield op with a store op. The scf.parallel's builder
    // already populated an scf.yield at the end, so we don't need to worry
    // about creating that.
    Operation *elementYield = parallelBody->getTerminator()->getPrevNode();
    rewriter.setInsertionPointAfter(elementYield);
    rewriter.replaceOpWithNewOp<memref::StoreOp>(
        elementYield, elementYield->getOperands()[0], result,
        parallelBody->getArguments());

    rewriter.replaceOp(op, {result});
    return success();
  }
};
} // namespace

void mlir::populateTensorBufferizePatterns(
    BufferizeTypeConverter &typeConverter, RewritePatternSet &patterns) {
  patterns.add<BufferizeCastOp, BufferizeDimOp, BufferizeExtractOp,
               BufferizeFromElementsOp, BufferizeGenerateOp>(
      typeConverter, patterns.getContext());
}

namespace {
struct TensorBufferizePass : public TensorBufferizeBase<TensorBufferizePass> {
  void runOnFunction() override {
    auto *context = &getContext();
    BufferizeTypeConverter typeConverter;
    RewritePatternSet patterns(context);
    ConversionTarget target(*context);

    populateBufferizeMaterializationLegality(target);

    populateTensorBufferizePatterns(typeConverter, patterns);
    target.addIllegalOp<tensor::CastOp, tensor::ExtractOp,
                        tensor::FromElementsOp, tensor::GenerateOp>();
    target.addLegalDialect<memref::MemRefDialect>();
    target.addDynamicallyLegalDialect<StandardOpsDialect>(
        [&](Operation *op) { return typeConverter.isLegal(op); });
    target.addLegalDialect<scf::SCFDialect>();

    if (failed(applyPartialConversion(getFunction(), target,
                                      std::move(patterns))))
      signalPassFailure();
  }
};
} // namespace

std::unique_ptr<Pass> mlir::createTensorBufferizePass() {
  return std::make_unique<TensorBufferizePass>();
}