//===- BufferizableOpInterfaceImpl.cpp - Impl. of BufferizableOpInterface -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Arithmetic/Transforms/BufferizableOpInterfaceImpl.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/Transforms/BufferUtils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/Operation.h"

using namespace mlir;
using namespace mlir::bufferization;

namespace {
/// Bufferization of arith.constant. Replace with memref.get_global.
struct ConstantOpInterface
    : public BufferizableOpInterface::ExternalModel<ConstantOpInterface,
                                                    arith::ConstantOp> {
  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          BufferizationState &state) const {
    auto constantOp = cast<arith::ConstantOp>(op);

    // Only ranked tensors are supported.
    if (!constantOp.getType().isa<RankedTensorType>())
      return failure();

    // Only constants inside a module are supported.
    auto moduleOp = constantOp->getParentOfType<ModuleOp>();
    if (!moduleOp)
      return failure();

    // Create global memory segment and replace tensor with memref pointing to
    // that memory segment.
    FailureOr<memref::GlobalOp> globalOp =
        getGlobalFor(constantOp, state.getOptions().bufferAlignment);
    if (failed(globalOp))
      return failure();
    memref::GlobalOp globalMemref = globalOp.getValue();
    replaceOpWithNewBufferizedOp<memref::GetGlobalOp>(
        rewriter, op, globalMemref.type(), globalMemref.getName());

    return success();
  }

  bool isWritable(Operation *op, Value value,
                  const AnalysisState &state) const {
    // Memory locations returned by memref::GetGlobalOp may not be written to.
    assert(value.isa<OpResult>());
    return false;
  }
};

/// Bufferization of arith.index_cast. Replace the tensor operand/result with
/// the corresponding buffers.
struct IndexCastOpInterface
    : public BufferizableOpInterface::ExternalModel<IndexCastOpInterface,
                                                    arith::IndexCastOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return false;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    return false;
  }

  SmallVector<OpResult> getAliasingOpResult(Operation *op, OpOperand &opOperand,
                                            const AnalysisState &state) const {
    return {op->getResult(0)};
  }

  BufferRelation bufferRelation(Operation *op, OpResult opResult,
                                const AnalysisState &state) const {
    return BufferRelation::Equivalent;
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          BufferizationState &state) const {
    auto castOp = cast<arith::IndexCastOp>(op);
    auto resultTensorType = castOp.getType().cast<TensorType>();

    Value source = *state.getBuffer(rewriter, op->getOpOperand(0) /*in*/);
    auto sourceType = source.getType().cast<BaseMemRefType>();

    // Result type should have same layout and address space as the source
    // type.
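    // E.g. (illustrative): for a source buffer of type memref<4xi64, #layout>
    // and a result tensor type tensor<4xindex>, the result buffer type becomes
    // memref<4xindex, #layout>.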
    BaseMemRefType resultType;
    if (auto rankedMemRefType = sourceType.dyn_cast<MemRefType>()) {
      resultType = MemRefType::get(
          rankedMemRefType.getShape(), resultTensorType.getElementType(),
          rankedMemRefType.getLayout(), rankedMemRefType.getMemorySpace());
    } else {
      auto unrankedMemrefType = sourceType.cast<UnrankedMemRefType>();
      resultType = UnrankedMemRefType::get(resultTensorType.getElementType(),
                                           unrankedMemrefType.getMemorySpace());
    }

    replaceOpWithNewBufferizedOp<arith::IndexCastOp>(rewriter, op, resultType,
                                                     source);
    return success();
  }
};

/// Bufferization of arith.select. Just replace the operands.
struct SelectOpInterface
    : public BufferizableOpInterface::ExternalModel<SelectOpInterface,
                                                    arith::SelectOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return false;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    return false;
  }

  SmallVector<OpResult> getAliasingOpResult(Operation *op, OpOperand &opOperand,
                                            const AnalysisState &state) const {
    return {op->getOpResult(0) /*result*/};
  }

  SmallVector<OpOperand *>
  getAliasingOpOperand(Operation *op, OpResult opResult,
                       const AnalysisState &state) const {
    return {&op->getOpOperand(1) /*true_value*/,
            &op->getOpOperand(2) /*false_value*/};
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          BufferizationState &state) const {
    auto selectOp = cast<arith::SelectOp>(op);
    Location loc = selectOp.getLoc();

    // `getBuffer` introduces copies if an OpOperand bufferizes out-of-place.
    // TODO: It would be more efficient to copy the result of the `select` op
    // instead of its OpOperands. In the worst case, 2 copies are inserted at
    // the moment (one for each tensor). When copying the op result, only one
    // copy would be needed.
    Value trueBuffer =
        *state.getBuffer(rewriter, selectOp->getOpOperand(1) /*true_value*/);
    Value falseBuffer =
        *state.getBuffer(rewriter, selectOp->getOpOperand(2) /*false_value*/);

    // The "true" and the "false" operands must have the same type. If the
    // buffers have different types, they differ only in their layout map. Cast
    // both of them to the most dynamic MemRef type.
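    // E.g. (illustrative): if the buffers have types memref<4xf32> and
    // memref<4xf32, #strided>, both are cast to a memref<4xf32> type with a
    // fully dynamic strided layout (dynamic offset and strides), so that the
    // two operand types match again.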
    if (trueBuffer.getType() != falseBuffer.getType()) {
      auto trueType = trueBuffer.getType().cast<MemRefType>();
      int64_t dynamicOffset = ShapedType::kDynamicStrideOrOffset;
      SmallVector<int64_t> dynamicStrides(trueType.getRank(),
                                          ShapedType::kDynamicStrideOrOffset);
      AffineMap stridedLayout = makeStridedLinearLayoutMap(
          dynamicStrides, dynamicOffset, op->getContext());
      auto castedType =
          MemRefType::get(trueType.getShape(), trueType.getElementType(),
                          stridedLayout, trueType.getMemorySpaceAsInt());
      trueBuffer = rewriter.create<memref::CastOp>(loc, castedType, trueBuffer);
      falseBuffer =
          rewriter.create<memref::CastOp>(loc, castedType, falseBuffer);
    }

    replaceOpWithNewBufferizedOp<arith::SelectOp>(
        rewriter, op, selectOp.getCondition(), trueBuffer, falseBuffer);
    return success();
  }

  BufferRelation bufferRelation(Operation *op, OpResult opResult,
                                const AnalysisState &state) const {
    return BufferRelation::None;
  }
};

} // namespace

void mlir::arith::registerBufferizableOpInterfaceExternalModels(
    DialectRegistry &registry) {
  registry.addExtension(+[](MLIRContext *ctx, ArithmeticDialect *dialect) {
    ConstantOp::attachInterface<ConstantOpInterface>(*ctx);
    IndexCastOp::attachInterface<IndexCastOpInterface>(*ctx);
    SelectOp::attachInterface<SelectOpInterface>(*ctx);
  });
}