//===- BufferizableOpInterfaceImpl.cpp - Impl. of BufferizableOpInterface -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Arithmetic/Transforms/BufferizableOpInterfaceImpl.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/Transforms/BufferUtils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/Operation.h"

using namespace mlir;
using namespace mlir::bufferization;

namespace {
/// Bufferization of arith.constant. Replace with memref.get_global.
struct ConstantOpInterface
    : public BufferizableOpInterface::ExternalModel<ConstantOpInterface,
                                                    arith::ConstantOp> {
  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto constantOp = cast<arith::ConstantOp>(op);

    // Only ranked tensors are supported.
    if (!constantOp.getType().isa<RankedTensorType>())
      return failure();

    // Only constants inside a module are supported.
    auto moduleOp = constantOp->getParentOfType<ModuleOp>();
    if (!moduleOp)
      return failure();

    // Create global memory segment and replace tensor with memref pointing to
    // that memory segment.
    FailureOr<memref::GlobalOp> globalOp =
        getGlobalFor(constantOp, options.bufferAlignment);
    if (failed(globalOp))
      return failure();
    memref::GlobalOp globalMemref = *globalOp;
    replaceOpWithNewBufferizedOp<memref::GetGlobalOp>(
        rewriter, op, globalMemref.type(), globalMemref.getName());

    return success();
  }

  bool isWritable(Operation *op, Value value,
                  const AnalysisState &state) const {
    // Memory locations returned by memref::GetGlobalOp may not be written to.
    assert(value.isa<OpResult>());
    return false;
  }
};

/// Bufferization of arith.index_cast. Replace with an index_cast on the
/// source buffer.
struct IndexCastOpInterface
    : public BufferizableOpInterface::ExternalModel<IndexCastOpInterface,
                                                    arith::IndexCastOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return false;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    return false;
  }

  SmallVector<OpResult> getAliasingOpResult(Operation *op, OpOperand &opOperand,
                                            const AnalysisState &state) const {
    return {op->getResult(0)};
  }

  BufferRelation bufferRelation(Operation *op, OpResult opResult,
                                const AnalysisState &state) const {
    return BufferRelation::Equivalent;
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto castOp = cast<arith::IndexCastOp>(op);
    auto resultTensorType = castOp.getType().cast<TensorType>();

    FailureOr<Value> source = getBuffer(rewriter, castOp.getIn(), options);
    if (failed(source))
      return failure();
    auto sourceType = source->getType().cast<BaseMemRefType>();

    // Result type should have the same layout and address space as the source
    // type.
    BaseMemRefType resultType;
    if (auto rankedMemRefType = sourceType.dyn_cast<MemRefType>()) {
      resultType = MemRefType::get(
          rankedMemRefType.getShape(), resultTensorType.getElementType(),
          rankedMemRefType.getLayout(), rankedMemRefType.getMemorySpace());
    } else {
      auto unrankedMemrefType = sourceType.cast<UnrankedMemRefType>();
      resultType = UnrankedMemRefType::get(resultTensorType.getElementType(),
                                           unrankedMemrefType.getMemorySpace());
    }

    replaceOpWithNewBufferizedOp<arith::IndexCastOp>(rewriter, op, resultType,
                                                     *source);
    return success();
  }
};

/// Bufferization of arith.select. Just replace the operands.
struct SelectOpInterface
    : public BufferizableOpInterface::ExternalModel<SelectOpInterface,
                                                    arith::SelectOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return false;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    return false;
  }

  SmallVector<OpResult> getAliasingOpResult(Operation *op, OpOperand &opOperand,
                                            const AnalysisState &state) const {
    return {op->getOpResult(0) /*result*/};
  }

  SmallVector<OpOperand *>
  getAliasingOpOperand(Operation *op, OpResult opResult,
                       const AnalysisState &state) const {
    return {&op->getOpOperand(1) /*true_value*/,
            &op->getOpOperand(2) /*false_value*/};
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto selectOp = cast<arith::SelectOp>(op);
    Location loc = selectOp.getLoc();

    // TODO: It would be more efficient to copy the result of the `select` op
    // instead of its OpOperands. In the worst case, 2 copies are inserted at
    // the moment (one for each tensor). When copying the op result, only one
    // copy would be needed.
    FailureOr<Value> maybeTrueBuffer =
        getBuffer(rewriter, selectOp.getTrueValue(), options);
    FailureOr<Value> maybeFalseBuffer =
        getBuffer(rewriter, selectOp.getFalseValue(), options);
    if (failed(maybeTrueBuffer) || failed(maybeFalseBuffer))
      return failure();
    Value trueBuffer = *maybeTrueBuffer;
    Value falseBuffer = *maybeFalseBuffer;

    // The "true" and the "false" operands must have the same type. If the
    // buffers have different types, they differ only in their layout map. Cast
    // both of them to the most dynamic MemRef type.
    if (trueBuffer.getType() != falseBuffer.getType()) {
      auto trueType = trueBuffer.getType().cast<MemRefType>();
      int64_t dynamicOffset = ShapedType::kDynamicStrideOrOffset;
      SmallVector<int64_t> dynamicStrides(trueType.getRank(),
                                          ShapedType::kDynamicStrideOrOffset);
      AffineMap stridedLayout = makeStridedLinearLayoutMap(
          dynamicStrides, dynamicOffset, op->getContext());
      auto castedType =
          MemRefType::get(trueType.getShape(), trueType.getElementType(),
                          stridedLayout, trueType.getMemorySpaceAsInt());
      trueBuffer = rewriter.create<memref::CastOp>(loc, castedType, trueBuffer);
      falseBuffer =
          rewriter.create<memref::CastOp>(loc, castedType, falseBuffer);
    }

    replaceOpWithNewBufferizedOp<arith::SelectOp>(
        rewriter, op, selectOp.getCondition(), trueBuffer, falseBuffer);
    return success();
  }

  BufferRelation bufferRelation(Operation *op, OpResult opResult,
                                const AnalysisState &state) const {
    return BufferRelation::None;
  }
};

} // namespace

void mlir::arith::registerBufferizableOpInterfaceExternalModels(
    DialectRegistry &registry) {
  registry.addExtension(+[](MLIRContext *ctx, ArithmeticDialect *dialect) {
    ConstantOp::attachInterface<ConstantOpInterface>(*ctx);
    IndexCastOp::attachInterface<IndexCastOpInterface>(*ctx);
    SelectOp::attachInterface<SelectOpInterface>(*ctx);
  });
}