//===- BufferizableOpInterface.cpp - Bufferizable Ops --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/IR/AsmState.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/IR/Value.h"
#include "llvm/Support/Debug.h"

namespace mlir {
namespace bufferization {

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.cpp.inc"

} // namespace bufferization
} // namespace mlir

#define DEBUG_TYPE "bufferizable-op-interface"
#define DBGS() (llvm::dbgs() << '[' << DEBUG_TYPE << "] ")
#define LDBG(X) LLVM_DEBUG(DBGS() << (X))

using namespace mlir;
using namespace bufferization;

/// Attribute name used to mark the bufferization layout for region
/// arguments during linalg comprehensive bufferization.
constexpr const ::llvm::StringLiteral
    bufferization::BufferizableOpInterface::kBufferLayoutAttrName;

/// Attribute name used to mark region arguments that can be bufferized
/// in-place during linalg comprehensive bufferization.
constexpr const ::llvm::StringLiteral
    bufferization::BufferizableOpInterface::kInplaceableAttrName;

//===----------------------------------------------------------------------===//
// BufferizationOptions
//===----------------------------------------------------------------------===//

// Default constructor for BufferizationOptions.
BufferizationOptions::BufferizationOptions() = default;

BufferizableOpInterface
BufferizationOptions::dynCastBufferizableOp(Operation *op) const {
  if (isOpAllowed(op))
    return dyn_cast<BufferizableOpInterface>(op);
  return nullptr;
}

BufferizableOpInterface
BufferizationOptions::dynCastBufferizableOp(Value value) const {
  if (auto bufferizableOp = value.getDefiningOp<BufferizableOpInterface>())
    if (isOpAllowed(bufferizableOp.getOperation()))
      return bufferizableOp;
  return nullptr;
}

//===----------------------------------------------------------------------===//
// Helper functions for BufferizableOpInterface
//===----------------------------------------------------------------------===//

static void setInsertionPointAfter(OpBuilder &b, Value value) {
  if (auto bbArg = value.dyn_cast<BlockArgument>()) {
    b.setInsertionPointToStart(bbArg.getOwner());
  } else {
    b.setInsertionPointAfter(value.getDefiningOp());
  }
}

/// Determine which OpOperand* will alias with `result` if the op is
/// bufferized in place. Return an empty vector if the op is not bufferizable.
SmallVector<OpOperand *>
BufferizationState::getAliasingOpOperand(OpResult result) const {
  if (Operation *op = result.getDefiningOp())
    if (auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op))
      return bufferizableOp.getAliasingOpOperand(result, *this);
  return {};
}
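
// Illustrative sketch (not part of this file's logic): querying the aliasing
// contract of a bufferizable op. Given an OpResult `opResult` of such an op
// and a BufferizationState `state`, the hypothetical caller below prints the
// operands that may alias the result under in-place bufferization.
//
//   SmallVector<OpOperand *> aliases = state.getAliasingOpOperand(opResult);
//   for (OpOperand *operand : aliases)
//     llvm::dbgs() << "may alias operand #" << operand->getOperandNumber()
//                  << "\n";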

/// Determine which OpResult will alias with `opOperand` if the op is
/// bufferized in place. Return an empty OpResult if the op is not
/// bufferizable.
OpResult BufferizationState::getAliasingOpResult(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          dyn_cast<BufferizableOpInterface>(opOperand.getOwner()))
    return bufferizableOp.getAliasingOpResult(opOperand, *this);
  return OpResult();
}

/// Return true if `opOperand` bufferizes to a memory read. Return `true` if
/// the op is not bufferizable.
bool BufferizationState::bufferizesToMemoryRead(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          dyn_cast<BufferizableOpInterface>(opOperand.getOwner()))
    return bufferizableOp.bufferizesToMemoryRead(opOperand, *this);

  // Unknown op that returns a tensor. The inplace analysis does not support
  // it. Conservatively return true.
  return true;
}

/// Return true if `opOperand` bufferizes to a memory write. Return `true` if
/// the op is not bufferizable.
bool BufferizationState::bufferizesToMemoryWrite(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          dyn_cast<BufferizableOpInterface>(opOperand.getOwner()))
    return bufferizableOp.bufferizesToMemoryWrite(opOperand, *this);

  // Unknown op that returns a tensor. The inplace analysis does not support
  // it. Conservatively return true.
  return true;
}

/// Return true if `opOperand` does neither read nor write but bufferizes to
/// an alias. Return false if the op is not bufferizable.
bool BufferizationState::bufferizesToAliasOnly(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          dyn_cast<BufferizableOpInterface>(opOperand.getOwner()))
    return bufferizableOp.bufferizesToAliasOnly(opOperand, *this);

  // Unknown op that returns a tensor. The inplace analysis does not support
  // it. Conservatively return false.
  return false;
}

/// Return true if the given value is read by an op that bufferizes to a
/// memory read. Also takes into account ops that create an alias but do not
/// read by themselves (e.g., ExtractSliceOp).
bool BufferizationState::isValueRead(Value value) const {
  assert(value.getType().isa<TensorType>() && "expected TensorType");
  SmallVector<OpOperand *> workingSet;
  for (OpOperand &use : value.getUses())
    workingSet.push_back(&use);

  while (!workingSet.empty()) {
    OpOperand *uMaybeReading = workingSet.pop_back_val();
    // Skip over all ops that neither read nor write (but create an alias).
    if (bufferizesToAliasOnly(*uMaybeReading))
      for (OpOperand &use : getAliasingOpResult(*uMaybeReading).getUses())
        workingSet.push_back(&use);
    if (bufferizesToMemoryRead(*uMaybeReading))
      return true;
  }

  return false;
}
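
// For example (illustrative IR), isValueRead(%0) returns true below even
// though the direct user of %0 only creates an alias; the read happens
// through the aliasing value %1:
//
//   %1 = tensor.extract_slice %0[0] [4] [1] : tensor<8xf32> to tensor<4xf32>
//   %2 = tensor.extract %1[%c0] : tensor<4xf32>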

// Starting from `value`, follow the use-def chain in reverse, always
// selecting the aliasing OpOperands. Find and return Values for which
// `condition` evaluates to true. OpOperands of such matching Values are not
// traversed any further.
llvm::SetVector<Value> BufferizationState::findValueInReverseUseDefChain(
    Value value, llvm::function_ref<bool(Value)> condition) const {
  llvm::SetVector<Value> result, workingSet;
  workingSet.insert(value);

  while (!workingSet.empty()) {
    Value value = workingSet.pop_back_val();
    if (condition(value) || value.isa<BlockArgument>()) {
      result.insert(value);
      continue;
    }

    OpResult opResult = value.cast<OpResult>();
    SmallVector<OpOperand *> opOperands = getAliasingOpOperand(opResult);
    if (opOperands.empty() || !options.isOpAllowed(value.getDefiningOp())) {
      result.insert(value);
      continue;
    }

    for (OpOperand *o : opOperands)
      workingSet.insert(o->get());
  }

  return result;
}

// Find the Values of the last preceding write of a given Value.
llvm::SetVector<Value>
BufferizationState::findLastPrecedingWrite(Value value) const {
  return findValueInReverseUseDefChain(value, [&](Value value) {
    Operation *op = value.getDefiningOp();
    if (!op)
      return true;
    auto bufferizableOp = options.dynCastBufferizableOp(op);
    if (!bufferizableOp)
      return true;
    return bufferizableOp.isMemoryWrite(value.cast<OpResult>(), *this);
  });
}

BufferizationState::BufferizationState(const BufferizationOptions &options)
    : options(options) {}

// bufferization.to_memref is not allowed to change the rank.
static void ensureToMemrefOpIsValid(Value tensor, Type memrefType) {
#ifndef NDEBUG
  auto rankedTensorType = tensor.getType().dyn_cast<RankedTensorType>();
  assert((!rankedTensorType || memrefType.cast<MemRefType>().getRank() ==
                                   rankedTensorType.getRank()) &&
         "to_memref would be invalid: mismatching ranks");
#endif
}

static Value lookupBuffer(RewriterBase &rewriter, Value tensor) {
  assert(tensor.getType().isa<TensorType>() && "unexpected non-tensor type");

  // Replace "%t = to_tensor %m" with %m.
  if (auto toTensorOp = tensor.getDefiningOp<bufferization::ToTensorOp>())
    return toTensorOp.memref();

  // Insert to_memref op.
  OpBuilder::InsertionGuard g(rewriter);
  setInsertionPointAfter(rewriter, tensor);
  Type memrefType;
  if (auto rankedTensorType = tensor.getType().dyn_cast<RankedTensorType>()) {
    memrefType = getDynamicMemRefType(rankedTensorType);
  } else {
    memrefType = getUnrankedMemRefType(
        tensor.getType().cast<TensorType>().getElementType());
  }
  ensureToMemrefOpIsValid(tensor, memrefType);
  return rewriter.create<bufferization::ToMemrefOp>(tensor.getLoc(),
                                                    memrefType, tensor);
}
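
// Illustrative effect of lookupBuffer (hypothetical IR): for a tensor that
// was produced by a to_tensor op, the underlying memref is reused directly,
// i.e., lookupBuffer folds
//
//   %t = bufferization.to_tensor %m : memref<4xf32>
//
// to %m. Any other tensor value is wrapped in a newly inserted to_memref op
// with a fully dynamic layout, e.g.,
//
//   %b = bufferization.to_memref %t : memref<4xf32, #map>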

/// Return the buffer (memref) for a given OpOperand (tensor). Allocate a new
/// buffer and copy over data from the existing buffer if out-of-place
/// bufferization is necessary.
FailureOr<Value> BufferizationState::getBuffer(
    RewriterBase &rewriter, OpOperand &opOperand, bool forceInPlace,
    Optional<Operation *> customCopyInsertionPoint) const {
  OpBuilder::InsertionGuard guard(rewriter);
  Operation *op = opOperand.getOwner();
  Location loc = op->getLoc();
  Value operand = opOperand.get();
  Value operandBuffer = lookupBuffer(rewriter, operand);

  if (forceInPlace || isInPlace(opOperand))
    return operandBuffer;

  // Bufferizing out-of-place: Allocate a new buffer.
  // Move the insertion point right after `operandBuffer`. That is where the
  // allocation should be inserted (in the absence of allocation hoisting).
  setInsertionPointAfter(rewriter, operandBuffer);
  // Allocate the result buffer.
  FailureOr<Value> resultBuffer = createAlloc(rewriter, loc, operandBuffer,
                                              options.createDeallocs, options);
  if (failed(resultBuffer))
    return failure();
  // Do not copy if the last preceding writes of `operand` are ops that do
  // not write (skipping ops that merely create aliases). E.g., InitTensorOp.
  // Note: If `findLastPrecedingWrite` reaches the end of the reverse SSA
  // use-def chain, it returns that value, regardless of whether it is a
  // memory write or not.
  SetVector<Value> lastWrites = findLastPrecedingWrite(operand);
  if (llvm::none_of(lastWrites, [&](Value lastWrite) {
        if (auto bufferizableOp = options.dynCastBufferizableOp(lastWrite))
          return bufferizableOp.isMemoryWrite(lastWrite.cast<OpResult>(),
                                              *this);
        return true;
      }))
    return resultBuffer;
  // Do not copy if the copied data is never read.
  OpResult aliasingOpResult = getAliasingOpResult(opOperand);
  if (aliasingOpResult && !bufferizesToMemoryRead(opOperand) &&
      !isValueRead(aliasingOpResult))
    return resultBuffer;
  // Do not copy if this op does not read the data, but writes it.
  if (bufferizesToMemoryWrite(opOperand) && !bufferizesToMemoryRead(opOperand))
    return resultBuffer;

  if (customCopyInsertionPoint) {
    rewriter.setInsertionPoint(*customCopyInsertionPoint);
  } else {
    // The copy happens right before the op that is bufferized.
    rewriter.setInsertionPoint(op);
  }
  if (failed(
          createMemCpy(rewriter, loc, operandBuffer, *resultBuffer, options)))
    return failure();

  return resultBuffer;
}

void bufferization::replaceOpWithBufferizedValues(RewriterBase &rewriter,
                                                  Operation *op,
                                                  ValueRange values) {
  OpBuilder::InsertionGuard g(rewriter);

  // Replace all OpResults with the given values.
  for (OpResult opResult : op->getOpResults()) {
    // Skip OpResults that have no uses.
    if (opResult.getUses().empty())
      continue;

    Value replacement = values[opResult.getResultNumber()];
    if (opResult.getType().isa<TensorType>()) {
      // The OpResult is a tensor. Such values are replaced with memrefs
      // during bufferization.
      assert((replacement.getType().isa<MemRefType>() ||
              replacement.getType().isa<UnrankedMemRefType>()) &&
             "tensor op result should be replaced with a memref value");
      // The existing uses of the OpResult still expect a tensor. Insert a
      // ToTensorOp. Throughout bufferization, this ToTensorOp will gradually
      // lose all of its users and eventually DCE away.
      rewriter.setInsertionPointAfter(op);
      replacement = rewriter.create<bufferization::ToTensorOp>(
          replacement.getLoc(), replacement);
    }
    opResult.replaceAllUsesWith(replacement);
  }

  rewriter.eraseOp(op);
}
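
// For example (illustrative IR), replacing a tensor-producing op with a
// memref value %buffer:
//
//   %0 = "some_dialect.op"() : () -> tensor<4xf32>
//
// becomes, after replaceOpWithBufferizedValues(rewriter, op, {%buffer}),
//
//   %0 = bufferization.to_tensor %buffer : memref<4xf32>
//
// and the original op is erased. The to_tensor op disappears once all of its
// users have been bufferized.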

//===----------------------------------------------------------------------===//
// Bufferization-specific scoped alloc/dealloc insertion support.
//===----------------------------------------------------------------------===//

/// Move the insertion point of the given builder to the beginning of a
/// surrounding block as much as possible, while not crossing any allocation
/// hoisting barriers.
static void moveInsertionPointToAllocationHoistingBarrier(OpBuilder &b) {
  Operation *op = b.getInsertionBlock()->getParentOp();
  while (op) {
    if (auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op))
      if (bufferizableOp.isAllocationHoistingBarrier())
        break;
    op = op->getParentOp();
  }

  if (!op) {
    // No allocation hoisting barrier found. Hoist to FuncOp.
    op = b.getInsertionBlock()->getParentOp();
    if (!isa<FuncOp>(op))
      op = op->getParentOfType<FuncOp>();
    assert(op && "could not find enclosing FuncOp");
  }

  // TODO: Handle cases where the allocation hoisting barrier has more than
  // one region or block.
  assert(op->getNumRegions() == 1 &&
         "allocation hoisting barriers with >1 regions not supported");
  assert(op->getRegion(0).getBlocks().size() == 1 &&
         "allocation hoisting barriers with >1 blocks not supported");
  b.setInsertionPointToStart(&(op->getRegion(0).front()));
}

/// Compute the type of the `memref` to use for allocating the buffer for
/// `shapedValue`. Also returns (by reference in `dynShape`) the values for
/// the dynamic dimensions in the returned `memref` type. The function may
/// also set the insertion point to an earlier location, where the allocation
/// should happen ("allocation hoisting").
static MemRefType getAllocationTypeAndShape(OpBuilder &b, Location loc,
                                            Value shapedValue,
                                            SmallVectorImpl<Value> &dynShape) {
  MemRefType allocMemRefType =
      getContiguousMemRefType(shapedValue.getType().cast<ShapedType>());

  // Compute the dynamic part of the shape.
  bool reifiedShapes = false;
  if (auto rankedOp = dyn_cast_or_null<ReifyRankedShapedTypeOpInterface>(
          shapedValue.getDefiningOp())) {
    ReifiedRankedShapedTypeDims resultDims;
    if (succeeded(rankedOp.reifyResultShapes(b, resultDims))) {
      reifiedShapes = true;
      OpResult resultValue = shapedValue.dyn_cast<OpResult>();
      auto &shape = resultDims[resultValue.getResultNumber()];
      for (const auto &dim : enumerate(allocMemRefType.getShape()))
        if (ShapedType::isDynamic(dim.value()))
          dynShape.push_back(shape[dim.index()]);
    }
  }

  if (!reifiedShapes) {
    for (const auto &dim : enumerate(allocMemRefType.getShape()))
      if (ShapedType::isDynamic(dim.value())) {
        assert((shapedValue.getType().isa<UnrankedMemRefType>() ||
                shapedValue.getType().isa<MemRefType>()) &&
               "expected MemRef type");
        dynShape.push_back(
            b.create<memref::DimOp>(loc, shapedValue, dim.index()));
      }
  }

  // If the buffer is statically shaped, try to hoist it to the first
  // enclosing parallel region.
  // TODO: Also hoist in the dynamic case. For now this relies on subsequent
  // calls to LICM and buffer hoisting, which will most likely not succeed.
  // TODO: When packing, allocate a static bounding box, which will enable
  // more hoisting.
  if (dynShape.empty())
    moveInsertionPointToAllocationHoistingBarrier(b);

  return allocMemRefType;
}
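
// For example (illustrative): for a `shapedValue` of type tensor<4x?xf32>,
// getAllocationTypeAndShape returns memref<4x?xf32> and appends one Value
// for dimension 1 to `dynShape`, either reified through the defining op's
// ReifyRankedShapedTypeOpInterface or computed with a memref.dim op.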

/// Create an AllocOp/DeallocOp pair, where the AllocOp is after
/// `shapedValue.getDefiningOp` (or at the top of the block in case of a
/// bbArg) and the DeallocOp is at the end of the block.
FailureOr<Value>
bufferization::createAlloc(OpBuilder &b, Location loc, Value shapedValue,
                           bool deallocMemref,
                           const BufferizationOptions &options) {
  // Take a guard before anything else.
  OpBuilder::InsertionGuard g(b);

  // 1. Create the memory allocation.
  assert(shapedValue.getType().isa<ShapedType>());
  MemRefType memRefType = shapedValue.getType().dyn_cast<MemRefType>();
  SmallVector<Value> dynShape;
  // Note: getAllocationTypeAndShape also sets the insertion point.
  MemRefType allocMemRefType =
      getAllocationTypeAndShape(b, loc, shapedValue, dynShape);
  FailureOr<Value> allocated =
      createAlloc(b, loc, allocMemRefType, dynShape, options);
  if (failed(allocated))
    return failure();
  Value casted = allocated.getValue();
  if (memRefType && memRefType != allocMemRefType) {
    assert(memref::CastOp::areCastCompatible(allocated.getValue().getType(),
                                             memRefType) &&
           "createAlloc: cast incompatible");
    casted = b.create<memref::CastOp>(loc, memRefType, allocated.getValue());
  }

  if (deallocMemref) {
    // 2. Create the memory deallocation.
    b.setInsertionPoint(
        allocated.getValue().getParentBlock()->getTerminator());
    if (failed(createDealloc(b, loc, allocated.getValue(), options)))
      return failure();
  }

  return casted;
}

/// Create a memref allocation with the given type and dynamic extents.
FailureOr<Value>
bufferization::createAlloc(OpBuilder &b, Location loc, MemRefType type,
                           ValueRange dynShape,
                           const BufferizationOptions &options) {
  if (options.allocationFn)
    return (*options.allocationFn)(b, loc, type, dynShape);

  // Default buffer allocation via AllocOp.
  Value allocated = b.create<memref::AllocOp>(
      loc, type, dynShape, b.getI64IntegerAttr(kBufferAlignments));
  return allocated;
}

/// Create a memref allocation with the given type and dynamic extents. May
/// also create a deallocation for the memref.
FailureOr<Value>
bufferization::createAlloc(OpBuilder &b, Location loc, MemRefType type,
                           ValueRange dynShape, bool deallocMemref,
                           const BufferizationOptions &options) {
  OpBuilder::InsertionGuard g(b);

  FailureOr<Value> alloc = createAlloc(b, loc, type, dynShape, options);
  if (failed(alloc))
    return failure();

  if (deallocMemref) {
    // Dealloc at the end of the block.
    b.setInsertionPoint(alloc.getValue().getParentBlock()->getTerminator());
    if (failed(createDealloc(b, loc, *alloc, options)))
      return failure();
  }

  return alloc;
}

/// Create a memref deallocation.
LogicalResult
bufferization::createDealloc(OpBuilder &b, Location loc, Value allocatedBuffer,
                             const BufferizationOptions &options) {
  if (options.deallocationFn)
    return (*options.deallocationFn)(b, loc, allocatedBuffer);

  // Default buffer deallocation via DeallocOp.
  b.create<memref::DeallocOp>(loc, allocatedBuffer);
  return success();
}

/// Create a memory copy between two memref buffers.
LogicalResult bufferization::createMemCpy(OpBuilder &b, Location loc,
                                          Value from, Value to,
                                          const BufferizationOptions &options) {
  if (options.memCpyFn)
    return (*options.memCpyFn)(b, loc, from, to);

  b.create<memref::CopyOp>(loc, from, to);
  return success();
}
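
// Illustrative sketch (hypothetical client code): the default memref.copy
// can be swapped out through BufferizationOptions. `linalg::CopyOp` is an
// assumption here; any op that copies between two memrefs works.
//
//   BufferizationOptions options;
//   options.memCpyFn = [](OpBuilder &b, Location loc, Value from, Value to) {
//     b.create<linalg::CopyOp>(loc, from, to);
//     return success();
//   };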

//===----------------------------------------------------------------------===//
// Bufferization-specific type and value helpers.
//===----------------------------------------------------------------------===//

bool bufferization::isFunctionArgument(Value value) {
  auto bbArg = value.dyn_cast<BlockArgument>();
  if (!bbArg)
    return false;
  return isa<FuncOp>(bbArg.getOwner()->getParentOp());
}

MemRefType
bufferization::getContiguousMemRefType(ShapedType shapedType,
                                       MemRefLayoutAttrInterface layout,
                                       Attribute memorySpace) {
  return MemRefType::get(shapedType.getShape(), shapedType.getElementType(),
                         layout, memorySpace);
}

UnrankedMemRefType bufferization::getUnrankedMemRefType(Type elementType,
                                                        Attribute memorySpace) {
  return UnrankedMemRefType::get(elementType, memorySpace);
}

MemRefType bufferization::getDynamicMemRefType(RankedTensorType tensorType,
                                               unsigned addressSpace) {
  // TODO: Address space decisions to connect with the actual alloc.
  int64_t dynamicOffset = ShapedType::kDynamicStrideOrOffset;
  SmallVector<int64_t> dynamicStrides(tensorType.getRank(),
                                      ShapedType::kDynamicStrideOrOffset);
  AffineMap stridedLayout = makeStridedLinearLayoutMap(
      dynamicStrides, dynamicOffset, tensorType.getContext());
  return MemRefType::get(tensorType.getShape(), tensorType.getElementType(),
                         stridedLayout, addressSpace);
}
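
// For example (illustrative): getDynamicMemRefType on tensor<4x8xf32> yields
// a memref with a dynamic offset and fully dynamic strides, roughly
//
//   memref<4x8xf32, affine_map<(d0, d1)[s0, s1, s2]
//                                  -> (d0 * s1 + s0 + d1 * s2)>>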