//===- Bufferize.cpp - Bufferization utilities ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/Bufferization/Transforms/Bufferize.h"
#include "mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h"
#include "mlir/Dialect/Bufferization/Transforms/OneShotModuleBufferize.h"
#include "mlir/Dialect/Bufferization/Transforms/Passes.h"
#include "mlir/Dialect/Bufferization/Transforms/TensorCopyInsertion.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/IR/Operation.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "mlir/Transforms/Passes.h"

using namespace mlir;
using namespace mlir::bufferization;

//===----------------------------------------------------------------------===//
// BufferizeTypeConverter
//===----------------------------------------------------------------------===//

static Value materializeToTensor(OpBuilder &builder, TensorType type,
                                 ValueRange inputs, Location loc) {
  assert(inputs.size() == 1);
  assert(inputs[0].getType().isa<BaseMemRefType>());
  return builder.create<bufferization::ToTensorOp>(loc, type, inputs[0]);
}

/// Registers conversions into BufferizeTypeConverter.
BufferizeTypeConverter::BufferizeTypeConverter() {
  // Keep all types unchanged.
  addConversion([](Type type) { return type; });
  // Convert RankedTensorType to MemRefType.
  addConversion([](RankedTensorType type) -> Type {
    return MemRefType::get(type.getShape(), type.getElementType());
  });
  // Convert UnrankedTensorType to UnrankedMemRefType.
  addConversion([](UnrankedTensorType type) -> Type {
    return UnrankedMemRefType::get(type.getElementType(), 0);
  });
  addArgumentMaterialization(materializeToTensor);
  addSourceMaterialization(materializeToTensor);
  addTargetMaterialization([](OpBuilder &builder, BaseMemRefType type,
                              ValueRange inputs, Location loc) -> Value {
    assert(inputs.size() == 1 && "expected exactly one input");

    if (auto inputType = inputs[0].getType().dyn_cast<MemRefType>()) {
      // MemRef to MemRef cast.
      assert(inputType != type && "expected different types");
      // Unranked to ranked and ranked to unranked casts must be explicit.
      auto rankedDestType = type.dyn_cast<MemRefType>();
      if (!rankedDestType)
        return nullptr;
      FailureOr<Value> replacement =
          castOrReallocMemRefValue(builder, inputs[0], rankedDestType);
      if (failed(replacement))
        return nullptr;
      return *replacement;
    }

    if (inputs[0].getType().isa<TensorType>()) {
      // Tensor to MemRef cast.
      return builder.create<bufferization::ToMemrefOp>(loc, type, inputs[0]);
    }

    llvm_unreachable("only tensor/memref input types supported");
  });
}
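
// For illustration: with the conversions registered above, a ranked
// `tensor<4x?xf32>` converts to `memref<4x?xf32>` (default layout) and an
// unranked `tensor<*xf32>` converts to `memref<*xf32>` in memory space 0.
// The materializations bridge partially converted IR by inserting
// bufferization.to_tensor / bufferization.to_memref ops, or a memref cast /
// reallocation for memref-to-memref target materializations.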

void mlir::bufferization::populateBufferizeMaterializationLegality(
    ConversionTarget &target) {
  target.addLegalOp<bufferization::ToTensorOp, bufferization::ToMemrefOp>();
}

namespace {
// In a finalizing bufferize conversion, we know that all tensors have been
// converted to memrefs, thus, this op becomes an identity.
class BufferizeToTensorOp
    : public OpConversionPattern<bufferization::ToTensorOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(bufferization::ToTensorOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOp(op, adaptor.getMemref());
    return success();
  }
};
} // namespace

namespace {
// In a finalizing bufferize conversion, we know that all tensors have been
// converted to memrefs, thus, this op becomes an identity.
class BufferizeToMemrefOp
    : public OpConversionPattern<bufferization::ToMemrefOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(bufferization::ToMemrefOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOp(op, adaptor.getTensor());
    return success();
  }
};
} // namespace

void mlir::bufferization::populateEliminateBufferizeMaterializationsPatterns(
    BufferizeTypeConverter &typeConverter, RewritePatternSet &patterns) {
  patterns.add<BufferizeToTensorOp, BufferizeToMemrefOp>(typeConverter,
                                                         patterns.getContext());
}
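
// For illustration: in a finalizing conversion, the patterns above make
// materialization pairs such as
//   %t = bufferization.to_tensor %m : memref<4xf32>
//   %m2 = bufferization.to_memref %t : memref<4xf32>
// disappear entirely; uses of %m2 are rewired to %m, and tensor uses of %t are
// remapped to the already-converted memref value.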

namespace {
struct FinalizingBufferizePass
    : public FinalizingBufferizeBase<FinalizingBufferizePass> {
  using FinalizingBufferizeBase<
      FinalizingBufferizePass>::FinalizingBufferizeBase;

  void runOnOperation() override {
    auto func = getOperation();
    auto *context = &getContext();

    BufferizeTypeConverter typeConverter;
    RewritePatternSet patterns(context);
    ConversionTarget target(*context);

    populateEliminateBufferizeMaterializationsPatterns(typeConverter, patterns);

    // If all result types are legal, and all block arguments are legal
    // (ensured by func conversion above), then all types in the program are
    // legal.
    //
    // We also check that the operand types are legal to avoid creating invalid
    // IR. For example, this prevents
    // populateEliminateBufferizeMaterializationsPatterns from updating the
    // types of the operands to a return op without updating the enclosing
    // function.
    target.markUnknownOpDynamicallyLegal(
        [&](Operation *op) { return typeConverter.isLegal(op); });

    if (failed(applyFullConversion(func, target, std::move(patterns))))
      signalPassFailure();
  }
};

static BufferizationOptions::LayoutMapOption
parseLayoutMapOption(const std::string &s) {
  if (s == "fully-dynamic-layout-map")
    return BufferizationOptions::LayoutMapOption::FullyDynamicLayoutMap;
  if (s == "identity-layout-map")
    return BufferizationOptions::LayoutMapOption::IdentityLayoutMap;
  if (s == "infer-layout-map")
    return BufferizationOptions::LayoutMapOption::InferLayoutMap;
  llvm_unreachable("invalid layout map option");
}
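
// Note (informal): IdentityLayoutMap produces memref types with the default
// (identity) layout, which may force buffer copies when a differently laid
// out buffer is passed; FullyDynamicLayoutMap produces memref types whose
// strides and offset are fully dynamic, so any buffer can be passed without a
// cast or copy; InferLayoutMap derives the most precise layout from the IR
// and is rejected as a setting for unknown type conversion in bufferizeOp
// below.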

struct OneShotBufferizePass
    : public OneShotBufferizeBase<OneShotBufferizePass> {
  OneShotBufferizePass() : OneShotBufferizeBase<OneShotBufferizePass>() {}

  explicit OneShotBufferizePass(const OneShotBufferizationOptions &options)
      : options(options) {}

  void getDependentDialects(DialectRegistry &registry) const override {
    registry
        .insert<bufferization::BufferizationDialect, memref::MemRefDialect>();
    registerAllocationOpInterfaceExternalModels(registry);
  }

  void runOnOperation() override {
    OneShotBufferizationOptions opt;
    if (!options) {
      // Make new bufferization options if none were provided when creating the
      // pass.
      opt.allowReturnAllocs = allowReturnAllocs;
      opt.allowUnknownOps = allowUnknownOps;
      opt.analysisFuzzerSeed = analysisFuzzerSeed;
      opt.createDeallocs = createDeallocs;
      opt.functionBoundaryTypeConversion =
          parseLayoutMapOption(functionBoundaryTypeConversion);
      opt.printConflicts = printConflicts;
      opt.testAnalysisOnly = testAnalysisOnly;
      opt.bufferizeFunctionBoundaries = bufferizeFunctionBoundaries;
      opt.unknownTypeConversion = parseLayoutMapOption(unknownTypeConversion);

      OpFilter::Entry::FilterFn filterFn = [&](Operation *op) {
        // Filter may be specified via options.
        if (this->dialectFilter.hasValue())
          return llvm::is_contained(this->dialectFilter,
                                    op->getDialect()->getNamespace());
        // No filter specified: All ops are allowed.
        return true;
      };
      opt.opFilter.allowOperation(filterFn);
    } else {
      opt = *options;
    }

    ModuleOp moduleOp = getOperation();
    if (opt.bufferizeFunctionBoundaries) {
      if (failed(runOneShotModuleBufferize(moduleOp, opt))) {
        signalPassFailure();
        return;
      }
    } else {
      if (failed(runOneShotBufferize(moduleOp, opt))) {
        signalPassFailure();
        return;
      }
    }

    if (opt.testAnalysisOnly)
      return;

    OpPassManager cleanupPipeline("builtin.module");
    cleanupPipeline.addPass(createCanonicalizerPass());
    cleanupPipeline.addPass(createCSEPass());
    cleanupPipeline.addPass(createLoopInvariantCodeMotionPass());
    (void)runPipeline(cleanupPipeline, moduleOp);
  }

private:
  llvm::Optional<OneShotBufferizationOptions> options;
};
} // namespace
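
// Note on the pass above: when OneShotBufferizePass is constructed with an
// explicit OneShotBufferizationOptions (see createOneShotBufferizePass below),
// those options take precedence and the pass' command-line flags are ignored;
// otherwise the flags are translated into an options struct in runOnOperation.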

namespace {
struct BufferizationBufferizePass
    : public BufferizationBufferizeBase<BufferizationBufferizePass> {
  void runOnOperation() override {
    BufferizationOptions options = getPartialBufferizationOptions();
    options.opFilter.allowDialect<BufferizationDialect>();

    if (failed(bufferizeOp(getOperation(), options)))
      signalPassFailure();
  }

  void getDependentDialects(DialectRegistry &registry) const override {
    registry
        .insert<bufferization::BufferizationDialect, memref::MemRefDialect>();
  }
};
} // namespace

std::unique_ptr<Pass> mlir::bufferization::createBufferizationBufferizePass() {
  return std::make_unique<BufferizationBufferizePass>();
}

std::unique_ptr<Pass> mlir::bufferization::createOneShotBufferizePass() {
  return std::make_unique<OneShotBufferizePass>();
}

std::unique_ptr<Pass> mlir::bufferization::createOneShotBufferizePass(
    const OneShotBufferizationOptions &options) {
  return std::make_unique<OneShotBufferizePass>(options);
}

std::unique_ptr<OperationPass<func::FuncOp>>
mlir::bufferization::createFinalizingBufferizePass() {
  return std::make_unique<FinalizingBufferizePass>();
}

//===----------------------------------------------------------------------===//
// BufferizableOpInterface-based Bufferization
//===----------------------------------------------------------------------===//

static bool isaTensor(Type t) { return t.isa<TensorType>(); }

/// Return true if the given op has a tensor result or a tensor operand.
static bool hasTensorSemantics(Operation *op) {
  if (auto funcOp = dyn_cast<FunctionOpInterface>(op)) {
    bool hasTensorArg = any_of(funcOp.getArgumentTypes(), isaTensor);
    bool hasTensorResult = any_of(funcOp.getResultTypes(), isaTensor);
    return hasTensorArg || hasTensorResult;
  }

  bool hasTensorResult = any_of(op->getResultTypes(), isaTensor);
  bool hasTensorOperand = any_of(op->getOperandTypes(), isaTensor);
  return hasTensorResult || hasTensorOperand;
}

namespace {
/// A rewriter that keeps track of extra information during bufferization.
class BufferizationRewriter : public IRRewriter {
public:
  BufferizationRewriter(MLIRContext *ctx, DenseSet<Operation *> &erasedOps,
                        DenseSet<Operation *> &toMemrefOps,
                        SmallVector<Operation *> &worklist,
                        const BufferizationOptions &options,
                        const OpFilter *opFilter)
      : IRRewriter(ctx), erasedOps(erasedOps), toMemrefOps(toMemrefOps),
        worklist(worklist), analysisState(options), opFilter(opFilter) {}

protected:
  void notifyOperationRemoved(Operation *op) override {
    IRRewriter::notifyOperationRemoved(op);
    erasedOps.insert(op);
    // Erase if present.
    toMemrefOps.erase(op);
  }

  void notifyOperationInserted(Operation *op) override {
    IRRewriter::notifyOperationInserted(op);
    erasedOps.erase(op);

    // Keep track of to_memref ops.
    if (isa<ToMemrefOp>(op)) {
      toMemrefOps.insert(op);
      return;
    }

    // Skip to_tensor ops.
    if (isa<ToTensorOp>(op))
      return;

    // Skip non-tensor ops.
    if (!hasTensorSemantics(op))
      return;

    // Skip ops that are not allowed to be bufferized.
    auto const &options = analysisState.getOptions();
    if (!options.isOpAllowed(op) || (opFilter && !opFilter->isOpAllowed(op)))
      return;

#ifndef NDEBUG
    // Read-only tensor ops may be created during bufferization. Ops that are
    // writing should not be created because such ops were never analyzed.
    // Bufferizing such ops could introduce a RaW conflict.
    for (OpOperand &operand : op->getOpOperands())
      if (operand.get().getType().isa<TensorType>())
        assert(!analysisState.bufferizesToMemoryWrite(operand) &&
               "creating tensor ops that bufferize to a memory write is not "
               "allowed during bufferization");
#endif // NDEBUG

    // Add op to worklist.
    worklist.push_back(op);
  }

private:
  /// A set of all erased ops.
  DenseSet<Operation *> &erasedOps;

  /// A set of all to_memref ops.
  DenseSet<Operation *> &toMemrefOps;

  /// The worklist of ops to be bufferized.
  SmallVector<Operation *> &worklist;

  /// The analysis state. Used for debug assertions and access to the
  /// bufferization options.
  const AnalysisState analysisState;

  /// An extra op filter for bufferization.
  const OpFilter *opFilter;
};
} // namespace
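
// Note on the driver below: ops created during bufferization are reported
// through BufferizationRewriter::notifyOperationInserted and appended to the
// same worklist, so the single forward sweep in bufferizeOp also picks up
// newly created ops that still have tensor semantics.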

LogicalResult bufferization::bufferizeOp(Operation *op,
                                         const BufferizationOptions &options,
                                         bool copyBeforeWrite,
                                         const OpFilter *opFilter) {
  assert(options.unknownTypeConversion !=
             BufferizationOptions::LayoutMapOption::InferLayoutMap &&
         "invalid layout map option");

  if (copyBeforeWrite) {
    AnalysisState state(options);
    if (failed(insertTensorCopies(op, state)))
      return failure();
  }

  // Keep track of to_memref ops.
  DenseSet<Operation *> toMemrefOps;
  op->walk([&](ToMemrefOp toMemrefOp) { toMemrefOps.insert(toMemrefOp); });

  // Gather all bufferizable ops in top-to-bottom order.
  //
  // We should ideally know the exact memref type of all operands when
  // bufferizing an op. (This is the case when bufferizing top-to-bottom.)
  // Otherwise, we have to use a memref type with a fully dynamic layout map to
  // avoid copies. We are currently missing patterns for layout maps to
  // canonicalize away (or canonicalize to more precise layouts).
  SmallVector<Operation *> worklist;
  op->walk<WalkOrder::PreOrder>([&](Operation *op) {
    if (hasTensorSemantics(op))
      worklist.push_back(op);
  });

  // Keep track of all erased ops.
  DenseSet<Operation *> erasedOps;

  // Bufferize all ops.
  BufferizationRewriter rewriter(op->getContext(), erasedOps, toMemrefOps,
                                 worklist, options, opFilter);
  for (unsigned i = 0; i < worklist.size(); ++i) {
    Operation *op = worklist[i];
    // Skip ops that were erased.
    if (erasedOps.contains(op))
      continue;
    // Skip ops that are not bufferizable or not allowed.
    auto bufferizableOp = options.dynCastBufferizableOp(op);
    if (!bufferizableOp)
      continue;
    if (opFilter && !opFilter->isOpAllowed(op))
      continue;
    // Skip ops that no longer have tensor semantics.
    if (!hasTensorSemantics(op))
      continue;
    // Bufferize the op.
    rewriter.setInsertionPoint(op);
    if (failed(bufferizableOp.bufferize(rewriter, options)))
      return op->emitError("failed to bufferize op");
  }

  // Fold all to_memref(to_tensor(x)) pairs.
  for (Operation *op : toMemrefOps) {
    rewriter.setInsertionPoint(op);
    (void)bufferization::foldToMemrefToTensorPair(rewriter,
                                                  cast<ToMemrefOp>(op));
  }

  // Check the result of bufferization. Return an error if an op was not
  // bufferized, unless partial bufferization is allowed.
  if (options.allowUnknownOps)
    return success();

  for (Operation *op : worklist) {
    // Skip ops that are entirely gone.
    if (erasedOps.contains(op))
      continue;
    // Ops that no longer have tensor semantics (because they were updated
    // in-place) are allowed.
    if (!hasTensorSemantics(op))
      continue;
    // Skip ops that are not allowed to be bufferized.
    if (!options.isOpAllowed(op))
      continue;
    if (opFilter && !opFilter->isOpAllowed(op))
      continue;
    // Ops without any uses and no side effects will fold away.
    if (op->getUses().empty() && MemoryEffectOpInterface::hasNoEffect(op))
      continue;
    // ToTensorOps/ToMemrefOps are allowed in the output.
    if (isa<ToTensorOp, ToMemrefOp>(op))
      continue;
    return op->emitError("op was not bufferized");
  }

  return success();
}

BufferizationOptions bufferization::getPartialBufferizationOptions() {
  BufferizationOptions options;
  options.allowUnknownOps = true;
  options.createDeallocs = false;
  options.enforceAliasingInvariants = false;
  options.unknownTypeConversion =
      BufferizationOptions::LayoutMapOption::IdentityLayoutMap;
  options.opFilter.allowDialect<BufferizationDialect>();
  return options;
}