//===- Hoisting.cpp - Linalg hoisting transformations ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements functions concerned with hoisting invariant operations
// in the context of Linalg transformations.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Linalg/Transforms/Hoisting.h"
#include "mlir/Analysis/AffineStructures.h"
#include "mlir/Analysis/SliceAnalysis.h"
#include "mlir/Dialect/Affine/IR/AffineValueMap.h"
#include "mlir/Dialect/Affine/Utils.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/SCF/Utils.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/Dialect/Vector/VectorUtils.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Dominance.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "mlir/Transforms/LoopUtils.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Debug.h"

using llvm::dbgs;

#define DEBUG_TYPE "linalg-hoisting"

#define DBGS() (dbgs() << '[' << DEBUG_TYPE << "] ")

using namespace mlir;
using namespace mlir::linalg;

namespace {
/// Represents a unit of hoistable TransferWriteOp. This may comprise other
/// instructions that need to be hoisted too.
struct HoistableWrite {
  vector::TransferWriteOp transferWriteOp;
  tensor::InsertSliceOp insertSliceOp;
};
/// Represents a unit of hoistable TransferReadOp. This may comprise other
/// instructions that need to be hoisted too.
struct HoistableRead {
  vector::TransferReadOp transferReadOp;
  tensor::ExtractSliceOp extractSliceOp;
};
} // namespace

/// Return true if op1 and op2 are the same constant or the same SSA value.
static bool isEqualOffsetSizeOrStride(OpFoldResult op1, OpFoldResult op2) {
  auto getConstantIntValue = [](OpFoldResult ofr) -> llvm::Optional<int64_t> {
    Attribute attr = ofr.dyn_cast<Attribute>();
    // Note: the isa+cast-like pattern allows writing the condition below on
    // one line.
    if (!attr && ofr.get<Value>().getDefiningOp<arith::ConstantOp>())
      attr = ofr.get<Value>().getDefiningOp<arith::ConstantOp>().getValue();
    if (auto intAttr = attr.dyn_cast_or_null<IntegerAttr>())
      return intAttr.getValue().getSExtValue();
    return llvm::None;
  };
  auto cst1 = getConstantIntValue(op1), cst2 = getConstantIntValue(op2);
  if (cst1 && cst2 && *cst1 == *cst2)
    return true;
  auto v1 = op1.dyn_cast<Value>(), v2 = op2.dyn_cast<Value>();
  return v1 && v2 && v1 == v2;
}

/// Return true if all offsets, sizes and strides are equal.
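/// For example (illustrative IR; %i is any SSA value, and each constant side
/// may be a static attribute or an arith.constant result):
///   %e = tensor.extract_slice %t[0, %i] [4, 8] [1, 1] ...
///   %w = tensor.insert_slice %v into %t[0, %i] [4, 8] [1, 1] ...
/// have pairwise-equal mixed offsets, sizes and strides.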
static bool sameOffsetsSizesAndStrides(tensor::ExtractSliceOp s,
                                       tensor::InsertSliceOp si) {
  if (s.static_offsets().size() != si.static_offsets().size())
    return false;
  if (s.static_sizes().size() != si.static_sizes().size())
    return false;
  if (s.static_strides().size() != si.static_strides().size())
    return false;
  for (auto it : llvm::zip(s.getMixedOffsets(), si.getMixedOffsets()))
    if (!isEqualOffsetSizeOrStride(std::get<0>(it), std::get<1>(it)))
      return false;
  for (auto it : llvm::zip(s.getMixedSizes(), si.getMixedSizes()))
    if (!isEqualOffsetSizeOrStride(std::get<0>(it), std::get<1>(it)))
      return false;
  for (auto it : llvm::zip(s.getMixedStrides(), si.getMixedStrides()))
    if (!isEqualOffsetSizeOrStride(std::get<0>(it), std::get<1>(it)))
      return false;
  return true;
}

/// Look for a HoistableRead, in the given tensor uses, accessing the same
/// offset as the HoistableWrite.
static HoistableRead findMatchingTransferRead(HoistableWrite write,
                                              Value srcTensor) {
  assert(write.transferWriteOp &&
         "expected hoistable write to have a .transfer_write");

  LLVM_DEBUG(DBGS() << "findMatchingTransferRead for: "
                    << *write.transferWriteOp.getOperation() << "\n");
  if (write.insertSliceOp)
    LLVM_DEBUG(DBGS() << "findMatchingTransferRead insertSliceOp: "
                      << *write.insertSliceOp.getOperation() << "\n");

  for (Operation *user : srcTensor.getUsers()) {
    LLVM_DEBUG(DBGS() << "findMatchingTransferRead inspect user: " << *user
                      << "\n");

    // If the HoistableWrite involves an InsertSliceOp, we need to find a
    // matching ExtractSliceOp.
    tensor::ExtractSliceOp sliceOp;
    Operation *maybeTransferReadUser = user;
    if (write.insertSliceOp) {
      sliceOp = dyn_cast<tensor::ExtractSliceOp>(user);
      if (!sliceOp || sliceOp.getResult().getType() !=
                          write.insertSliceOp.source().getType())
        continue;

      LLVM_DEBUG(DBGS() << "check whether sameOffsetsSizesAndStrides: "
                        << *sliceOp << " vs " << *write.insertSliceOp << "\n");
      if (!sameOffsetsSizesAndStrides(sliceOp, write.insertSliceOp))
        continue;

      LLVM_DEBUG(DBGS() << "sameOffsetsSizesAndStrides: SUCCESS\n");
      // If we got here, sliceOp is hoistable iff it has exactly 2 uses:
      //   1. the transfer_write we want to hoist.
      //   2. a matching transfer_read.
      // Anything else, we skip.
      bool skip = false;
      Operation *otherUser = nullptr;
      for (Operation *u : sliceOp->getUsers()) {
        if (u == write.transferWriteOp)
          continue;
        if (otherUser) {
          skip = true;
          break;
        }
        otherUser = u;
      }
      if (skip || !otherUser)
        continue;
      maybeTransferReadUser = otherUser;
    }

    LLVM_DEBUG(DBGS() << "maybeTransferReadUser: " << *maybeTransferReadUser
                      << "\n");
    auto read = dyn_cast<vector::TransferReadOp>(maybeTransferReadUser);
    if (read && read.indices() == write.transferWriteOp.indices() &&
        read.getVectorType() == write.transferWriteOp.getVectorType())
      return HoistableRead{read, sliceOp};
  }
  return HoistableRead();
}

/// Check whether the chunk of data inserted by the HoistableWrite is read by
/// any op other than the HoistableRead candidate.
static bool tensorChunkAccessedByUnknownOp(HoistableWrite write,
                                           HoistableRead candidateRead,
                                           BlockArgument tensorArg) {
  // Make sure none of the other uses read the part of the tensor modified
  // by the transfer_write.
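  // This is a simple worklist traversal: starting from the uses of the
  // loop-carried tensor argument, transitively follow uses through
  // vector.transfer_write results, nested scf.for region arguments and
  // scf.yield results, and conservatively report an unknown access for
  // anything that cannot be analyzed (e.g. slice ops or transfer_reads that
  // are not provably disjoint from the write).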
  llvm::SmallVector<Value::use_range, 1> uses;
  uses.push_back(tensorArg.getUses());
  while (!uses.empty()) {
    for (OpOperand &use : uses.pop_back_val()) {
      Operation *user = use.getOwner();
      // Skip the candidate use, only inspect the "other" uses.
      if (user == candidateRead.transferReadOp ||
          user == candidateRead.extractSliceOp ||
          user == write.transferWriteOp || user == write.insertSliceOp)
        continue;
      // Consider all transitive uses through an extract_slice / insert_slice.
      // TODO: at the moment we just bail because a stronger analysis is
      // needed for these cases.
      if (isa<tensor::ExtractSliceOp, tensor::InsertSliceOp>(user))
        return true;
      // Consider all transitive uses through a vector.transfer_write.
      if (auto writeUser = dyn_cast<vector::TransferWriteOp>(user)) {
        uses.push_back(writeUser->getResult(0).getUses());
        continue;
      }
      // Consider all nested uses through an scf::ForOp. We may have
      // pass-through tensor arguments left from previous levels of hoisting.
      if (auto forUser = dyn_cast<scf::ForOp>(user)) {
        Value arg = forUser.getLoopBody().getArgument(
            use.getOperandNumber() - forUser.getNumControlOperands() +
            /*iv value*/ 1);
        uses.push_back(arg.getUses());
        continue;
      }
      // Follow the yielded value as long as it doesn't escape the original
      // region.
      scf::YieldOp yieldUser = dyn_cast<scf::YieldOp>(user);
      if (yieldUser && write.transferWriteOp->getParentOp()->isAncestor(
                           yieldUser->getParentOp())) {
        Value ret = yieldUser->getParentOp()->getResult(use.getOperandNumber());
        uses.push_back(ret.getUses());
        continue;
      }
      auto read = dyn_cast<vector::TransferReadOp>(user);
      if (!read || !isDisjointTransferIndices(
                       cast<VectorTransferOpInterface>(read.getOperation()),
                       cast<VectorTransferOpInterface>(
                           write.transferWriteOp.getOperation()))) {
        return true;
      }
    }
  }
  return false;
}

/// Return the `forOp`-invariant HoistableWrite that produces `yieldOperand`.
/// Return the null HoistableWrite() if it is not comprised of a
/// vector.transfer_write + optional insert_slice or if any of the indexings
/// is `forOp`-dependent.
static HoistableWrite
getLoopInvariantTransferWriteOpDefining(scf::ForOp forOp,
                                        OpOperand &yieldOperand) {
  Value v = yieldOperand.get();
  if (auto write = v.getDefiningOp<vector::TransferWriteOp>()) {
    // Indexing must not depend on `forOp`.
    for (Value operand : write.indices())
      if (!forOp.isDefinedOutsideOfLoop(operand))
        return HoistableWrite();

    return HoistableWrite{write, nullptr};
  }

  if (auto insertSliceOp = v.getDefiningOp<tensor::InsertSliceOp>()) {
    // Inserted slice must come from vector.transfer_write.
    auto write =
        insertSliceOp.source().getDefiningOp<vector::TransferWriteOp>();
    if (!write)
      return HoistableWrite();

    // Tensor inserted into must be a BBArg at the position matching
    // yieldOperand's.
    auto bbArg = insertSliceOp.dest().dyn_cast<BlockArgument>();
    if (!bbArg || bbArg.getOwner()->getParentOp() != forOp ||
        bbArg.getArgNumber() != /*num iv=*/1 + yieldOperand.getOperandNumber())
      return HoistableWrite();

    // Indexing inserted into must not depend on `forOp`.
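    // (getOffsetSizeAndStrideStartOperandIndex() points past the source and
    // dest operands of the insert_slice; the remaining operands are the
    // dynamic offsets, sizes and strides inspected below.)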
    for (Value operand : insertSliceOp->getOperands().drop_front(
             tensor::InsertSliceOp::getOffsetSizeAndStrideStartOperandIndex()))
      if (!forOp.isDefinedOutsideOfLoop(operand))
        return HoistableWrite();

    return HoistableWrite{write, insertSliceOp};
  }

  return HoistableWrite();
}

/// Mechanical hoisting of a matching HoistableRead / HoistableWrite pair.
static void hoistReadWrite(HoistableRead read, HoistableWrite write,
                           BlockArgument tensorBBArg) {
  scf::ForOp forOp = cast<scf::ForOp>(tensorBBArg.getOwner()->getParentOp());
  assert(read.transferReadOp && write.transferWriteOp &&
         "expected transfer_read and transfer_write ops to be set");
  assert(((read.extractSliceOp && write.insertSliceOp) ||
          (!read.extractSliceOp && !write.insertSliceOp)) &&
         "expected matching extract_slice / insert_slice");
  LLVM_DEBUG(DBGS() << "In forOp:\n"
                    << *forOp.getOperation()
                    << "\nHoist: " << *read.transferReadOp.getOperation()
                    << "\nHoist: " << *write.transferWriteOp.getOperation()
                    << "\nInvolving: " << tensorBBArg << "\n");

  // If a read slice is present, hoist it.
  if (read.extractSliceOp && failed(forOp.moveOutOfLoop({read.extractSliceOp})))
    llvm_unreachable("Unexpected failure moving extract_slice out of loop");

  // Hoist the transfer_read op.
  if (failed(forOp.moveOutOfLoop({read.transferReadOp})))
    llvm_unreachable("Unexpected failure moving transfer read out of loop");

  // TODO: don't hardcode /*numIvs=*/1.
  assert(tensorBBArg.getArgNumber() >= /*numIvs=*/1);
  unsigned initArgNumber = tensorBBArg.getArgNumber() - /*numIvs=*/1;

  // Update the source tensor.
  if (read.extractSliceOp)
    read.extractSliceOp.sourceMutable().assign(forOp.initArgs()[initArgNumber]);
  else
    read.transferReadOp.sourceMutable().assign(forOp.initArgs()[initArgNumber]);

  // Hoist write after.
  if (write.insertSliceOp)
    write.insertSliceOp->moveAfter(forOp);
  write.transferWriteOp->moveAfter(forOp);

  // Update the yield.
  auto yieldOp = cast<scf::YieldOp>(forOp.region().front().getTerminator());
  if (write.insertSliceOp)
    yieldOp->setOperand(initArgNumber, write.insertSliceOp.dest());
  else
    yieldOp->setOperand(initArgNumber, write.transferWriteOp.source());

  // Rewrite `loop` with additional new yields.
  OpBuilder b(read.transferReadOp);
  auto newForOp = cloneWithNewYields(b, forOp, read.transferReadOp.vector(),
                                     write.transferWriteOp.vector());
  // The transfer write has been hoisted: update the vector and tensor
  // sources. Replace the result of the loop with the new tensor created
  // outside the loop.
  // Depending on whether an insert_slice is present or not, it carries the
  // update on the tensor operands.
  if (write.insertSliceOp) {
    newForOp.getResult(initArgNumber)
        .replaceAllUsesWith(write.insertSliceOp.getResult());
    write.transferWriteOp.sourceMutable().assign(read.extractSliceOp.result());
    write.insertSliceOp.destMutable().assign(read.extractSliceOp.source());
  } else {
    newForOp.getResult(initArgNumber)
        .replaceAllUsesWith(write.transferWriteOp.getResult(0));
    write.transferWriteOp.sourceMutable().assign(
        newForOp.getResult(initArgNumber));
  }

  // Always update with the newly yielded tensor and vector.
  write.transferWriteOp.vectorMutable().assign(newForOp.getResults().back());
}

// When hoisting transfer ops on tensors, the logic can be significantly
// simplified compared to the case on buffers. The transformation follows this
// logic:
// 1. Look for a transfer_write with a single use coming from the ForOp yield.
// 2. Check the uses of the matching block argument and look for a
//    transfer_read with the same indices.
// 3. Check that all the other uses of the tensor argument are either disjoint
//    transfer_reads or transfer_writes. For transfer_write uses, recurse to
//    make sure the new tensor has the same restrictions on its uses.
// 4. Hoist the transfer_read/transfer_write pair and update the tensor SSA
//    links.
// After this transformation the scf.for may have unused arguments that can be
// removed by the canonicalization pass.
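//
// As an illustrative sketch (simplified IR; the types and "some_use" are
// placeholders, not part of this file), a loop such as:
//   %r = scf.for %i = %lb to %ub step %s
//       iter_args(%arg = %t) -> (tensor<?xf32>) {
//     %v = vector.transfer_read %arg[%c0], %pad : tensor<?xf32>, vector<4xf32>
//     %u = "some_use"(%v) : (vector<4xf32>) -> vector<4xf32>
//     %w = vector.transfer_write %u, %arg[%c0] : vector<4xf32>, tensor<?xf32>
//     scf.yield %w : tensor<?xf32>
//   }
// is rewritten so that the loop carries the vector instead:
//   %v0 = vector.transfer_read %t[%c0], %pad : tensor<?xf32>, vector<4xf32>
//   %vr = scf.for %i = %lb to %ub step %s
//       iter_args(%varg = %v0) -> (vector<4xf32>) {
//     %u = "some_use"(%varg) : (vector<4xf32>) -> vector<4xf32>
//     scf.yield %u : vector<4xf32>
//   }
//   %r = vector.transfer_write %vr, %t[%c0] : vector<4xf32>, tensor<?xf32>
// (The actual rewrite also leaves behind a dead tensor iter_arg, which the
// subsequent scf.for canonicalization folds away.)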
void mlir::linalg::hoistRedundantVectorTransfersOnTensor(FuncOp func) {
  bool changed = true;
  while (changed) {
    changed = false;
    func.walk([&](scf::ForOp forOp) {
      Operation *yield = forOp.getBody()->getTerminator();
      for (auto it : llvm::enumerate(forOp.getRegionIterArgs())) {
        OpOperand &ret = yield->getOpOperand(it.index());
        HoistableWrite write =
            getLoopInvariantTransferWriteOpDefining(forOp, ret);
        if (!write.transferWriteOp || !write.transferWriteOp->hasOneUse())
          continue;
        LLVM_DEBUG(dbgs() << "\n";
                   DBGS() << "Candidate write for hoisting: "
                          << *write.transferWriteOp.getOperation() << "\n");
        if (write.insertSliceOp)
          LLVM_DEBUG(DBGS() << "Candidate insert_slice for hoisting: "
                            << *write.insertSliceOp.getOperation() << "\n");
        if (llvm::any_of(write.transferWriteOp.indices(),
                         [&forOp](Value index) {
                           return !forOp.isDefinedOutsideOfLoop(index);
                         }))
          continue;
        // Find a read with the same type and indices.
        HoistableRead matchingRead =
            findMatchingTransferRead(write, it.value());
        // Make sure none of the other uses read the part of the tensor
        // modified by the transfer_write.
        if (!matchingRead.transferReadOp ||
            tensorChunkAccessedByUnknownOp(write, matchingRead, it.value()))
          continue;

        LLVM_DEBUG(DBGS() << "Start hoisting\n");
        hoistReadWrite(matchingRead, write, it.value());
        changed = true;
        forOp.erase();

        // Need to interrupt and restart: erasing the loop messes up the walk.
        return WalkResult::interrupt();
      }
      return WalkResult::advance();
    });
    // Apply canonicalization so the newForOp + yield folds immediately, thus
    // cleaning up the IR and potentially enabling more hoisting.
    if (changed) {
      RewritePatternSet patterns(func->getContext());
      scf::ForOp::getCanonicalizationPatterns(patterns, func->getContext());
      (void)applyPatternsAndFoldGreedily(func, std::move(patterns));
    }
  }
}

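// Hoist matching vector.transfer_read / vector.transfer_write pairs on
// buffers out of their immediately enclosing scf.for. A sketch of the
// conditions checked below: the pair accesses the same memref with
// loop-invariant indices, the read properly dominates the write, and every
// other in-loop access to that memref is a transfer provably disjoint from
// the pair.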
void mlir::linalg::hoistRedundantVectorTransfers(FuncOp func) {
  bool changed = true;
  while (changed) {
    changed = false;
    // First move loop-invariant ops outside of their loop. This needs to be
    // done beforehand, as we cannot move ops without interrupting the
    // function walk.
    func.walk([&](LoopLikeOpInterface loopLike) {
      if (failed(moveLoopInvariantCode(loopLike)))
        llvm_unreachable(
            "Unexpected failure to move invariant code out of loop");
    });

    func.walk([&](vector::TransferReadOp transferRead) {
      if (!transferRead.getShapedType().isa<MemRefType>())
        return WalkResult::advance();

      LLVM_DEBUG(DBGS() << "Candidate for hoisting: "
                        << *transferRead.getOperation() << "\n");
      auto loop = dyn_cast<scf::ForOp>(transferRead->getParentOp());
      LLVM_DEBUG(DBGS() << "Parent op: " << *transferRead->getParentOp()
                        << "\n");
      if (!loop)
        return WalkResult::advance();

      LLVM_DEBUG(DBGS() << "Candidate read: " << *transferRead.getOperation()
                        << "\n");

      SetVector<Operation *> forwardSlice;
      getForwardSlice(transferRead.getOperation(), &forwardSlice);

      // Look for the last TransferWriteOp in the forwardSlice of
      // `transferRead` that operates on the same memref.
      vector::TransferWriteOp transferWrite;
      for (auto *sliceOp : llvm::reverse(forwardSlice)) {
        auto candidateWrite = dyn_cast<vector::TransferWriteOp>(sliceOp);
        if (!candidateWrite ||
            candidateWrite.source() != transferRead.source())
          continue;
        transferWrite = candidateWrite;
      }

      // All operands of the TransferRead must be defined outside of the loop.
      for (auto operand : transferRead.getOperands())
        if (!loop.isDefinedOutsideOfLoop(operand))
          return WalkResult::advance();

      // Only hoist transfer_read / transfer_write pairs for now.
      if (!transferWrite)
        return WalkResult::advance();

      LLVM_DEBUG(DBGS() << "Candidate: " << *transferWrite.getOperation()
                        << "\n");

      // Approximate aliasing by checking that:
      //   1. indices are the same,
      //   2. no other operations in the loop access the same memref except
      //      for transfer_read/transfer_write accessing statically disjoint
      //      slices.
      if (transferRead.indices() != transferWrite.indices() ||
          transferRead.getVectorType() != transferWrite.getVectorType())
        return WalkResult::advance();

      // TODO: may want to memoize this information for performance but it
      // likely gets invalidated often.
      DominanceInfo dom(loop);
      if (!dom.properlyDominates(transferRead.getOperation(), transferWrite))
        return WalkResult::advance();
      for (auto &use : transferRead.source().getUses()) {
        if (!loop->isAncestor(use.getOwner()))
          continue;
        if (use.getOwner() == transferRead.getOperation() ||
            use.getOwner() == transferWrite.getOperation())
          continue;
        if (auto transferWriteUse =
                dyn_cast<vector::TransferWriteOp>(use.getOwner())) {
          if (!isDisjointTransferSet(
                  cast<VectorTransferOpInterface>(transferWrite.getOperation()),
                  cast<VectorTransferOpInterface>(
                      transferWriteUse.getOperation())))
            return WalkResult::advance();
        } else if (auto transferReadUse =
                       dyn_cast<vector::TransferReadOp>(use.getOwner())) {
          if (!isDisjointTransferSet(
                  cast<VectorTransferOpInterface>(transferWrite.getOperation()),
                  cast<VectorTransferOpInterface>(
                      transferReadUse.getOperation())))
            return WalkResult::advance();
        } else {
          // Unknown use, we cannot prove that it doesn't alias with the
          // transferRead/transferWrite operations.
          return WalkResult::advance();
        }
      }

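      // At this point every other in-loop access to the memref is a transfer
      // provably disjoint from the hoisted pair; e.g. (illustrative) a
      // vector.transfer_write of vector<4xf32> at %m[%c0] and a
      // vector.transfer_read of vector<4xf32> at %m[%c4] never overlap.
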
      // Hoist read before.
      if (failed(loop.moveOutOfLoop({transferRead})))
        llvm_unreachable(
            "Unexpected failure to move transfer read out of loop");

      // Hoist write after.
      transferWrite->moveAfter(loop);

      // Rewrite `loop` with new yields by cloning, then erase the original
      // loop.
      OpBuilder b(transferRead);
      auto newForOp = cloneWithNewYields(b, loop, transferRead.vector(),
                                         transferWrite.vector());

      // The transfer write has been hoisted: update the written value to the
      // value yielded by the newForOp.
      transferWrite.vector().replaceAllUsesWith(
          newForOp.getResults().take_back()[0]);

      changed = true;
      loop.erase();
      // Need to interrupt and restart because erasing the loop messes up the
      // walk.
      return WalkResult::interrupt();
    });
  }
}