//===- Hoisting.cpp - Linalg hoisting transformations ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements functions concerned with hoisting invariant operations
// in the context of Linalg transformations.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Linalg/Transforms/Hoisting.h"
#include "mlir/Analysis/SliceAnalysis.h"
#include "mlir/Dialect/Affine/Analysis/AffineStructures.h"
#include "mlir/Dialect/Affine/IR/AffineValueMap.h"
#include "mlir/Dialect/Affine/Utils.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/SCF/Utils.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/Dialect/Vector/VectorUtils.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Dominance.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "mlir/Transforms/LoopUtils.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Debug.h"

using llvm::dbgs;

#define DEBUG_TYPE "linalg-hoisting"

#define DBGS() (dbgs() << '[' << DEBUG_TYPE << "] ")

using namespace mlir;
using namespace mlir::linalg;

namespace {
/// Represents a unit of hoistable TransferWriteOp. This may comprise other
/// instructions that need to be hoisted too.
struct HoistableWrite {
  vector::TransferWriteOp transferWriteOp;
  tensor::InsertSliceOp insertSliceOp;
};
/// Represents a unit of hoistable TransferReadOp. This may comprise other
/// instructions that need to be hoisted too.
struct HoistableRead {
  vector::TransferReadOp transferReadOp;
  tensor::ExtractSliceOp extractSliceOp;
};
} // namespace

/// Return true if op1 and op2 are the same constant or the same SSA value.
static bool isEqualOffsetSizeOrStride(OpFoldResult op1, OpFoldResult op2) {
  auto getConstantIntValue = [](OpFoldResult ofr) -> llvm::Optional<int64_t> {
    Attribute attr = ofr.dyn_cast<Attribute>();
    // Note: isa+cast-like pattern allows writing the condition below as 1 line.
    if (!attr && ofr.get<Value>().getDefiningOp<arith::ConstantOp>())
      attr = ofr.get<Value>().getDefiningOp<arith::ConstantOp>().getValue();
    if (auto intAttr = attr.dyn_cast_or_null<IntegerAttr>())
      return intAttr.getValue().getSExtValue();
    return llvm::None;
  };
  auto cst1 = getConstantIntValue(op1), cst2 = getConstantIntValue(op2);
  if (cst1 && cst2 && *cst1 == *cst2)
    return true;
  auto v1 = op1.dyn_cast<Value>(), v2 = op2.dyn_cast<Value>();
  return v1 && v2 && v1 == v2;
}

/// Return true if all offsets, sizes and strides are equal.
static bool sameOffsetsSizesAndStrides(tensor::ExtractSliceOp s,
                                       tensor::InsertSliceOp si) {
  if (s.static_offsets().size() != si.static_offsets().size())
    return false;
  if (s.static_sizes().size() != si.static_sizes().size())
    return false;
  if (s.static_strides().size() != si.static_strides().size())
    return false;
  for (auto it : llvm::zip(s.getMixedOffsets(), si.getMixedOffsets()))
    if (!isEqualOffsetSizeOrStride(std::get<0>(it), std::get<1>(it)))
      return false;
  for (auto it : llvm::zip(s.getMixedSizes(), si.getMixedSizes()))
    if (!isEqualOffsetSizeOrStride(std::get<0>(it), std::get<1>(it)))
      return false;
  for (auto it : llvm::zip(s.getMixedStrides(), si.getMixedStrides()))
    if (!isEqualOffsetSizeOrStride(std::get<0>(it), std::get<1>(it)))
      return false;
  return true;
}

/// Look for a HoistableRead, in the given tensor uses, accessing the same
/// offset as the HoistableWrite.
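/// For illustration, the slice-based shape this routine tries to match looks
/// roughly as follows (a sketch; %arg, %v, %pad and the constants are made-up
/// names, not taken from actual IR):
///
///   %s = tensor.extract_slice %arg[%o0, %o1] [8, 16] [1, 1]
///       : tensor<?x?xf32> to tensor<8x16xf32>
///   %r = vector.transfer_read %s[%c0, %c0], %pad
///       : tensor<8x16xf32>, vector<8x16xf32>
///   ...
///   %w = vector.transfer_write %v, %s[%c0, %c0]
///       : vector<8x16xf32>, tensor<8x16xf32>
///   %i = tensor.insert_slice %w into %arg[%o0, %o1] [8, 16] [1, 1]
///       : tensor<8x16xf32> into tensor<?x?xf32>
///
/// Here {%r, %s} form the HoistableRead matching the HoistableWrite {%w, %i}:
/// the slices have identical offsets, sizes and strides, and the transfers
/// use the same indices and vector type.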
static HoistableRead findMatchingTransferRead(HoistableWrite write,
                                              Value srcTensor) {
  assert(write.transferWriteOp &&
         "expected hoistable write to have a .transfer_write");

  LLVM_DEBUG(DBGS() << "findMatchingTransferRead for: "
                    << *write.transferWriteOp.getOperation() << "\n");
  if (write.insertSliceOp)
    LLVM_DEBUG(DBGS() << "findMatchingTransferRead insertSliceOp: "
                      << *write.insertSliceOp.getOperation() << "\n");

  for (Operation *user : srcTensor.getUsers()) {
    LLVM_DEBUG(DBGS() << "findMatchingTransferRead inspect user: " << *user
                      << "\n");

    // If HoistableWrite involves an InsertSliceOp, we need to find a
    // matching ExtractSliceOp.
    tensor::ExtractSliceOp sliceOp;
    Operation *maybeTransferReadUser = user;
    if (write.insertSliceOp) {
      sliceOp = dyn_cast<tensor::ExtractSliceOp>(user);
      if (!sliceOp || sliceOp.getResult().getType() !=
                          write.insertSliceOp.source().getType())
        continue;

      LLVM_DEBUG(DBGS() << "check whether sameOffsetsSizesAndStrides: "
                        << *sliceOp << " vs " << *write.insertSliceOp << "\n");
      if (!sameOffsetsSizesAndStrides(sliceOp, write.insertSliceOp))
        continue;

      LLVM_DEBUG(DBGS() << "sameOffsetsSizesAndStrides: SUCCESS\n");
      // If we got here, sliceOp is hoistable iff it has exactly 2 uses:
      //   1. the transfer_write we want to hoist.
      //   2. a matching transfer_read.
      // Anything else, we skip.
      bool skip = false;
      Operation *otherUser = nullptr;
      for (Operation *u : sliceOp->getUsers()) {
        if (u == write.transferWriteOp)
          continue;
        if (otherUser) {
          skip = true;
          break;
        }
        otherUser = u;
      }
      if (skip || !otherUser)
        continue;
      maybeTransferReadUser = otherUser;
    }

    LLVM_DEBUG(DBGS() << "maybeTransferReadUser: " << *maybeTransferReadUser
                      << "\n");
    auto read = dyn_cast<vector::TransferReadOp>(maybeTransferReadUser);
    if (read && read.indices() == write.transferWriteOp.indices() &&
        read.getVectorType() == write.transferWriteOp.getVectorType())
      return HoistableRead{read, sliceOp};
  }
  return HoistableRead();
}

/// Check whether the chunk of data inserted by the HoistableWrite is read by
/// any op other than the HoistableRead candidate.
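/// For illustration, a use such as the following (a sketch with made-up
/// names) forces a conservative bail-out, because the slice's accesses cannot
/// be reasoned about yet:
///
///   %s2 = tensor.extract_slice %arg[%i, %j] [4, 4] [1, 1]
///       : tensor<?x?xf32> to tensor<4x4xf32>
///
/// Provably disjoint vector.transfer_read ops are allowed, and uses through
/// vector.transfer_write, pass-through scf.for iter_args and scf.yield are
/// followed transitively rather than bailing.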
static bool tensorChunkAccessedByUnknownOp(HoistableWrite write,
                                           HoistableRead candidateRead,
                                           BlockArgument tensorArg) {
  // Make sure none of the other uses read the part of the tensor modified
  // by the transfer_write.
  llvm::SmallVector<Value::use_range, 1> uses;
  uses.push_back(tensorArg.getUses());
  while (!uses.empty()) {
    for (OpOperand &use : uses.pop_back_val()) {
      Operation *user = use.getOwner();
      // Skip the candidate use, only inspect the "other" uses.
      if (user == candidateRead.transferReadOp ||
          user == candidateRead.extractSliceOp ||
          user == write.transferWriteOp || user == write.insertSliceOp)
        continue;
      // Consider all transitive uses through an extract_slice / insert_slice.
      // TODO: atm we just bail because a stronger analysis is needed for
      // these cases.
      if (isa<tensor::ExtractSliceOp, tensor::InsertSliceOp>(user))
        return true;
      // Consider all transitive uses through a vector.transfer_write.
      if (auto writeUser = dyn_cast<vector::TransferWriteOp>(user)) {
        uses.push_back(writeUser->getResult(0).getUses());
        continue;
      }
      // Consider all nested uses through an scf::ForOp. We may have
      // pass-through tensor arguments left from previous levels of
      // hoisting.
      if (auto forUser = dyn_cast<scf::ForOp>(user)) {
        Value arg = forUser.getLoopBody().getArgument(
            use.getOperandNumber() - forUser.getNumControlOperands() +
            /*iv value*/ 1);
        uses.push_back(arg.getUses());
        continue;
      }
      // Follow the yielded value as long as it doesn't escape the original
      // region.
      scf::YieldOp yieldUser = dyn_cast<scf::YieldOp>(user);
      if (yieldUser && write.transferWriteOp->getParentOp()->isAncestor(
                           yieldUser->getParentOp())) {
        Value ret = yieldUser->getParentOp()->getResult(use.getOperandNumber());
        uses.push_back(ret.getUses());
        continue;
      }
      auto read = dyn_cast<vector::TransferReadOp>(user);
      if (!read || !isDisjointTransferIndices(
                       cast<VectorTransferOpInterface>(read.getOperation()),
                       cast<VectorTransferOpInterface>(
                           write.transferWriteOp.getOperation()))) {
        return true;
      }
    }
  }
  return false;
}

/// Return the `forOp`-invariant HoistableWrite that produces `yieldOperand`.
/// Return the null HoistableWrite() if it is not comprised of a
/// vector.transfer_write + optional insert_slice or if any of the indexings
/// is `forOp`-dependent.
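/// For illustration, the two yield-operand shapes recognized here are, in
/// sketch form (made-up names):
///
///   %w = vector.transfer_write %v, %arg[%c0, %c0]
///       : vector<8x16xf32>, tensor<8x16xf32>
///   scf.yield %w : tensor<8x16xf32>
///
/// or, with the optional insert_slice:
///
///   %w = vector.transfer_write %v, %s[%c0, %c0]
///       : vector<8x16xf32>, tensor<8x16xf32>
///   %i = tensor.insert_slice %w into %arg[%o0, %o1] [8, 16] [1, 1]
///       : tensor<8x16xf32> into tensor<?x?xf32>
///   scf.yield %i : tensor<?x?xf32>
///
/// In the insert_slice form, %arg must be the iter_arg at the position
/// matching `yieldOperand`; in both forms, all indices, offsets, sizes and
/// strides must be defined above `forOp`.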
static HoistableWrite
getLoopInvariantTransferWriteOpDefining(scf::ForOp forOp,
                                        OpOperand &yieldOperand) {
  Value v = yieldOperand.get();
  if (auto write = v.getDefiningOp<vector::TransferWriteOp>()) {
    // Indexing must not depend on `forOp`.
    for (Value operand : write.indices())
      if (!forOp.isDefinedOutsideOfLoop(operand))
        return HoistableWrite();

    return HoistableWrite{write, nullptr};
  }

  if (auto insertSliceOp = v.getDefiningOp<tensor::InsertSliceOp>()) {
    // Inserted slice must come from vector.transfer_write.
    auto write =
        insertSliceOp.source().getDefiningOp<vector::TransferWriteOp>();
    if (!write)
      return HoistableWrite();

    // Tensor inserted into must be a BBArg at position matching
    // yieldOperand's.
    auto bbArg = insertSliceOp.dest().dyn_cast<BlockArgument>();
    if (!bbArg || bbArg.getOwner()->getParentOp() != forOp ||
        bbArg.getArgNumber() != /*num iv=*/1 + yieldOperand.getOperandNumber())
      return HoistableWrite();

    // The offsets, sizes and strides of the insertion must not depend on
    // `forOp`.
    for (Value operand : insertSliceOp->getOperands().drop_front(
             tensor::InsertSliceOp::getOffsetSizeAndStrideStartOperandIndex()))
      if (!forOp.isDefinedOutsideOfLoop(operand))
        return HoistableWrite();

    return HoistableWrite{write, insertSliceOp};
  }

  return HoistableWrite();
}

/// Mechanical hoisting of a matching HoistableRead / HoistableWrite pair.
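/// For illustration, in the slice-based case the rewrite is, in sketch form
/// (made-up names, types elided):
///
///   %res = scf.for ... iter_args(%arg = %t) -> (tensor<?x?xf32>) {
///     %s = tensor.extract_slice %arg[%o0, %o1] [8, 16] [1, 1]
///     %r = vector.transfer_read %s[%c0, %c0], %pad
///     %v = "compute"(%r)
///     %w = vector.transfer_write %v, %s[%c0, %c0]
///     %i = tensor.insert_slice %w into %arg[%o0, %o1] [8, 16] [1, 1]
///     scf.yield %i
///   }
///
/// becomes:
///
///   %s = tensor.extract_slice %t[%o0, %o1] [8, 16] [1, 1]
///   %r = vector.transfer_read %s[%c0, %c0], %pad
///   %res:2 = scf.for ... iter_args(%arg = %t, %vec = %r) {
///     %v = "compute"(%vec)
///     scf.yield %arg, %v
///   }
///   %w = vector.transfer_write %res#1, %s[%c0, %c0]
///   %i = tensor.insert_slice %w into %t[%o0, %o1] [8, 16] [1, 1]
///
/// and uses of the original loop's tensor result are redirected to %i.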
static void hoistReadWrite(HoistableRead read, HoistableWrite write,
                           BlockArgument tensorBBArg) {
  scf::ForOp forOp = cast<scf::ForOp>(tensorBBArg.getOwner()->getParentOp());
  assert(read.transferReadOp && write.transferWriteOp &&
         "expected transfer_read and transfer_write ops to be set");
  assert(((read.extractSliceOp && write.insertSliceOp) ||
          (!read.extractSliceOp && !write.insertSliceOp)) &&
         "expected matching extract_slice / insert_slice");
  LLVM_DEBUG(DBGS() << "In forOp:\n"
                    << *forOp.getOperation()
                    << "\nHoist: " << *read.transferReadOp.getOperation()
                    << "\nHoist: " << *write.transferWriteOp.getOperation()
                    << "\nInvolving: " << tensorBBArg << "\n");

  // If a read slice is present, hoist it.
  if (read.extractSliceOp && failed(forOp.moveOutOfLoop({read.extractSliceOp})))
    llvm_unreachable("Unexpected failure moving extract_slice out of loop");

  // Hoist the transfer_read op.
  if (failed(forOp.moveOutOfLoop({read.transferReadOp})))
    llvm_unreachable("Unexpected failure moving transfer read out of loop");

  // TODO: don't hardcode /*numIvs=*/1.
  assert(tensorBBArg.getArgNumber() >= /*numIvs=*/1);
  unsigned initArgNumber = tensorBBArg.getArgNumber() - /*numIvs=*/1;

  // Update the source tensor.
  if (read.extractSliceOp)
    read.extractSliceOp.sourceMutable().assign(
        forOp.getInitArgs()[initArgNumber]);
  else
    read.transferReadOp.sourceMutable().assign(
        forOp.getInitArgs()[initArgNumber]);

  // Hoist write after.
  if (write.insertSliceOp)
    write.insertSliceOp->moveAfter(forOp);
  write.transferWriteOp->moveAfter(forOp);

  // Update the yield.
  auto yieldOp = cast<scf::YieldOp>(forOp.getRegion().front().getTerminator());
  if (write.insertSliceOp)
    yieldOp->setOperand(initArgNumber, write.insertSliceOp.dest());
  else
    yieldOp->setOperand(initArgNumber, write.transferWriteOp.source());

  // Rewrite `loop` with additional new yields.
  OpBuilder b(read.transferReadOp);
  auto newForOp = cloneWithNewYields(b, forOp, read.transferReadOp.vector(),
                                     write.transferWriteOp.vector());
  // Transfer write has been hoisted, need to update the vector and tensor
  // source. Replace the result of the loop to use the new tensor created
  // outside the loop.
  // Depending on whether an insert_slice is present or not, it carries the
  // update on the tensor operands.
  if (write.insertSliceOp) {
    newForOp.getResult(initArgNumber)
        .replaceAllUsesWith(write.insertSliceOp.getResult());
    write.transferWriteOp.sourceMutable().assign(read.extractSliceOp.result());
    write.insertSliceOp.destMutable().assign(read.extractSliceOp.source());
  } else {
    newForOp.getResult(initArgNumber)
        .replaceAllUsesWith(write.transferWriteOp.getResult(0));
    write.transferWriteOp.sourceMutable().assign(
        newForOp.getResult(initArgNumber));
  }

  // Always update with the newly yielded tensor and vector.
  write.transferWriteOp.vectorMutable().assign(newForOp.getResults().back());
}

// To hoist transfer ops on tensors, the logic can be significantly simplified
// compared to the case on buffers. The transformation follows this logic:
// 1. Look for a transfer_write whose single use is the ForOp yield.
// 2. Check the uses of the matching block argument and look for a
// transfer_read with the same indices.
// 3. Check that all the other uses of the tensor argument are either disjoint
// transfer_read or transfer_write. For transfer_write uses recurse to make
// sure the new tensor has the same restrictions on its uses.
// 4. Hoist the transfer_read/transfer_write and update the tensor SSA links.
// After this transformation the scf.forOp may have unused arguments that can
// be removed by the canonicalization pass.
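//
// For illustration, a minimal before/after sketch (made-up names):
//
//   %res = scf.for %i = %lb to %ub step %s iter_args(%arg = %t)
//       -> (tensor<8x16xf32>) {
//     %r = vector.transfer_read %arg[%c0, %c0], %pad
//         : tensor<8x16xf32>, vector<8x16xf32>
//     %v = "compute"(%r) : (vector<8x16xf32>) -> vector<8x16xf32>
//     %w = vector.transfer_write %v, %arg[%c0, %c0]
//         : vector<8x16xf32>, tensor<8x16xf32>
//     scf.yield %w : tensor<8x16xf32>
//   }
//
// hoists to:
//
//   %r = vector.transfer_read %t[%c0, %c0], %pad
//       : tensor<8x16xf32>, vector<8x16xf32>
//   %for:2 = scf.for %i = %lb to %ub step %s iter_args(%arg = %t, %vec = %r)
//       -> (tensor<8x16xf32>, vector<8x16xf32>) {
//     %v = "compute"(%vec) : (vector<8x16xf32>) -> vector<8x16xf32>
//     scf.yield %arg, %v : tensor<8x16xf32>, vector<8x16xf32>
//   }
//   %res = vector.transfer_write %for#1, %for#0[%c0, %c0]
//       : vector<8x16xf32>, tensor<8x16xf32>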
void mlir::linalg::hoistRedundantVectorTransfersOnTensor(FuncOp func) {
  bool changed = true;
  while (changed) {
    changed = false;
    func.walk([&](scf::ForOp forOp) {
      Operation *yield = forOp.getBody()->getTerminator();
      for (const auto &it : llvm::enumerate(forOp.getRegionIterArgs())) {
        OpOperand &ret = yield->getOpOperand(it.index());
        HoistableWrite write =
            getLoopInvariantTransferWriteOpDefining(forOp, ret);
        if (!write.transferWriteOp || !write.transferWriteOp->hasOneUse())
          continue;
        LLVM_DEBUG(dbgs() << "\n";
                   DBGS() << "Candidate write for hoisting: "
                          << *write.transferWriteOp.getOperation() << "\n");
        if (write.insertSliceOp)
          LLVM_DEBUG(DBGS() << "Candidate insert_slice for hoisting: "
                            << *write.insertSliceOp.getOperation() << "\n");
        if (llvm::any_of(write.transferWriteOp.indices(),
                         [&forOp](Value index) {
                           return !forOp.isDefinedOutsideOfLoop(index);
                         }))
          continue;
        // Find a read with the same type and indices.
        HoistableRead matchingRead =
            findMatchingTransferRead(write, it.value());
        // Make sure none of the other uses read the part of the tensor
        // modified by the transfer_write.
        if (!matchingRead.transferReadOp ||
            tensorChunkAccessedByUnknownOp(write, matchingRead, it.value()))
          continue;

        LLVM_DEBUG(DBGS() << "Start hoisting\n");
        hoistReadWrite(matchingRead, write, it.value());
        changed = true;
        forOp.erase();

        // Need to interrupt and restart: erasing the loop messes up the walk.
        return WalkResult::interrupt();
      }
      return WalkResult::advance();
    });
    // Apply canonicalization so the newForOp + yield folds immediately, thus
    // cleaning up the IR and potentially enabling more hoisting.
    if (changed) {
      RewritePatternSet patterns(func->getContext());
      scf::ForOp::getCanonicalizationPatterns(patterns, func->getContext());
      (void)applyPatternsAndFoldGreedily(func, std::move(patterns));
    }
  }
}
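// Hoist loop-invariant transfer_read/transfer_write pairs on buffers out of
// their enclosing scf.for, forwarding the read value through a new yield.
// For illustration, a minimal before/after sketch (made-up names):
//
//   scf.for %i = %lb to %ub step %s {
//     %r = vector.transfer_read %m[%c0, %c0], %pad
//         : memref<8x16xf32>, vector<8x16xf32>
//     %v = "compute"(%r) : (vector<8x16xf32>) -> vector<8x16xf32>
//     vector.transfer_write %v, %m[%c0, %c0]
//         : vector<8x16xf32>, memref<8x16xf32>
//   }
//
// hoists to:
//
//   %r = vector.transfer_read %m[%c0, %c0], %pad
//       : memref<8x16xf32>, vector<8x16xf32>
//   %for = scf.for %i = %lb to %ub step %s iter_args(%vec = %r)
//       -> (vector<8x16xf32>) {
//     %v = "compute"(%vec) : (vector<8x16xf32>) -> vector<8x16xf32>
//     scf.yield %v : vector<8x16xf32>
//   }
//   vector.transfer_write %for, %m[%c0, %c0]
//       : vector<8x16xf32>, memref<8x16xf32>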
void mlir::linalg::hoistRedundantVectorTransfers(FuncOp func) {
  bool changed = true;
  while (changed) {
    changed = false;
    // First move loop-invariant ops outside of their loop. This needs to
    // happen first, as we cannot move ops without interrupting the function
    // walk.
    func.walk([&](LoopLikeOpInterface loopLike) {
      if (failed(moveLoopInvariantCode(loopLike)))
        llvm_unreachable(
            "Unexpected failure to move invariant code out of loop");
    });

    func.walk([&](vector::TransferReadOp transferRead) {
      if (!transferRead.getShapedType().isa<MemRefType>())
        return WalkResult::advance();

      LLVM_DEBUG(DBGS() << "Candidate for hoisting: "
                        << *transferRead.getOperation() << "\n");
      auto loop = dyn_cast<scf::ForOp>(transferRead->getParentOp());
      LLVM_DEBUG(DBGS() << "Parent op: " << *transferRead->getParentOp()
                        << "\n");
      if (!loop)
        return WalkResult::advance();

      LLVM_DEBUG(DBGS() << "Candidate read: " << *transferRead.getOperation()
                        << "\n");

      SetVector<Operation *> forwardSlice;
      getForwardSlice(transferRead.getOperation(), &forwardSlice);

      // Look for the last TransferWriteOp in the forwardSlice of
      // `transferRead` that operates on the same memref.
      vector::TransferWriteOp transferWrite;
      for (auto *sliceOp : llvm::reverse(forwardSlice)) {
        auto candidateWrite = dyn_cast<vector::TransferWriteOp>(sliceOp);
        if (!candidateWrite || candidateWrite.source() != transferRead.source())
          continue;
        transferWrite = candidateWrite;
      }

      // All operands of the TransferRead must be defined outside of the loop.
      for (auto operand : transferRead.getOperands())
        if (!loop.isDefinedOutsideOfLoop(operand))
          return WalkResult::advance();

      // Only hoist transfer_read / transfer_write pairs for now.
      if (!transferWrite)
        return WalkResult::advance();

      LLVM_DEBUG(DBGS() << "Candidate: " << *transferWrite.getOperation()
                        << "\n");

      // Approximate aliasing by checking that:
      //   1. indices and vector type are the same,
      //   2. no other operations in the loop access the same memref except
      //      for transfer_read/transfer_write accessing statically disjoint
      //      slices.
      if (transferRead.indices() != transferWrite.indices() ||
          transferRead.getVectorType() != transferWrite.getVectorType())
        return WalkResult::advance();

      // TODO: may want to memoize this information for performance but it
      // likely gets invalidated often.
      DominanceInfo dom(loop);
      if (!dom.properlyDominates(transferRead.getOperation(), transferWrite))
        return WalkResult::advance();
      for (auto &use : transferRead.source().getUses()) {
        if (!loop->isAncestor(use.getOwner()))
          continue;
        if (use.getOwner() == transferRead.getOperation() ||
            use.getOwner() == transferWrite.getOperation())
          continue;
        if (auto transferWriteUse =
                dyn_cast<vector::TransferWriteOp>(use.getOwner())) {
          if (!isDisjointTransferSet(
                  cast<VectorTransferOpInterface>(transferWrite.getOperation()),
                  cast<VectorTransferOpInterface>(
                      transferWriteUse.getOperation())))
            return WalkResult::advance();
        } else if (auto transferReadUse =
                       dyn_cast<vector::TransferReadOp>(use.getOwner())) {
          if (!isDisjointTransferSet(
                  cast<VectorTransferOpInterface>(transferWrite.getOperation()),
                  cast<VectorTransferOpInterface>(
                      transferReadUse.getOperation())))
            return WalkResult::advance();
        } else {
          // Unknown use, we cannot prove that it doesn't alias with the
          // transferRead/transferWrite operations.
          return WalkResult::advance();
        }
      }

      // Hoist read before.
      if (failed(loop.moveOutOfLoop({transferRead})))
        llvm_unreachable(
            "Unexpected failure to move transfer read out of loop");

      // Hoist write after.
      transferWrite->moveAfter(loop);

      // Rewrite `loop` with an additional yield by cloning; the original loop
      // is erased below.
      OpBuilder b(transferRead);
      auto newForOp = cloneWithNewYields(b, loop, transferRead.vector(),
                                         transferWrite.vector());

      // Transfer write has been hoisted, need to update the written value to
      // the value yielded by the newForOp.
      transferWrite.vector().replaceAllUsesWith(
          newForOp.getResults().take_back()[0]);

      changed = true;
      loop.erase();
      // Need to interrupt and restart because erasing the loop messes up the
      // walk.
      return WalkResult::interrupt();
    });
  }
}