//===- VectorToGPU.cpp - Convert vector to GPU dialect ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering of vector operations to GPU dialect ops.
//
//===----------------------------------------------------------------------===//

#include <type_traits>

#include "mlir/Conversion/VectorToGPU/VectorToGPU.h"

#include "../PassDetail.h"
#include "mlir/Analysis/SliceAnalysis.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/Utils/StructuredOpsUtils.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/Dialect/Vector/Utils/VectorUtils.h"
#include "mlir/IR/Builders.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "mlir/Transforms/Passes.h"

using namespace mlir;

// Return true if the contract op can be converted to an MMA matmul.
static bool contractSupportsMMAMatrixType(vector::ContractionOp contract) {
  if (llvm::size(contract.getMasks()) != 0)
    return false;

  using MapList = ArrayRef<ArrayRef<AffineExpr>>;
  auto infer = [](MapList m) { return AffineMap::inferFromExprList(m); };
  AffineExpr m, n, k;
  bindDims(contract.getContext(), m, n, k);
  auto iteratorTypes = contract.getIteratorTypes().getValue();
  if (!(isParallelIterator(iteratorTypes[0]) &&
        isParallelIterator(iteratorTypes[1]) &&
        isReductionIterator(iteratorTypes[2])))
    return false;

  // The contract needs to represent a matmul to be convertible to an MMAMatrix
  // matmul.
  if (contract.getIndexingMaps() != infer({{m, k}, {k, n}, {m, n}}))
    return false;

  return true;
}
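
// Illustrative example only (operands and result types elided): the canonical
// form accepted above is a row-major matmul contraction along the lines of
//   vector.contract {
//       indexing_maps = [affine_map<(m, n, k) -> (m, k)>,
//                        affine_map<(m, n, k) -> (k, n)>,
//                        affine_map<(m, n, k) -> (m, n)>],
//       iterator_types = ["parallel", "parallel", "reduction"]} %a, %b, %c
// Other operand orders are first normalized to this form by the
// PrepareContractToGPUMMA pattern below.
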
// Return the stride for dimension 0 of |type| if it is a memref and has a
// constant stride.
static llvm::Optional<int64_t>
getMemrefConstantHorizontalStride(ShapedType type) {
  auto memrefType = type.dyn_cast<MemRefType>();
  if (!memrefType)
    return llvm::None;
  // If the memref is 0-D or 1-D the horizontal stride is 0.
  if (memrefType.getRank() < 2)
    return 0;
  int64_t offset = 0;
  SmallVector<int64_t, 2> strides;
  if (failed(getStridesAndOffset(memrefType, strides, offset)) ||
      strides.back() != 1)
    return llvm::None;
  int64_t stride = strides[strides.size() - 2];
  if (stride == ShapedType::kDynamicStrideOrOffset)
    return llvm::None;
  return stride;
}
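
// For illustration: with the default row-major layout, a memref<16x32xf16> has
// strides [32, 1], so the horizontal stride returned above is 32. A layout
// whose innermost stride is not 1 (e.g. a column-major transpose) is rejected.
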
// Return true if the transfer op can be converted to an MMA matrix load.
static bool transferReadSupportsMMAMatrixType(vector::TransferReadOp readOp) {
  if (readOp.getMask() || readOp.hasOutOfBoundsDim() ||
      readOp.getVectorType().getRank() != 2)
    return false;
  if (!getMemrefConstantHorizontalStride(readOp.getShapedType()))
    return false;
  AffineMap map = readOp.getPermutationMap();
  OpBuilder b(readOp.getContext());
  AffineExpr innerDim = b.getAffineDimExpr(map.getNumDims() - 1);
  AffineExpr zero = b.getAffineConstantExpr(0);
  auto broadcastInnerDim = AffineMap::get(map.getNumDims(), 0, {zero, innerDim},
                                          readOp.getContext());
  // TODO: Support transpose once it is added to GPU dialect ops.
  // For now we only support (d0, d1) -> (d0, d1) and (d0, d1) -> (0, d1).
  if (!map.isMinorIdentity() && map != broadcastInnerDim)
    return false;
  return true;
}

// Return true if the transfer op can be converted to an MMA matrix store.
static bool
transferWriteSupportsMMAMatrixType(vector::TransferWriteOp writeOp) {
  // TODO: support 0-d corner case.
  if (writeOp.getTransferRank() == 0)
    return false;

  if (writeOp.getMask() || writeOp.hasOutOfBoundsDim() ||
      writeOp.getVectorType().getRank() != 2)
    return false;
  if (!getMemrefConstantHorizontalStride(writeOp.getShapedType()))
    return false;
  // TODO: Support transpose once it is added to GPU dialect ops.
  if (!writeOp.getPermutationMap().isMinorIdentity())
    return false;
  return true;
}

/// Return true if the constant is a splat to a 2D vector so that it can be
/// converted to an MMA constant matrix op.
static bool constantSupportsMMAMatrixType(arith::ConstantOp constantOp) {
  auto vecType = constantOp.getType().dyn_cast<VectorType>();
  if (!vecType || vecType.getRank() != 2)
    return false;
  return constantOp.getValue().isa<SplatElementsAttr>();
}

/// Return true if this is a broadcast from scalar to a 2D vector.
static bool broadcastSupportsMMAMatrixType(vector::BroadcastOp broadcastOp) {
  return broadcastOp.getVectorType().getRank() == 2 &&
         broadcastOp.getSource().getType().isa<FloatType>();
}

/// Return the MMA elementwise enum associated with `op` if it is supported.
/// Return `llvm::None` otherwise.
static llvm::Optional<gpu::MMAElementwiseOp>
convertElementwiseOpToMMA(Operation *op) {
  if (isa<arith::AddFOp>(op))
    return gpu::MMAElementwiseOp::ADDF;
  if (isa<arith::MulFOp>(op))
    return gpu::MMAElementwiseOp::MULF;
  if (isa<arith::MaxFOp>(op))
    return gpu::MMAElementwiseOp::MAXF;
  if (isa<arith::MinFOp>(op))
    return gpu::MMAElementwiseOp::MINF;
  if (isa<arith::DivFOp>(op))
    return gpu::MMAElementwiseOp::DIVF;
  return llvm::None;
}

/// Return true if the op is supported as an elementwise op on MMAMatrix type.
static bool elementwiseSupportsMMAMatrixType(Operation *op) {
  return convertElementwiseOpToMMA(op).hasValue();
}

static bool supportsMMAMatrixType(Operation *op) {
  if (isa<scf::ForOp, scf::YieldOp>(op))
    return true;
  if (auto transferRead = dyn_cast<vector::TransferReadOp>(op))
    return transferReadSupportsMMAMatrixType(transferRead);
  if (auto transferWrite = dyn_cast<vector::TransferWriteOp>(op))
    return transferWriteSupportsMMAMatrixType(transferWrite);
  if (auto contract = dyn_cast<vector::ContractionOp>(op))
    return contractSupportsMMAMatrixType(contract);
  if (auto constant = dyn_cast<arith::ConstantOp>(op))
    return constantSupportsMMAMatrixType(constant);
  if (auto broadcast = dyn_cast<vector::BroadcastOp>(op))
    return broadcastSupportsMMAMatrixType(broadcast);
  return elementwiseSupportsMMAMatrixType(op);
}

/// Return an unsorted slice, handling scf.for regions differently than
/// `getSlice`: in scf.for we only want to include as part of the slice the
/// elements that are part of the use/def chain.
static SetVector<Operation *> getSliceContract(Operation *op,
                                               TransitiveFilter backwardFilter,
                                               TransitiveFilter forwardFilter) {
  SetVector<Operation *> slice;
  slice.insert(op);
  unsigned currentIndex = 0;
  SetVector<Operation *> backwardSlice;
  SetVector<Operation *> forwardSlice;
  while (currentIndex != slice.size()) {
    auto *currentOp = slice[currentIndex];
    // Compute and insert the backwardSlice starting from currentOp.
    backwardSlice.clear();
    getBackwardSlice(currentOp, &backwardSlice, backwardFilter);
    slice.insert(backwardSlice.begin(), backwardSlice.end());

    // Compute and insert the forwardSlice starting from currentOp.
    forwardSlice.clear();
    // Special case for ForOp: we don't want to include the whole region but
    // only the values using the region arguments.
    // TODO: We should refine this to only care about the region arguments
    // being converted to matrix type.
    if (auto forOp = dyn_cast<scf::ForOp>(currentOp)) {
      for (Value forOpResult : forOp.getResults())
        getForwardSlice(forOpResult, &forwardSlice, forwardFilter);
      for (BlockArgument &arg : forOp.getRegionIterArgs())
        getForwardSlice(arg, &forwardSlice, forwardFilter);
    } else {
      getForwardSlice(currentOp, &forwardSlice, forwardFilter);
    }
    slice.insert(forwardSlice.begin(), forwardSlice.end());
    ++currentIndex;
  }
  return slice;
}

// Analyze the slice of operations rooted at each vector.contract op to figure
// out whether the whole slice can be converted to MMA operations.
static SetVector<Operation *> getOpToConvert(mlir::Operation *op) {
  auto hasVectorDest = [](Operation *op) {
    return llvm::any_of(op->getResultTypes(),
                        [](Type t) { return t.isa<VectorType>(); });
  };
  auto hasVectorSrc = [](Operation *op) {
    return llvm::any_of(op->getOperandTypes(),
                        [](Type t) { return t.isa<VectorType>(); });
  };
  SetVector<Operation *> opToConvert;
  op->walk([&](vector::ContractionOp contract) {
    if (opToConvert.contains(contract.getOperation()))
      return;
    SetVector<Operation *> dependentOps =
        getSliceContract(contract, hasVectorDest, hasVectorSrc);
    // If any instruction cannot use the MMA matrix type, drop the whole
    // chain. MMA matrices are stored in an opaque type so they cannot be used
    // by all operations.
    if (llvm::any_of(dependentOps,
                     [](Operation *op) { return !supportsMMAMatrixType(op); }))
      return;
    opToConvert.insert(dependentOps.begin(), dependentOps.end());
  });
  // Sort the operations so that we can convert them in topological order.
  return topologicalSort(opToConvert);
}

namespace {
// Transform a contraction into the (m, k) x (k, n) x (m, n) form so that it
// can be converted to an MMA matmul.
struct PrepareContractToGPUMMA
    : public OpRewritePattern<vector::ContractionOp> {
  using OpRewritePattern<vector::ContractionOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ContractionOp op,
                                PatternRewriter &rewriter) const override {
    Location loc = op.getLoc();
    Value lhs = op.getLhs(), rhs = op.getRhs(), res = op.getAcc();

    // Set up the parallel/reduction structure in the right form.
    using MapList = ArrayRef<ArrayRef<AffineExpr>>;
    auto infer = [](MapList m) { return AffineMap::inferFromExprList(m); };
    AffineExpr m, n, k;
    bindDims(rewriter.getContext(), m, n, k);
    static constexpr std::array<int64_t, 2> perm = {1, 0};
    auto iteratorTypes = op.getIteratorTypes().getValue();
    SmallVector<AffineMap, 4> maps = op.getIndexingMaps();
    if (!(isParallelIterator(iteratorTypes[0]) &&
          isParallelIterator(iteratorTypes[1]) &&
          isReductionIterator(iteratorTypes[2])))
      return failure();
    //
    // Two outer parallel, one inner reduction (matmat flavor).
    //
    if (maps == infer({{m, k}, {k, n}, {m, n}})) {
      // This is the classical row-major matmul, nothing to do.
      return failure();
    }
    if (maps == infer({{m, k}, {n, k}, {m, n}})) {
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
    } else if (maps == infer({{k, m}, {k, n}, {m, n}})) {
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{k, m}, {n, k}, {m, n}})) {
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{m, k}, {k, n}, {n, m}})) {
      std::swap(rhs, lhs);
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{m, k}, {n, k}, {n, m}})) {
      std::swap(rhs, lhs);
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
    } else if (maps == infer({{k, m}, {k, n}, {n, m}})) {
      std::swap(lhs, rhs);
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{k, m}, {n, k}, {n, m}})) {
      std::swap(lhs, rhs);
    } else {
      return failure();
    }
    rewriter.replaceOpWithNewOp<vector::ContractionOp>(
        op, lhs, rhs, res,
        rewriter.getAffineMapArrayAttr(infer({{m, k}, {k, n}, {m, n}})),
        op.getIteratorTypes());
    return success();
  }
};
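
// For illustration (vector shapes elided): a contraction whose indexing maps
// are [(m, n, k) -> (k, m), (m, n, k) -> (k, n), (m, n, k) -> (m, n)] computes
// transpose(lhs) * rhs, so the pattern above inserts a vector.transpose on the
// lhs to bring the contraction into the canonical (m, k) x (k, n) -> (m, n)
// form that the GPU MMA ops expect.
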
// Merge a transpose op into the transfer read op. Transpose ops are not
// supported on MMA types, but the MMA load can transpose the matrix when
// loading.
struct CombineTransferReadOpTranspose final
    : public OpRewritePattern<vector::TransposeOp> {
  using OpRewritePattern<vector::TransposeOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::TransposeOp op,
                                PatternRewriter &rewriter) const override {
    auto transferReadOp =
        op.getVector().getDefiningOp<vector::TransferReadOp>();
    if (!transferReadOp)
      return failure();

    // TODO: support 0-d corner case.
    if (transferReadOp.getTransferRank() == 0)
      return failure();

    if (transferReadOp.getMask() || transferReadOp.hasOutOfBoundsDim())
      return failure();
    SmallVector<int64_t, 2> perm;
    op.getTransp(perm);
    SmallVector<unsigned, 2> permU;
    for (int64_t o : perm)
      permU.push_back(unsigned(o));
    AffineMap permutationMap =
        AffineMap::getPermutationMap(permU, op.getContext());
    AffineMap newMap =
        permutationMap.compose(transferReadOp.getPermutationMap());
    rewriter.replaceOpWithNewOp<vector::TransferReadOp>(
        op, op.getType(), transferReadOp.getSource(),
        transferReadOp.getIndices(), AffineMapAttr::get(newMap),
        transferReadOp.getPadding(), transferReadOp.getMask(),
        transferReadOp.getInBoundsAttr());
    return success();
  }
};
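
// As an illustration of the pattern above: a vector.transfer_read with the
// identity permutation map (d0, d1) -> (d0, d1) followed by a vector.transpose
// with permutation [1, 0] is folded into a single vector.transfer_read whose
// permutation map is (d0, d1) -> (d1, d0).
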
} // namespace

// MMA types have different layouts based on how they are used in matmul ops.
// Figure out the right layout to use by looking at op uses.
// TODO: Change the GPU dialect to abstract the layout at this level and
// only care about it during lowering to NVVM.
template <typename OpTy>
static const char *inferFragType(OpTy op) {
  for (Operation *user : op->getUsers()) {
    auto contract = dyn_cast<vector::ContractionOp>(user);
    if (!contract)
      continue;
    if (contract.getLhs() == op.getResult())
      return "AOp";
    if (contract.getRhs() == op.getResult())
      return "BOp";
  }
  return "COp";
}

static void convertTransferReadOp(vector::TransferReadOp op,
                                  llvm::DenseMap<Value, Value> &valueMapping) {
  assert(op.getTransferRank() > 0 && "unexpected 0-d transfer");
  assert(transferReadSupportsMMAMatrixType(op));
  Optional<int64_t> stride =
      getMemrefConstantHorizontalStride(op.getShapedType());
  AffineMap map = op.getPermutationMap();
  // Handle broadcast by setting the stride to 0.
  if (map.getResult(0).isa<AffineConstantExpr>()) {
    assert(map.getResult(0).cast<AffineConstantExpr>().getValue() == 0);
    stride = 0;
  }
  assert(stride);
  const char *fragType = inferFragType(op);
  gpu::MMAMatrixType type =
      gpu::MMAMatrixType::get(op.getVectorType().getShape(),
                              op.getVectorType().getElementType(), fragType);
  OpBuilder b(op);
  Value load = b.create<gpu::SubgroupMmaLoadMatrixOp>(
      op.getLoc(), type, op.getSource(), op.getIndices(),
      b.getIndexAttr(*stride));
  valueMapping[op.getResult()] = load;
}
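
// For illustration (assuming the usual leadDimension attribute spelling): a
// 16x16 vector.transfer_read from a memref with horizontal stride 32 becomes
// roughly
//   %m = gpu.subgroup_mma_load_matrix %src[%i, %j] {leadDimension = 32 : index}
//        : memref<...> -> !gpu.mma_matrix<16x16xf16, "AOp">
// where the "AOp"/"BOp"/"COp" operand tag is chosen by inferFragType above.
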
static void convertTransferWriteOp(vector::TransferWriteOp op,
                                   llvm::DenseMap<Value, Value> &valueMapping) {
  assert(transferWriteSupportsMMAMatrixType(op));
  Optional<int64_t> stride =
      getMemrefConstantHorizontalStride(op.getShapedType());
  assert(stride);
  OpBuilder b(op);
  Value matrix = valueMapping.find(op.getVector())->second;
  b.create<gpu::SubgroupMmaStoreMatrixOp>(op.getLoc(), matrix, op.getSource(),
                                          op.getIndices(),
                                          b.getIndexAttr(*stride));
  op.erase();
}

static void convertContractOp(vector::ContractionOp op,
                              llvm::DenseMap<Value, Value> &valueMapping) {
  OpBuilder b(op);
  Value opA = valueMapping.find(op.getLhs())->second;
  Value opB = valueMapping.find(op.getRhs())->second;
  Value opC = valueMapping.find(op.getAcc())->second;
  Value matmul = b.create<gpu::SubgroupMmaComputeOp>(op.getLoc(), opC.getType(),
                                                     opA, opB, opC);
  valueMapping[op.getResult()] = matmul;
}

/// Convert a 2D splat ConstantOp to a SubgroupMmaConstantMatrix op.
static void convertConstantOp(arith::ConstantOp op,
                              llvm::DenseMap<Value, Value> &valueMapping) {
  assert(constantSupportsMMAMatrixType(op));
  OpBuilder b(op);
  Attribute splat =
      op.getValue().cast<SplatElementsAttr>().getSplatValue<Attribute>();
  auto scalarConstant =
      b.create<arith::ConstantOp>(op.getLoc(), splat.getType(), splat);
  const char *fragType = inferFragType(op);
  auto vecType = op.getType().cast<VectorType>();
  gpu::MMAMatrixType type = gpu::MMAMatrixType::get(
      vecType.getShape(), vecType.getElementType(), llvm::StringRef(fragType));
  auto matrix = b.create<gpu::SubgroupMmaConstantMatrixOp>(op.getLoc(), type,
                                                           scalarConstant);
  valueMapping[op.getResult()] = matrix;
}

/// Convert a vector.broadcast from scalar to a SubgroupMmaConstantMatrix op.
static void convertBroadcastOp(vector::BroadcastOp op,
                               llvm::DenseMap<Value, Value> &valueMapping) {
  assert(broadcastSupportsMMAMatrixType(op));
  OpBuilder b(op);
  const char *fragType = inferFragType(op);
  auto vecType = op.getVectorType();
  gpu::MMAMatrixType type = gpu::MMAMatrixType::get(
      vecType.getShape(), vecType.getElementType(), llvm::StringRef(fragType));
  auto matrix = b.create<gpu::SubgroupMmaConstantMatrixOp>(op.getLoc(), type,
                                                           op.getSource());
  valueMapping[op.getResult()] = matrix;
}

// Replace the ForOp with a new ForOp that has extra operands. The YieldOp is
// not updated and needs to be updated separately for the loop to be correct.
static scf::ForOp replaceForOpWithNewSignature(OpBuilder &b, scf::ForOp loop,
                                               ValueRange newIterOperands) {
  // Create a new loop before the existing one, with the extra operands.
  OpBuilder::InsertionGuard g(b);
  b.setInsertionPoint(loop);
  auto operands = llvm::to_vector<4>(loop.getIterOperands());
  operands.append(newIterOperands.begin(), newIterOperands.end());
  scf::ForOp newLoop =
      b.create<scf::ForOp>(loop.getLoc(), loop.getLowerBound(),
                           loop.getUpperBound(), loop.getStep(), operands);
  newLoop.getBody()->erase();
  newLoop.getLoopBody().getBlocks().splice(
      newLoop.getLoopBody().getBlocks().begin(),
      loop.getLoopBody().getBlocks());
  for (Value operand : newIterOperands)
    newLoop.getBody()->addArgument(operand.getType(), operand.getLoc());

  for (auto it : llvm::zip(loop.getResults(), newLoop.getResults().take_front(
                                                  loop.getNumResults())))
    std::get<0>(it).replaceAllUsesWith(std::get<1>(it));
  loop.erase();
  return newLoop;
}

static void convertForOp(scf::ForOp op,
                         llvm::DenseMap<Value, Value> &valueMapping) {
  SmallVector<Value> newOperands;
  SmallVector<std::pair<size_t, size_t>> argMapping;
  for (const auto &operand : llvm::enumerate(op.getIterOperands())) {
    auto it = valueMapping.find(operand.value());
    if (it == valueMapping.end())
      continue;
    argMapping.push_back(std::make_pair(
        operand.index(), op.getNumIterOperands() + newOperands.size()));
    newOperands.push_back(it->second);
  }
  OpBuilder b(op);
  scf::ForOp newForOp = replaceForOpWithNewSignature(b, op, newOperands);
  Block &loopBody = *newForOp.getBody();
  for (auto mapping : argMapping) {
    valueMapping[newForOp.getResult(mapping.first)] =
        newForOp.getResult(mapping.second);
    valueMapping[loopBody.getArgument(mapping.first +
                                      newForOp.getNumInductionVars())] =
        loopBody.getArgument(mapping.second + newForOp.getNumInductionVars());
  }
}

static void convertYieldOp(scf::YieldOp op,
                           llvm::DenseMap<Value, Value> &valueMapping) {
  OpBuilder b(op);
  auto loop = cast<scf::ForOp>(op->getParentOp());
  auto yieldOperands = llvm::to_vector<4>(op.getOperands());
  for (const auto &operand : llvm::enumerate(op.getOperands())) {
    auto it = valueMapping.find(operand.value());
    if (it == valueMapping.end())
      continue;
    // Replace the yield of the old value with the loop's matching iter
    // operand, to make it easier to remove the dead code.
    yieldOperands[operand.index()] = loop.getIterOperands()[operand.index()];
    yieldOperands.push_back(it->second);
  }
  b.create<scf::YieldOp>(op.getLoc(), yieldOperands);
  op.erase();
}
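
// For illustration: an scf.for that carries a 2-D vector accumulator as an
// iter_arg gets an extra !gpu.mma_matrix iter_arg appended by convertForOp
// above. convertYieldOp then appends the matrix value to the yield and
// re-yields the loop's init value in the old position, leaving the original
// vector computation dead so it can be cleaned up later.
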
/// Convert an elementwise op to the equivalent elementwise op on MMA matrix.
static void convertElementwiseOp(Operation *op, gpu::MMAElementwiseOp opType,
                                 llvm::DenseMap<Value, Value> &valueMapping) {
  OpBuilder b(op);
  SmallVector<Value> matrixOperands;
  for (Value operand : op->getOperands())
    matrixOperands.push_back(valueMapping.find(operand)->second);
  Value newOp = b.create<gpu::SubgroupMmaElementwiseOp>(
      op->getLoc(), matrixOperands[0].getType(), matrixOperands, opType);
  valueMapping[op->getResult(0)] = newOp;
}

void mlir::populatePrepareVectorToMMAPatterns(RewritePatternSet &patterns) {
  patterns.add<PrepareContractToGPUMMA, CombineTransferReadOpTranspose>(
      patterns.getContext());
}

void mlir::convertVectorToMMAOps(Operation *rootOp) {
  SetVector<Operation *> ops = getOpToConvert(rootOp);
  llvm::DenseMap<Value, Value> valueMapping;
  for (Operation *op : ops) {
    if (auto transferRead = dyn_cast<vector::TransferReadOp>(op)) {
      convertTransferReadOp(transferRead, valueMapping);
    } else if (auto transferWrite = dyn_cast<vector::TransferWriteOp>(op)) {
      convertTransferWriteOp(transferWrite, valueMapping);
    } else if (auto contractOp = dyn_cast<vector::ContractionOp>(op)) {
      convertContractOp(contractOp, valueMapping);
    } else if (auto constantOp = dyn_cast<arith::ConstantOp>(op)) {
      convertConstantOp(constantOp, valueMapping);
    } else if (auto broadcastOp = dyn_cast<vector::BroadcastOp>(op)) {
      convertBroadcastOp(broadcastOp, valueMapping);
    } else if (auto forOp = dyn_cast<scf::ForOp>(op)) {
      convertForOp(forOp, valueMapping);
    } else if (auto yieldOp = dyn_cast<scf::YieldOp>(op)) {
      convertYieldOp(yieldOp, valueMapping);
    } else if (auto elementwiseType = convertElementwiseOpToMMA(op)) {
      convertElementwiseOp(op, *elementwiseType, valueMapping);
    }
  }
}

namespace {

struct ConvertVectorToGPUPass
    : public ConvertVectorToGPUBase<ConvertVectorToGPUPass> {
  void runOnOperation() override {
    RewritePatternSet patterns(&getContext());
    populatePrepareVectorToMMAPatterns(patterns);
    (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));

    convertVectorToMMAOps(getOperation());
  }
};

} // namespace

std::unique_ptr<Pass> mlir::createConvertVectorToGPUPass() {
  return std::make_unique<ConvertVectorToGPUPass>();
}