//===- VectorToGPU.cpp - Convert vector to GPU dialect ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering of vector operations to GPU dialect ops.
//
//===----------------------------------------------------------------------===//

#include <type_traits>

#include "mlir/Conversion/VectorToGPU/VectorToGPU.h"

#include "../PassDetail.h"
#include "mlir/Analysis/SliceAnalysis.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/Utils/StructuredOpsUtils.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/Dialect/Vector/VectorUtils.h"
#include "mlir/IR/Builders.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "mlir/Transforms/Passes.h"

using namespace mlir;

// Return true if the contract op can be converted to MMA matmul.
static bool contractSupportsMMAMatrixType(vector::ContractionOp contract) {
  if (llvm::size(contract.masks()) != 0)
    return false;

  using MapList = ArrayRef<ArrayRef<AffineExpr>>;
  auto infer = [](MapList m) { return AffineMap::inferFromExprList(m); };
  AffineExpr m, n, k;
  bindDims(contract.getContext(), m, n, k);
  auto iteratorTypes = contract.iterator_types().getValue();
  if (!(isParallelIterator(iteratorTypes[0]) &&
        isParallelIterator(iteratorTypes[1]) &&
        isReductionIterator(iteratorTypes[2])))
    return false;

  // The contract needs to represent a matmul to be able to convert to
  // MMAMatrix matmul.
  if (contract.getIndexingMaps() != infer({{m, k}, {k, n}, {m, n}}))
    return false;

  return true;
}

// Return the stride for dimension 0 of |type| if it is a memref and has a
// constant stride.
static llvm::Optional<int64_t>
getMemrefConstantHorizontalStride(ShapedType type) {
  auto memrefType = type.dyn_cast<MemRefType>();
  if (!memrefType)
    return llvm::None;
  int64_t offset = 0;
  SmallVector<int64_t, 2> strides;
  if (failed(getStridesAndOffset(memrefType, strides, offset)))
    return llvm::None;
  if (strides[0] == ShapedType::kDynamicStrideOrOffset)
    return llvm::None;
  return strides[0];
}

// Return true if the transfer op can be converted to a MMA matrix load.
static bool transferReadSupportsMMAMatrixType(vector::TransferReadOp readOp) {
  if (readOp.mask() || readOp.hasOutOfBoundsDim() ||
      readOp.getVectorType().getRank() != 2)
    return false;
  if (!getMemrefConstantHorizontalStride(readOp.getShapedType()))
    return false;
  AffineMap map = readOp.permutation_map();
  OpBuilder b(readOp.getContext());
  AffineExpr innerDim = b.getAffineDimExpr(map.getNumDims() - 1);
  AffineExpr zero = b.getAffineConstantExpr(0);
  auto broadcastInnerDim = AffineMap::get(map.getNumDims(), 0, {zero, innerDim},
                                          readOp.getContext());
  // TODO: Support transpose once it is added to GPU dialect ops.
  // For now we only support (d0, d1) -> (d0, d1) and (d0, d1) -> (0, d1).
  if (!map.isMinorIdentity() && map != broadcastInnerDim)
    return false;
  return true;
}

// Return true if the transfer op can be converted to a MMA matrix store.
static bool
transferWriteSupportsMMAMatrixType(vector::TransferWriteOp writeOp) {
  if (writeOp.mask() || writeOp.hasOutOfBoundsDim() ||
      writeOp.getVectorType().getRank() != 2)
    return false;
  if (!getMemrefConstantHorizontalStride(writeOp.getShapedType()))
    return false;
  // TODO: Support transpose once it is added to GPU dialect ops.
  if (!writeOp.permutation_map().isMinorIdentity())
    return false;
  return true;
}

/// Return true if the constant is a splat to a 2D vector so that it can be
/// converted to a MMA constant matrix op.
static bool constantSupportsMMAMatrixType(arith::ConstantOp constantOp) {
  auto vecType = constantOp.getType().dyn_cast<VectorType>();
  if (!vecType || vecType.getRank() != 2)
    return false;
  return constantOp.getValue().isa<SplatElementsAttr>();
}

/// Return true if this is a broadcast from scalar to a 2D vector.
static bool broadcastSupportsMMAMatrixType(vector::BroadcastOp broadcastOp) {
  return broadcastOp.getVectorType().getRank() == 2 &&
         broadcastOp.source().getType().isa<FloatType>();
}

/// Return the MMA elementwise enum associated with `op` if it is supported.
/// Return `llvm::None` otherwise.
static llvm::Optional<gpu::MMAElementwiseOp>
convertElementwiseOpToMMA(Operation *op) {
  if (isa<arith::AddFOp>(op))
    return gpu::MMAElementwiseOp::ADDF;
  if (isa<arith::MulFOp>(op))
    return gpu::MMAElementwiseOp::MULF;
  if (isa<MaxFOp>(op))
    return gpu::MMAElementwiseOp::MAXF;
  if (isa<MinFOp>(op))
    return gpu::MMAElementwiseOp::MINF;
  if (isa<arith::DivFOp>(op))
    return gpu::MMAElementwiseOp::DIVF;
  return llvm::None;
}

/// Return true if the op is supported as elementwise op on MMAMatrix type.
static bool elementwiseSupportsMMAMatrixType(Operation *op) {
  return convertElementwiseOpToMMA(op).hasValue();
}

/// Return true if `op` can be converted to an operation on MMAMatrix type.
static bool supportsMMaMatrixType(Operation *op) {
  if (isa<scf::ForOp, scf::YieldOp>(op))
    return true;
  if (auto transferRead = dyn_cast<vector::TransferReadOp>(op))
    return transferReadSupportsMMAMatrixType(transferRead);
  if (auto transferWrite = dyn_cast<vector::TransferWriteOp>(op))
    return transferWriteSupportsMMAMatrixType(transferWrite);
  if (auto contract = dyn_cast<vector::ContractionOp>(op))
    return contractSupportsMMAMatrixType(contract);
  if (auto constant = dyn_cast<arith::ConstantOp>(op))
    return constantSupportsMMAMatrixType(constant);
  if (auto broadcast = dyn_cast<vector::BroadcastOp>(op))
    return broadcastSupportsMMAMatrixType(broadcast);
  return elementwiseSupportsMMAMatrixType(op);
}

/// Return an unsorted slice handling scf.for regions differently than
/// `getSlice`. In scf.for we only want to include, as part of the slice, the
/// elements that are part of the use/def chain.
static SetVector<Operation *> getSliceContract(Operation *op,
                                               TransitiveFilter backwardFilter,
                                               TransitiveFilter forwardFilter) {
  SetVector<Operation *> slice;
  slice.insert(op);
  unsigned currentIndex = 0;
  SetVector<Operation *> backwardSlice;
  SetVector<Operation *> forwardSlice;
  while (currentIndex != slice.size()) {
    auto *currentOp = (slice)[currentIndex];
    // Compute and insert the backwardSlice starting from currentOp.
    backwardSlice.clear();
    getBackwardSlice(currentOp, &backwardSlice, backwardFilter);
    slice.insert(backwardSlice.begin(), backwardSlice.end());

    // Compute and insert the forwardSlice starting from currentOp.
    forwardSlice.clear();
    // Special case for ForOp: we don't want to include the whole region but
    // only the values using the region arguments.
    // TODO: We should refine this to only care about the region arguments
    // being converted to matrix type.
    if (auto forOp = dyn_cast<scf::ForOp>(currentOp)) {
      for (Value forOpResult : forOp.getResults())
        getForwardSlice(forOpResult, &forwardSlice, forwardFilter);
      for (BlockArgument &arg : forOp.getRegionIterArgs())
        getForwardSlice(arg, &forwardSlice, forwardFilter);
    } else {
      getForwardSlice(currentOp, &forwardSlice, forwardFilter);
    }
    slice.insert(forwardSlice.begin(), forwardSlice.end());
    ++currentIndex;
  }
  return slice;
}

// Analyze the slice of operations rooted at each vector.contract op to figure
// out if the whole slice can be converted to MMA operations.
static SetVector<Operation *> getOpToConvert(mlir::Operation *op) {
  auto hasVectorDest = [](Operation *op) {
    return llvm::any_of(op->getResultTypes(),
                        [](Type t) { return t.isa<VectorType>(); });
  };
  auto hasVectorSrc = [](Operation *op) {
    return llvm::any_of(op->getOperandTypes(),
                        [](Type t) { return t.isa<VectorType>(); });
  };
  SetVector<Operation *> opToConvert;
  op->walk([&](vector::ContractionOp contract) {
    if (opToConvert.contains(contract.getOperation()))
      return;
    SetVector<Operation *> dependentOps =
        getSliceContract(contract, hasVectorDest, hasVectorSrc);
    // If any instruction cannot use the MMA matrix type, drop the whole
    // chain. MMA matrices are stored in an opaque type so they cannot be used
    // by all operations.
    if (llvm::any_of(dependentOps,
                     [](Operation *op) { return !supportsMMaMatrixType(op); }))
      return;
    opToConvert.insert(dependentOps.begin(), dependentOps.end());
  });
  // Sort the operations so that we can convert them in topological order.
  return topologicalSort(opToConvert);
}

namespace {
// Transform contract into (m, k)x(k, n)x(m, n) form so that it can be
// converted to MMA matmul.
struct PrepareContractToGPUMMA
    : public OpRewritePattern<vector::ContractionOp> {
  using OpRewritePattern<vector::ContractionOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ContractionOp op,
                                PatternRewriter &rewriter) const override {
    Location loc = op.getLoc();
    Value lhs = op.lhs(), rhs = op.rhs(), res = op.acc();

    // Set up the parallel/reduction structure in the right form.
    using MapList = ArrayRef<ArrayRef<AffineExpr>>;
    auto infer = [](MapList m) { return AffineMap::inferFromExprList(m); };
    AffineExpr m, n, k;
    bindDims(rewriter.getContext(), m, n, k);
    static constexpr std::array<int64_t, 2> perm = {1, 0};
    auto iteratorTypes = op.iterator_types().getValue();
    SmallVector<AffineMap, 4> maps = op.getIndexingMaps();
    if (!(isParallelIterator(iteratorTypes[0]) &&
          isParallelIterator(iteratorTypes[1]) &&
          isReductionIterator(iteratorTypes[2])))
      return failure();
    //
    // Two outer parallel, one inner reduction (matmat flavor).
    //
    if (maps == infer({{m, k}, {k, n}, {m, n}})) {
      // This is the classical row-major matmul, nothing to do.
      return failure();
    }
    if (maps == infer({{m, k}, {n, k}, {m, n}})) {
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
    } else if (maps == infer({{k, m}, {k, n}, {m, n}})) {
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{k, m}, {n, k}, {m, n}})) {
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{m, k}, {k, n}, {n, m}})) {
      std::swap(rhs, lhs);
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{m, k}, {n, k}, {n, m}})) {
      std::swap(rhs, lhs);
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
    } else if (maps == infer({{k, m}, {k, n}, {n, m}})) {
      std::swap(lhs, rhs);
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{k, m}, {n, k}, {n, m}})) {
      std::swap(lhs, rhs);
    } else {
      return failure();
    }
    rewriter.replaceOpWithNewOp<vector::ContractionOp>(
        op, lhs, rhs, res,
        rewriter.getAffineMapArrayAttr(infer({{m, k}, {k, n}, {m, n}})),
        op.iterator_types());
    return success();
  }
};

// Merge transpose op into the transfer read op. Transposes are not supported
// on MMA types but the MMA load can transpose the matrix when loading.
struct CombineTransferReadOpTranspose final
    : public OpRewritePattern<vector::TransposeOp> {
  using OpRewritePattern<vector::TransposeOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::TransposeOp op,
                                PatternRewriter &rewriter) const override {
    auto transferReadOp = op.vector().getDefiningOp<vector::TransferReadOp>();
    if (!transferReadOp)
      return failure();
    if (transferReadOp.mask() || transferReadOp.hasOutOfBoundsDim())
      return failure();
    SmallVector<int64_t, 2> perm;
    op.getTransp(perm);
    SmallVector<unsigned, 2> permU;
    for (int64_t o : perm)
      permU.push_back(unsigned(o));
    AffineMap permutationMap =
        AffineMap::getPermutationMap(permU, op.getContext());
    AffineMap newMap = permutationMap.compose(transferReadOp.permutation_map());
    rewriter.replaceOpWithNewOp<vector::TransferReadOp>(
        op, op.getType(), transferReadOp.source(), transferReadOp.indices(),
        newMap, transferReadOp.padding(), transferReadOp.mask(),
        transferReadOp.in_boundsAttr());
    return success();
  }
};

} // namespace

// MMA types have different layouts based on how they are used in matmul ops.
// Figure out the right layout to use by looking at op uses.
// TODO: Change the GPU dialect to abstract the layout at this level and
// only care about it during lowering to NVVM.
template <typename OpTy>
static const char *inferFragType(OpTy op) {
  for (Operation *user : op->getUsers()) {
    auto contract = dyn_cast<vector::ContractionOp>(user);
    if (!contract)
      continue;
    if (contract.lhs() == op.getResult())
      return "AOp";
    if (contract.rhs() == op.getResult())
      return "BOp";
  }
  return "COp";
}

/// Convert a vector.transfer_read to a gpu.subgroup_mma_load_matrix op.
static void convertTransferReadOp(vector::TransferReadOp op,
                                  llvm::DenseMap<Value, Value> &valueMapping) {
  assert(transferReadSupportsMMAMatrixType(op));
  Optional<int64_t> stride =
      getMemrefConstantHorizontalStride(op.getShapedType());
  AffineMap map = op.permutation_map();
  // Handle broadcast by setting the stride to 0.
  if (map.getResult(0).isa<AffineConstantExpr>()) {
    assert(map.getResult(0).cast<AffineConstantExpr>().getValue() == 0);
    stride = 0;
  }
  assert(stride);
  const char *fragType = inferFragType(op);
  gpu::MMAMatrixType type =
      gpu::MMAMatrixType::get(op.getVectorType().getShape(),
                              op.getVectorType().getElementType(), fragType);
  OpBuilder b(op);
  Value load = b.create<gpu::SubgroupMmaLoadMatrixOp>(
      op.getLoc(), type, op.source(), op.indices(), b.getIndexAttr(*stride));
  valueMapping[op.getResult()] = load;
}

/// Convert a vector.transfer_write to a gpu.subgroup_mma_store_matrix op.
static void convertTransferWriteOp(vector::TransferWriteOp op,
                                   llvm::DenseMap<Value, Value> &valueMapping) {
  assert(transferWriteSupportsMMAMatrixType(op));
  Optional<int64_t> stride =
      getMemrefConstantHorizontalStride(op.getShapedType());
  assert(stride);
  OpBuilder b(op);
  Value matrix = valueMapping.find(op.vector())->second;
  b.create<gpu::SubgroupMmaStoreMatrixOp>(
      op.getLoc(), matrix, op.source(), op.indices(), b.getIndexAttr(*stride));
  op.erase();
}

/// Convert a vector.contract to a gpu.subgroup_mma_compute op.
static void convertContractOp(vector::ContractionOp op,
                              llvm::DenseMap<Value, Value> &valueMapping) {
  OpBuilder b(op);
  Value opA = valueMapping.find(op.lhs())->second;
  Value opB = valueMapping.find(op.rhs())->second;
  Value opC = valueMapping.find(op.acc())->second;
  Value matmul = b.create<gpu::SubgroupMmaComputeOp>(op.getLoc(), opC.getType(),
                                                     opA, opB, opC);
  valueMapping[op.getResult()] = matmul;
}

/// Convert a 2D splat ConstantOp to a SubgroupMmaConstantMatrix op.
static void convertConstantOp(arith::ConstantOp op,
                              llvm::DenseMap<Value, Value> &valueMapping) {
  assert(constantSupportsMMAMatrixType(op));
  OpBuilder b(op);
  Attribute splat =
      op.getValue().cast<SplatElementsAttr>().getSplatValue<Attribute>();
  auto scalarConstant =
      b.create<arith::ConstantOp>(op.getLoc(), splat.getType(), splat);
  const char *fragType = inferFragType(op);
  auto vecType = op.getType().cast<VectorType>();
  gpu::MMAMatrixType type = gpu::MMAMatrixType::get(
      vecType.getShape(), vecType.getElementType(), llvm::StringRef(fragType));
  auto matrix = b.create<gpu::SubgroupMmaConstantMatrixOp>(op.getLoc(), type,
                                                           scalarConstant);
  valueMapping[op.getResult()] = matrix;
}

/// Convert a vector.broadcast from scalar to a SubgroupMmaConstantMatrix op.
static void convertBroadcastOp(vector::BroadcastOp op,
                               llvm::DenseMap<Value, Value> &valueMapping) {
  assert(broadcastSupportsMMAMatrixType(op));
  OpBuilder b(op);
  const char *fragType = inferFragType(op);
  auto vecType = op.getVectorType();
  gpu::MMAMatrixType type = gpu::MMAMatrixType::get(
      vecType.getShape(), vecType.getElementType(), llvm::StringRef(fragType));
  auto matrix = b.create<gpu::SubgroupMmaConstantMatrixOp>(op.getLoc(), type,
                                                           op.source());
  valueMapping[op.getResult()] = matrix;
}

// Replace ForOp with a new ForOp with extra operands. The YieldOp is not
// updated and needs to be updated separately for the loop to be correct.
static scf::ForOp replaceForOpWithNewSignature(OpBuilder &b, scf::ForOp loop,
                                               ValueRange newIterOperands) {
  // Create a new loop before the existing one, with the extra operands.
  OpBuilder::InsertionGuard g(b);
  b.setInsertionPoint(loop);
  auto operands = llvm::to_vector<4>(loop.getIterOperands());
  operands.append(newIterOperands.begin(), newIterOperands.end());
  scf::ForOp newLoop =
      b.create<scf::ForOp>(loop.getLoc(), loop.lowerBound(), loop.upperBound(),
                           loop.step(), operands);
  newLoop.getBody()->erase();
  newLoop.getLoopBody().getBlocks().splice(
      newLoop.getLoopBody().getBlocks().begin(),
      loop.getLoopBody().getBlocks());
  for (auto operand : newIterOperands)
    newLoop.getBody()->addArgument(operand.getType());

  for (auto it : llvm::zip(loop.getResults(), newLoop.getResults().take_front(
                                                  loop.getNumResults())))
    std::get<0>(it).replaceAllUsesWith(std::get<1>(it));
  loop.erase();
  return newLoop;
}

/// Rewrite an scf.for op so that iter operands that have been mapped to MMA
/// matrices get additional loop-carried values of MMA matrix type.
static void convertForOp(scf::ForOp op,
                         llvm::DenseMap<Value, Value> &valueMapping) {
  SmallVector<Value> newOperands;
  SmallVector<std::pair<size_t, size_t>> argMapping;
  for (auto operand : llvm::enumerate(op.getIterOperands())) {
    auto it = valueMapping.find(operand.value());
    if (it == valueMapping.end())
      continue;
    argMapping.push_back(std::make_pair(
        operand.index(), op.getNumIterOperands() + newOperands.size()));
    newOperands.push_back(it->second);
  }
  OpBuilder b(op);
  scf::ForOp newForOp = replaceForOpWithNewSignature(b, op, newOperands);
  Block &loopBody = *newForOp.getBody();
  for (auto mapping : argMapping) {
    valueMapping[newForOp.getResult(mapping.first)] =
        newForOp.getResult(mapping.second);
    valueMapping[loopBody.getArgument(mapping.first +
                                      newForOp.getNumInductionVars())] =
        loopBody.getArgument(mapping.second + newForOp.getNumInductionVars());
  }
}

/// Rewrite the scf.yield op so that it also yields the MMA matrix values added
/// as extra loop-carried values.
static void convertYieldOp(scf::YieldOp op,
                           llvm::DenseMap<Value, Value> &valueMapping) {
  OpBuilder b(op);
  auto loop = cast<scf::ForOp>(op->getParentOp());
  auto yieldOperands = llvm::to_vector<4>(op.getOperands());
  for (auto operand : llvm::enumerate(op.getOperands())) {
    auto it = valueMapping.find(operand.value());
    if (it == valueMapping.end())
      continue;
    // Replace the yield of the old value with the for op argument to make it
    // easier to remove the dead code.
    yieldOperands[operand.index()] = loop.getIterOperands()[operand.index()];
    yieldOperands.push_back(it->second);
  }
  b.create<scf::YieldOp>(op.getLoc(), yieldOperands);
  op.erase();
}

/// Convert an elementwise op to the equivalent elementwise op on MMA matrix.
static void convertElementwiseOp(Operation *op, gpu::MMAElementwiseOp opType,
                                 llvm::DenseMap<Value, Value> &valueMapping) {
  OpBuilder b(op);
  SmallVector<Value> matrixOperands;
  for (Value operand : op->getOperands())
    matrixOperands.push_back(valueMapping.find(operand)->second);
  Value newOp = b.create<gpu::SubgroupMmaElementwiseOp>(
      op->getLoc(), matrixOperands[0].getType(), matrixOperands, opType);
  valueMapping[op->getResult(0)] = newOp;
}

namespace mlir {

void populatePrepareVectorToMMAPatterns(RewritePatternSet &patterns) {
  patterns.add<PrepareContractToGPUMMA, CombineTransferReadOpTranspose>(
      patterns.getContext());
}

void convertVectorToMMAOps(FuncOp funcOp) {
  SetVector<Operation *> ops = getOpToConvert(funcOp);
  llvm::DenseMap<Value, Value> valueMapping;
  for (Operation *op : ops) {
    if (auto transferRead = dyn_cast<vector::TransferReadOp>(op)) {
      convertTransferReadOp(transferRead, valueMapping);
    } else if (auto transferWrite = dyn_cast<vector::TransferWriteOp>(op)) {
      convertTransferWriteOp(transferWrite, valueMapping);
    } else if (auto contractOp = dyn_cast<vector::ContractionOp>(op)) {
      convertContractOp(contractOp, valueMapping);
    } else if (auto constantOp = dyn_cast<arith::ConstantOp>(op)) {
      convertConstantOp(constantOp, valueMapping);
    } else if (auto broadcastOp = dyn_cast<vector::BroadcastOp>(op)) {
      convertBroadcastOp(broadcastOp, valueMapping);
    } else if (auto forOp = dyn_cast<scf::ForOp>(op)) {
      convertForOp(forOp, valueMapping);
    } else if (auto yieldOp = dyn_cast<scf::YieldOp>(op)) {
      convertYieldOp(yieldOp, valueMapping);
    } else if (auto elementwiseType = convertElementwiseOpToMMA(op)) {
      convertElementwiseOp(op, *elementwiseType, valueMapping);
    }
  }
}

} // namespace mlir

namespace {

struct ConvertVectorToGPUPass
    : public ConvertVectorToGPUBase<ConvertVectorToGPUPass> {
  void runOnFunction() override {
    RewritePatternSet patterns(getFunction().getContext());
    populatePrepareVectorToMMAPatterns(patterns);
    (void)applyPatternsAndFoldGreedily(getFunction(), std::move(patterns));

    convertVectorToMMAOps(getFunction());
  }
};

} // namespace

std::unique_ptr<Pass> mlir::createConvertVectorToGPUPass() {
  return std::make_unique<ConvertVectorToGPUPass>();
}