1 //===- VectorToGPU.cpp - Convert vector to GPU dialect ----------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements lowering of vector operations to GPU dialect ops.
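// Chains of vector.transfer_read, vector.contract and vector.transfer_write
// ops that together form a matmul are rewritten into the corresponding
// gpu.subgroup_mma_load_matrix, gpu.subgroup_mma_compute and
// gpu.subgroup_mma_store_matrix ops operating on the opaque MMA matrix type.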
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include <type_traits>
14 
15 #include "mlir/Conversion/VectorToGPU/VectorToGPU.h"
16 
17 #include "../PassDetail.h"
18 #include "mlir/Analysis/SliceAnalysis.h"
19 #include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
20 #include "mlir/Dialect/GPU/GPUDialect.h"
21 #include "mlir/Dialect/MemRef/IR/MemRef.h"
22 #include "mlir/Dialect/SCF/SCF.h"
23 #include "mlir/Dialect/Utils/StructuredOpsUtils.h"
24 #include "mlir/Dialect/Vector/IR/VectorOps.h"
25 #include "mlir/Dialect/Vector/Utils/VectorUtils.h"
26 #include "mlir/IR/Builders.h"
27 #include "mlir/Pass/Pass.h"
28 #include "mlir/Transforms/GreedyPatternRewriteDriver.h"
29 #include "mlir/Transforms/Passes.h"
30 
31 using namespace mlir;
32 
// Return true if the contract op can be converted to MMA matmul.
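// Only the canonical row-major matmul form is accepted: no masks, iterator
// types (parallel, parallel, reduction), and indexing maps
//   (m, n, k) -> (m, k), (m, n, k) -> (k, n), (m, n, k) -> (m, n)
// for the lhs, rhs and accumulator respectively. Non-canonical contractions
// can first be normalized by the PrepareContractToGPUMMA pattern below.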
34 static bool contractSupportsMMAMatrixType(vector::ContractionOp contract) {
35   if (llvm::size(contract.masks()) != 0)
36     return false;
37 
38   using MapList = ArrayRef<ArrayRef<AffineExpr>>;
39   auto infer = [](MapList m) { return AffineMap::inferFromExprList(m); };
40   AffineExpr m, n, k;
41   bindDims(contract.getContext(), m, n, k);
42   auto iteratorTypes = contract.iterator_types().getValue();
43   if (!(isParallelIterator(iteratorTypes[0]) &&
44         isParallelIterator(iteratorTypes[1]) &&
45         isReductionIterator(iteratorTypes[2])))
46     return false;
47 
  // The contract needs to represent a matmul to be convertible to an
  // MMAMatrix matmul.
50   if (contract.getIndexingMaps() != infer({{m, k}, {k, n}, {m, n}}))
51     return false;
52 
53   return true;
54 }
55 
// Return the stride of the second-to-last ("horizontal") dimension of |type|
// if it is a memref with constant strides; return llvm::None otherwise.
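// The returned stride is used as the leading-dimension attribute of the
// generated subgroup MMA load/store ops (see convertTransferReadOp and
// convertTransferWriteOp below).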
58 static llvm::Optional<int64_t>
59 getMemrefConstantHorizontalStride(ShapedType type) {
60   auto memrefType = type.dyn_cast<MemRefType>();
61   if (!memrefType)
    return llvm::None;
  // If the memref is 0- or 1-D, the horizontal stride is 0.
  if (memrefType.getRank() < 2)
65     return 0;
66   int64_t offset = 0;
67   SmallVector<int64_t, 2> strides;
68   if (failed(getStridesAndOffset(memrefType, strides, offset)))
69     return llvm::None;
70   int64_t stride = strides[strides.size() - 2];
71   if (stride == ShapedType::kDynamicStrideOrOffset)
72     return llvm::None;
73   return stride;
74 }
75 
76 // Return true if the transfer op can be converted to a MMA matrix load.
77 static bool transferReadSupportsMMAMatrixType(vector::TransferReadOp readOp) {
78   if (readOp.mask() || readOp.hasOutOfBoundsDim() ||
79       readOp.getVectorType().getRank() != 2)
80     return false;
81   if (!getMemrefConstantHorizontalStride(readOp.getShapedType()))
82     return false;
83   AffineMap map = readOp.permutation_map();
84   OpBuilder b(readOp.getContext());
85   AffineExpr innerDim = b.getAffineDimExpr(map.getNumDims() - 1);
86   AffineExpr zero = b.getAffineConstantExpr(0);
87   auto broadcastInnerDim = AffineMap::get(map.getNumDims(), 0, {zero, innerDim},
88                                           readOp.getContext());
89   // TODO: Support transpose once it is added to GPU dialect ops.
90   // For now we only support (d0, d1) -> (d0, d1) and (d0, d1) -> (0, d1).
  return map.isMinorIdentity() || map == broadcastInnerDim;
92 }
93 
94 // Return true if the transfer op can be converted to a MMA matrix store.
95 static bool
96 transferWriteSupportsMMAMatrixType(vector::TransferWriteOp writeOp) {
97   // TODO: support 0-d corner case.
98   if (writeOp.getTransferRank() == 0)
99     return false;
100 
101   if (writeOp.mask() || writeOp.hasOutOfBoundsDim() ||
102       writeOp.getVectorType().getRank() != 2)
103     return false;
104   if (!getMemrefConstantHorizontalStride(writeOp.getShapedType()))
105     return false;
106   // TODO: Support transpose once it is added to GPU dialect ops.
107   if (!writeOp.permutation_map().isMinorIdentity())
108     return false;
109   return true;
110 }
111 
112 /// Return true if the constant is a splat to a 2D vector so that it can be
113 /// converted to a MMA constant matrix op.
114 static bool constantSupportsMMAMatrixType(arith::ConstantOp constantOp) {
115   auto vecType = constantOp.getType().dyn_cast<VectorType>();
116   if (!vecType || vecType.getRank() != 2)
117     return false;
118   return constantOp.getValue().isa<SplatElementsAttr>();
119 }
120 
121 /// Return true if this is a broadcast from scalar to a 2D vector.
122 static bool broadcastSupportsMMAMatrixType(vector::BroadcastOp broadcastOp) {
123   return broadcastOp.getVectorType().getRank() == 2 &&
124          broadcastOp.source().getType().isa<FloatType>();
125 }
126 
127 /// Return the MMA elementwise enum associated with `op` if it is supported.
128 /// Return `llvm::None` otherwise.
129 static llvm::Optional<gpu::MMAElementwiseOp>
130 convertElementwiseOpToMMA(Operation *op) {
131   if (isa<arith::AddFOp>(op))
132     return gpu::MMAElementwiseOp::ADDF;
133   if (isa<arith::MulFOp>(op))
134     return gpu::MMAElementwiseOp::MULF;
135   if (isa<arith::MaxFOp>(op))
136     return gpu::MMAElementwiseOp::MAXF;
137   if (isa<arith::MinFOp>(op))
138     return gpu::MMAElementwiseOp::MINF;
139   if (isa<arith::DivFOp>(op))
140     return gpu::MMAElementwiseOp::DIVF;
141   return llvm::None;
142 }
143 
144 /// Return true if the op is supported as elementwise op on MMAMatrix type.
145 static bool elementwiseSupportsMMAMatrixType(Operation *op) {
146   return convertElementwiseOpToMMA(op).hasValue();
147 }
148 
149 static bool supportsMMaMatrixType(Operation *op) {
150   if (isa<scf::ForOp, scf::YieldOp>(op))
151     return true;
152   if (auto transferRead = dyn_cast<vector::TransferReadOp>(op))
153     return transferReadSupportsMMAMatrixType(transferRead);
154   if (auto transferWrite = dyn_cast<vector::TransferWriteOp>(op))
155     return transferWriteSupportsMMAMatrixType(transferWrite);
156   if (auto contract = dyn_cast<vector::ContractionOp>(op))
157     return contractSupportsMMAMatrixType(contract);
158   if (auto constant = dyn_cast<arith::ConstantOp>(op))
159     return constantSupportsMMAMatrixType(constant);
160   if (auto broadcast = dyn_cast<vector::BroadcastOp>(op))
161     return broadcastSupportsMMAMatrixType(broadcast);
162   return elementwiseSupportsMMAMatrixType(op);
163 }
164 
/// Return an unsorted slice, handling scf.for regions differently than
/// `getSlice`. For scf.for we only want to include in the slice the elements
/// that are part of the use/def chain.
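/// The slice is computed as a fixed point: the backward and forward slices of
/// every operation already in the slice are merged in until no new operations
/// are added.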
168 static SetVector<Operation *> getSliceContract(Operation *op,
169                                                TransitiveFilter backwardFilter,
170                                                TransitiveFilter forwardFilter) {
171   SetVector<Operation *> slice;
172   slice.insert(op);
173   unsigned currentIndex = 0;
174   SetVector<Operation *> backwardSlice;
175   SetVector<Operation *> forwardSlice;
176   while (currentIndex != slice.size()) {
    auto *currentOp = slice[currentIndex];
178     // Compute and insert the backwardSlice starting from currentOp.
179     backwardSlice.clear();
180     getBackwardSlice(currentOp, &backwardSlice, backwardFilter);
181     slice.insert(backwardSlice.begin(), backwardSlice.end());
182 
183     // Compute and insert the forwardSlice starting from currentOp.
184     forwardSlice.clear();
    // Special case for ForOp: we don't want to include the whole region, but
    // only the values using the region arguments.
187     // TODO: We should refine this to only care about the region arguments being
188     // converted to matrix type.
189     if (auto forOp = dyn_cast<scf::ForOp>(currentOp)) {
190       for (Value forOpResult : forOp.getResults())
191         getForwardSlice(forOpResult, &forwardSlice, forwardFilter);
192       for (BlockArgument &arg : forOp.getRegionIterArgs())
193         getForwardSlice(arg, &forwardSlice, forwardFilter);
194     } else {
195       getForwardSlice(currentOp, &forwardSlice, forwardFilter);
196     }
197     slice.insert(forwardSlice.begin(), forwardSlice.end());
198     ++currentIndex;
199   }
200   return slice;
201 }
202 
// Analyze the slice of operations anchored on each vector.contract op to
// figure out if the whole slice can be converted to MMA operations.
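// The backward filter restricts the slice to operations producing vector
// values and the forward filter to operations consuming vector values.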
205 static SetVector<Operation *> getOpToConvert(mlir::Operation *op) {
206   auto hasVectorDest = [](Operation *op) {
207     return llvm::any_of(op->getResultTypes(),
208                         [](Type t) { return t.isa<VectorType>(); });
209   };
210   auto hasVectorSrc = [](Operation *op) {
211     return llvm::any_of(op->getOperandTypes(),
212                         [](Type t) { return t.isa<VectorType>(); });
213   };
214   SetVector<Operation *> opToConvert;
215   op->walk([&](vector::ContractionOp contract) {
216     if (opToConvert.contains(contract.getOperation()))
217       return;
218     SetVector<Operation *> dependentOps =
219         getSliceContract(contract, hasVectorDest, hasVectorSrc);
    // If any operation in the slice cannot use the MMA matrix type, drop the
    // whole chain. MMA matrices are stored in an opaque type so they cannot be
    // used by arbitrary operations.
223     if (llvm::any_of(dependentOps,
224                      [](Operation *op) { return !supportsMMaMatrixType(op); }))
225       return;
226     opToConvert.insert(dependentOps.begin(), dependentOps.end());
227   });
228   // Sort the operations so that we can convert them in topological order.
229   return topologicalSort(opToConvert);
230 }
231 
232 namespace {
// Transform a vector.contract into (m, k) x (k, n) -> (m, n) form so that it
// can be converted to an MMA matmul.
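// For example, a contraction with indexing maps
//   (m, n, k) -> (m, k), (m, n, k) -> (n, k), (m, n, k) -> (m, n)
// gets its rhs transposed so that the maps become the canonical
//   (m, n, k) -> (m, k), (m, n, k) -> (k, n), (m, n, k) -> (m, n).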
235 struct PrepareContractToGPUMMA
236     : public OpRewritePattern<vector::ContractionOp> {
237   using OpRewritePattern<vector::ContractionOp>::OpRewritePattern;
238 
239   LogicalResult matchAndRewrite(vector::ContractionOp op,
240                                 PatternRewriter &rewriter) const override {
241     Location loc = op.getLoc();
242     Value lhs = op.lhs(), rhs = op.rhs(), res = op.acc();
243 
    // Set up the parallel/reduction structure in the right form.
245     using MapList = ArrayRef<ArrayRef<AffineExpr>>;
246     auto infer = [](MapList m) { return AffineMap::inferFromExprList(m); };
247     AffineExpr m, n, k;
248     bindDims(rewriter.getContext(), m, n, k);
249     static constexpr std::array<int64_t, 2> perm = {1, 0};
250     auto iteratorTypes = op.iterator_types().getValue();
251     SmallVector<AffineMap, 4> maps = op.getIndexingMaps();
252     if (!(isParallelIterator(iteratorTypes[0]) &&
253           isParallelIterator(iteratorTypes[1]) &&
254           isReductionIterator(iteratorTypes[2])))
255       return failure();
256     //
257     // Two outer parallel, one inner reduction (matmat flavor).
258     //
259     if (maps == infer({{m, k}, {k, n}, {m, n}})) {
260       // This is the classical row-major matmul, nothing to do.
261       return failure();
262     }
263     if (maps == infer({{m, k}, {n, k}, {m, n}})) {
264       rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
265     } else if (maps == infer({{k, m}, {k, n}, {m, n}})) {
266       lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
267     } else if (maps == infer({{k, m}, {n, k}, {m, n}})) {
268       rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
269       lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
270     } else if (maps == infer({{m, k}, {k, n}, {n, m}})) {
271       std::swap(rhs, lhs);
272       rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
273       lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
274     } else if (maps == infer({{m, k}, {n, k}, {n, m}})) {
275       std::swap(rhs, lhs);
276       rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
277     } else if (maps == infer({{k, m}, {k, n}, {n, m}})) {
278       std::swap(lhs, rhs);
279       lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
280     } else if (maps == infer({{k, m}, {n, k}, {n, m}})) {
281       std::swap(lhs, rhs);
282     } else {
283       return failure();
284     }
285     rewriter.replaceOpWithNewOp<vector::ContractionOp>(
286         op, lhs, rhs, res,
287         rewriter.getAffineMapArrayAttr(infer({{m, k}, {k, n}, {m, n}})),
288         op.iterator_types());
289     return success();
290   }
291 };
292 
// Merge a transpose op into the transfer read op. Transposes are not supported
// on MMA types, but the MMA load can transpose the matrix when loading.
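// The transpose permutation is composed into the read's permutation_map, so
// the transposition is performed by the (future) MMA load itself rather than
// by a separate op.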
295 struct CombineTransferReadOpTranspose final
296     : public OpRewritePattern<vector::TransposeOp> {
297   using OpRewritePattern<vector::TransposeOp>::OpRewritePattern;
298 
299   LogicalResult matchAndRewrite(vector::TransposeOp op,
300                                 PatternRewriter &rewriter) const override {
301     auto transferReadOp = op.vector().getDefiningOp<vector::TransferReadOp>();
302     if (!transferReadOp)
303       return failure();
304 
305     // TODO: support 0-d corner case.
306     if (transferReadOp.getTransferRank() == 0)
307       return failure();
308 
309     if (transferReadOp.mask() || transferReadOp.hasOutOfBoundsDim())
310       return failure();
311     SmallVector<int64_t, 2> perm;
312     op.getTransp(perm);
313     SmallVector<unsigned, 2> permU;
314     for (int64_t o : perm)
315       permU.push_back(unsigned(o));
316     AffineMap permutationMap =
317         AffineMap::getPermutationMap(permU, op.getContext());
318     AffineMap newMap = permutationMap.compose(transferReadOp.permutation_map());
319     rewriter.replaceOpWithNewOp<vector::TransferReadOp>(
320         op, op.getType(), transferReadOp.source(), transferReadOp.indices(),
321         AffineMapAttr::get(newMap), transferReadOp.padding(),
322         transferReadOp.mask(), transferReadOp.in_boundsAttr());
323     return success();
324   }
325 };
326 
327 } // namespace
328 
// MMA types have different layouts based on how they are used in matmul ops.
// Figure out the right layout to use by looking at the op's uses.
// TODO: Change the GPU dialect to abstract the layout at this level and
// only care about it during lowering to NVVM.
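// A value feeding the lhs of a vector.contract gets the "AOp" layout, a value
// feeding the rhs gets "BOp", and everything else (accumulator/result) gets
// "COp".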
333 template <typename OpTy>
334 static const char *inferFragType(OpTy op) {
  for (Operation *user : op->getUsers()) {
    auto contract = dyn_cast<vector::ContractionOp>(user);
337     if (!contract)
338       continue;
339     if (contract.lhs() == op.getResult())
340       return "AOp";
341     if (contract.rhs() == op.getResult())
342       return "BOp";
343   }
344   return "COp";
345 }
346 
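/// Convert a supported vector.transfer_read into a
/// gpu.subgroup_mma_load_matrix op and record the resulting MMA matrix value
/// in `valueMapping`. Broadcast reads (constant 0 in the first map result) are
/// handled by using a leading-dimension stride of 0.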
347 static void convertTransferReadOp(vector::TransferReadOp op,
348                                   llvm::DenseMap<Value, Value> &valueMapping) {
349   assert(op.getTransferRank() > 0 && "unexpected 0-d transfer");
350   assert(transferReadSupportsMMAMatrixType(op));
351   Optional<int64_t> stride =
352       getMemrefConstantHorizontalStride(op.getShapedType());
353   AffineMap map = op.permutation_map();
354   // Handle broadcast by setting the stride to 0.
355   if (map.getResult(0).isa<AffineConstantExpr>()) {
356     assert(map.getResult(0).cast<AffineConstantExpr>().getValue() == 0);
357     stride = 0;
358   }
359   assert(stride);
360   const char *fragType = inferFragType(op);
361   gpu::MMAMatrixType type =
362       gpu::MMAMatrixType::get(op.getVectorType().getShape(),
363                               op.getVectorType().getElementType(), fragType);
364   OpBuilder b(op);
365   Value load = b.create<gpu::SubgroupMmaLoadMatrixOp>(
366       op.getLoc(), type, op.source(), op.indices(), b.getIndexAttr(*stride));
367   valueMapping[op.getResult()] = load;
368 }
369 
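/// Convert a supported vector.transfer_write into a
/// gpu.subgroup_mma_store_matrix op using the MMA matrix previously mapped for
/// the stored vector, then erase the original op.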
370 static void convertTransferWriteOp(vector::TransferWriteOp op,
371                                    llvm::DenseMap<Value, Value> &valueMapping) {
372   assert(transferWriteSupportsMMAMatrixType(op));
373   Optional<int64_t> stride =
374       getMemrefConstantHorizontalStride(op.getShapedType());
375   assert(stride);
376   OpBuilder b(op);
377   Value matrix = valueMapping.find(op.vector())->second;
378   b.create<gpu::SubgroupMmaStoreMatrixOp>(
379       op.getLoc(), matrix, op.source(), op.indices(), b.getIndexAttr(*stride));
380   op.erase();
381 }
382 
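/// Convert a vector.contract into a gpu.subgroup_mma_compute op using the MMA
/// matrices previously mapped for its lhs, rhs and accumulator.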
383 static void convertContractOp(vector::ContractionOp op,
384                               llvm::DenseMap<Value, Value> &valueMapping) {
385   OpBuilder b(op);
386   Value opA = valueMapping.find(op.lhs())->second;
387   Value opB = valueMapping.find(op.rhs())->second;
388   Value opC = valueMapping.find(op.acc())->second;
389   Value matmul = b.create<gpu::SubgroupMmaComputeOp>(op.getLoc(), opC.getType(),
390                                                      opA, opB, opC);
391   valueMapping[op.getResult()] = matmul;
392 }
393 
394 /// Convert a 2D splat ConstantOp to a SubgroupMmaConstantMatrix op.
395 static void convertConstantOp(arith::ConstantOp op,
396                               llvm::DenseMap<Value, Value> &valueMapping) {
397   assert(constantSupportsMMAMatrixType(op));
398   OpBuilder b(op);
399   Attribute splat =
400       op.getValue().cast<SplatElementsAttr>().getSplatValue<Attribute>();
401   auto scalarConstant =
402       b.create<arith::ConstantOp>(op.getLoc(), splat.getType(), splat);
403   const char *fragType = inferFragType(op);
404   auto vecType = op.getType().cast<VectorType>();
405   gpu::MMAMatrixType type = gpu::MMAMatrixType::get(
406       vecType.getShape(), vecType.getElementType(), llvm::StringRef(fragType));
407   auto matrix = b.create<gpu::SubgroupMmaConstantMatrixOp>(op.getLoc(), type,
408                                                            scalarConstant);
409   valueMapping[op.getResult()] = matrix;
410 }
411 
412 /// Convert a vector.broadcast from scalar to a SubgroupMmaConstantMatrix op.
413 static void convertBroadcastOp(vector::BroadcastOp op,
414                                llvm::DenseMap<Value, Value> &valueMapping) {
415   assert(broadcastSupportsMMAMatrixType(op));
416   OpBuilder b(op);
417   const char *fragType = inferFragType(op);
418   auto vecType = op.getVectorType();
419   gpu::MMAMatrixType type = gpu::MMAMatrixType::get(
420       vecType.getShape(), vecType.getElementType(), llvm::StringRef(fragType));
421   auto matrix = b.create<gpu::SubgroupMmaConstantMatrixOp>(op.getLoc(), type,
422                                                            op.source());
423   valueMapping[op.getResult()] = matrix;
424 }
425 
// Replace the ForOp with a new ForOp that has extra operands. The YieldOp is
// not updated and needs to be updated separately for the loop to be correct.
428 static scf::ForOp replaceForOpWithNewSignature(OpBuilder &b, scf::ForOp loop,
429                                                ValueRange newIterOperands) {
430   // Create a new loop before the existing one, with the extra operands.
431   OpBuilder::InsertionGuard g(b);
432   b.setInsertionPoint(loop);
433   auto operands = llvm::to_vector<4>(loop.getIterOperands());
434   operands.append(newIterOperands.begin(), newIterOperands.end());
435   scf::ForOp newLoop =
436       b.create<scf::ForOp>(loop.getLoc(), loop.getLowerBound(),
437                            loop.getUpperBound(), loop.getStep(), operands);
438   newLoop.getBody()->erase();
439   newLoop.getLoopBody().getBlocks().splice(
440       newLoop.getLoopBody().getBlocks().begin(),
441       loop.getLoopBody().getBlocks());
442   for (Value operand : newIterOperands)
443     newLoop.getBody()->addArgument(operand.getType(), operand.getLoc());
444 
445   for (auto it : llvm::zip(loop.getResults(), newLoop.getResults().take_front(
446                                                   loop.getNumResults())))
447     std::get<0>(it).replaceAllUsesWith(std::get<1>(it));
448   loop.erase();
449   return newLoop;
450 }
451 
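/// Rewrite an scf.for loop: every iter operand that has an MMA-mapped
/// counterpart gets that counterpart appended as an extra iter operand, and
/// the mapping is recorded for the corresponding new results and block
/// arguments.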
452 static void convertForOp(scf::ForOp op,
453                          llvm::DenseMap<Value, Value> &valueMapping) {
454   SmallVector<Value> newOperands;
455   SmallVector<std::pair<size_t, size_t>> argMapping;
456   for (const auto &operand : llvm::enumerate(op.getIterOperands())) {
457     auto it = valueMapping.find(operand.value());
458     if (it == valueMapping.end())
459       continue;
460     argMapping.push_back(std::make_pair(
461         operand.index(), op.getNumIterOperands() + newOperands.size()));
462     newOperands.push_back(it->second);
463   }
464   OpBuilder b(op);
465   scf::ForOp newForOp = replaceForOpWithNewSignature(b, op, newOperands);
466   Block &loopBody = *newForOp.getBody();
467   for (auto mapping : argMapping) {
468     valueMapping[newForOp.getResult(mapping.first)] =
469         newForOp.getResult(mapping.second);
470     valueMapping[loopBody.getArgument(mapping.first +
471                                       newForOp.getNumInductionVars())] =
472         loopBody.getArgument(mapping.second + newForOp.getNumInductionVars());
473   }
474 }
475 
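/// Rewrite the scf.yield terminator so that the MMA-mapped values are yielded
/// in the extra positions added by convertForOp, while the original positions
/// simply forward the loop's iter operands so the old values become trivially
/// dead.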
476 static void convertYieldOp(scf::YieldOp op,
477                            llvm::DenseMap<Value, Value> &valueMapping) {
478   OpBuilder b(op);
479   auto loop = cast<scf::ForOp>(op->getParentOp());
480   auto yieldOperands = llvm::to_vector<4>(op.getOperands());
481   for (const auto &operand : llvm::enumerate(op.getOperands())) {
482     auto it = valueMapping.find(operand.value());
483     if (it == valueMapping.end())
484       continue;
    // Replace the yield of the old value with the loop's corresponding iter
    // operand to make it easier to remove the dead code.
487     yieldOperands[operand.index()] = loop.getIterOperands()[operand.index()];
488     yieldOperands.push_back(it->second);
489   }
490   b.create<scf::YieldOp>(op.getLoc(), yieldOperands);
491   op.erase();
492 }
493 
494 /// Convert an elementwise op to the equivalent elementwise op on MMA matrix.
495 static void convertElementwiseOp(Operation *op, gpu::MMAElementwiseOp opType,
496                                  llvm::DenseMap<Value, Value> &valueMapping) {
497   OpBuilder b(op);
498   SmallVector<Value> matrixOperands;
499   for (Value operand : op->getOperands())
500     matrixOperands.push_back(valueMapping.find(operand)->second);
501   Value newOp = b.create<gpu::SubgroupMmaElementwiseOp>(
502       op->getLoc(), matrixOperands[0].getType(), matrixOperands, opType);
503   valueMapping[op->getResult(0)] = newOp;
504 }
505 
506 namespace mlir {
507 
508 void populatePrepareVectorToMMAPatterns(RewritePatternSet &patterns) {
509   patterns.add<PrepareContractToGPUMMA, CombineTransferReadOpTranspose>(
510       patterns.getContext());
511 }
512 
513 void convertVectorToMMAOps(FuncOp funcOp) {
514   SetVector<Operation *> ops = getOpToConvert(funcOp);
515   llvm::DenseMap<Value, Value> valueMapping;
516   for (Operation *op : ops) {
517     if (auto transferRead = dyn_cast<vector::TransferReadOp>(op)) {
518       convertTransferReadOp(transferRead, valueMapping);
519     } else if (auto transferWrite = dyn_cast<vector::TransferWriteOp>(op)) {
520       convertTransferWriteOp(transferWrite, valueMapping);
521     } else if (auto contractOp = dyn_cast<vector::ContractionOp>(op)) {
522       convertContractOp(contractOp, valueMapping);
523     } else if (auto constantOp = dyn_cast<arith::ConstantOp>(op)) {
524       convertConstantOp(constantOp, valueMapping);
525     } else if (auto broadcastOp = dyn_cast<vector::BroadcastOp>(op)) {
526       convertBroadcastOp(broadcastOp, valueMapping);
527     } else if (auto forOp = dyn_cast<scf::ForOp>(op)) {
528       convertForOp(forOp, valueMapping);
    } else if (auto yieldOp = dyn_cast<scf::YieldOp>(op)) {
      convertYieldOp(yieldOp, valueMapping);
531     } else if (auto elementwiseType = convertElementwiseOpToMMA(op)) {
532       convertElementwiseOp(op, *elementwiseType, valueMapping);
533     }
534   }
535 }
536 
537 } // namespace mlir
538 namespace {
539 
540 struct ConvertVectorToGPUPass
541     : public ConvertVectorToGPUBase<ConvertVectorToGPUPass> {
542   void runOnOperation() override {
543     RewritePatternSet patterns(getOperation().getContext());
544     populatePrepareVectorToMMAPatterns(patterns);
545     (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
546 
547     convertVectorToMMAOps(getOperation());
548   }
549 };
550 
551 } // namespace
552 
553 std::unique_ptr<Pass> mlir::createConvertVectorToGPUPass() {
554   return std::make_unique<ConvertVectorToGPUPass>();
555 }
556