//===- VectorToGPU.cpp - Convert vector to GPU dialect ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering of vector operations to GPU dialect ops.
//
//===----------------------------------------------------------------------===//

#include <type_traits>

#include "mlir/Conversion/VectorToGPU/VectorToGPU.h"

#include "../PassDetail.h"
#include "mlir/Analysis/SliceAnalysis.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/Utils/StructuredOpsUtils.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/Dialect/Vector/Utils/VectorUtils.h"
#include "mlir/IR/Builders.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "mlir/Transforms/Passes.h"

using namespace mlir;

// Return true if the contract op can be converted to an MMA matmul.
static bool contractSupportsMMAMatrixType(vector::ContractionOp contract) {
  if (llvm::size(contract.masks()) != 0)
    return false;

  using MapList = ArrayRef<ArrayRef<AffineExpr>>;
  auto infer = [](MapList m) { return AffineMap::inferFromExprList(m); };
  AffineExpr m, n, k;
  bindDims(contract.getContext(), m, n, k);
  auto iteratorTypes = contract.iterator_types().getValue();
  if (!(isParallelIterator(iteratorTypes[0]) &&
        isParallelIterator(iteratorTypes[1]) &&
        isReductionIterator(iteratorTypes[2])))
    return false;

  // The contract needs to represent a matmul to be convertible to an
  // MMAMatrix matmul.
  if (contract.getIndexingMaps() != infer({{m, k}, {k, n}, {m, n}}))
    return false;

  return true;
}

// Return the stride for dimension 0 of |type| if it is a memref and has a
// constant stride.
static llvm::Optional<int64_t>
getMemrefConstantHorizontalStride(ShapedType type) {
  auto memrefType = type.dyn_cast<MemRefType>();
  if (!memrefType)
    return llvm::None;
  // If the memref is 0 or 1D the horizontal stride is 0.
  if (memrefType.getRank() < 2)
    return 0;
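  // The innermost dimension must have unit stride; the stride of the
  // second-to-last dimension (the row stride) is the one returned.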
  int64_t offset = 0;
  SmallVector<int64_t, 2> strides;
  if (failed(getStridesAndOffset(memrefType, strides, offset)) ||
      strides.back() != 1)
    return llvm::None;
  int64_t stride = strides[strides.size() - 2];
  if (stride == ShapedType::kDynamicStrideOrOffset)
    return llvm::None;
  return stride;
}

// Return true if the transfer op can be converted to an MMA matrix load.
static bool transferReadSupportsMMAMatrixType(vector::TransferReadOp readOp) {
  if (readOp.mask() || readOp.hasOutOfBoundsDim() ||
      readOp.getVectorType().getRank() != 2)
    return false;
  if (!getMemrefConstantHorizontalStride(readOp.getShapedType()))
    return false;
  AffineMap map = readOp.permutation_map();
  OpBuilder b(readOp.getContext());
  AffineExpr innerDim = b.getAffineDimExpr(map.getNumDims() - 1);
  AffineExpr zero = b.getAffineConstantExpr(0);
  auto broadcastInnerDim = AffineMap::get(map.getNumDims(), 0, {zero, innerDim},
                                          readOp.getContext());
  // TODO: Support transpose once it is added to GPU dialect ops.
  // For now we only support (d0, d1) -> (d0, d1) and (d0, d1) -> (0, d1).
  return map.isMinorIdentity() || map == broadcastInnerDim;
}

// Return true if the transfer op can be converted to an MMA matrix store.
static bool
transferWriteSupportsMMAMatrixType(vector::TransferWriteOp writeOp) {
  // TODO: support 0-d corner case.
  if (writeOp.getTransferRank() == 0)
    return false;

  if (writeOp.mask() || writeOp.hasOutOfBoundsDim() ||
      writeOp.getVectorType().getRank() != 2)
    return false;
  if (!getMemrefConstantHorizontalStride(writeOp.getShapedType()))
    return false;
  // TODO: Support transpose once it is added to GPU dialect ops.
  if (!writeOp.permutation_map().isMinorIdentity())
    return false;
  return true;
}

/// Return true if the constant is a splat of a 2D vector so that it can be
/// converted to an MMA constant matrix op.
static bool constantSupportsMMAMatrixType(arith::ConstantOp constantOp) {
  auto vecType = constantOp.getType().dyn_cast<VectorType>();
  if (!vecType || vecType.getRank() != 2)
    return false;
  return constantOp.getValue().isa<SplatElementsAttr>();
}

/// Return true if this is a broadcast from a scalar to a 2D vector.
static bool broadcastSupportsMMAMatrixType(vector::BroadcastOp broadcastOp) {
  return broadcastOp.getVectorType().getRank() == 2 &&
         broadcastOp.source().getType().isa<FloatType>();
}

/// Return the MMA elementwise enum associated with `op` if it is supported.
/// Return `llvm::None` otherwise.
static llvm::Optional<gpu::MMAElementwiseOp>
convertElementwiseOpToMMA(Operation *op) {
  if (isa<arith::AddFOp>(op))
    return gpu::MMAElementwiseOp::ADDF;
  if (isa<arith::MulFOp>(op))
    return gpu::MMAElementwiseOp::MULF;
  if (isa<arith::MaxFOp>(op))
    return gpu::MMAElementwiseOp::MAXF;
  if (isa<arith::MinFOp>(op))
    return gpu::MMAElementwiseOp::MINF;
  if (isa<arith::DivFOp>(op))
    return gpu::MMAElementwiseOp::DIVF;
  return llvm::None;
}

/// Return true if the op is supported as an elementwise op on the MMAMatrix
/// type.
static bool elementwiseSupportsMMAMatrixType(Operation *op) {
  return convertElementwiseOpToMMA(op).hasValue();
}

static bool supportsMMAMatrixType(Operation *op) {
  if (isa<scf::ForOp, scf::YieldOp>(op))
    return true;
  if (auto transferRead = dyn_cast<vector::TransferReadOp>(op))
    return transferReadSupportsMMAMatrixType(transferRead);
  if (auto transferWrite = dyn_cast<vector::TransferWriteOp>(op))
    return transferWriteSupportsMMAMatrixType(transferWrite);
  if (auto contract = dyn_cast<vector::ContractionOp>(op))
    return contractSupportsMMAMatrixType(contract);
  if (auto constant = dyn_cast<arith::ConstantOp>(op))
    return constantSupportsMMAMatrixType(constant);
  if (auto broadcast = dyn_cast<vector::BroadcastOp>(op))
    return broadcastSupportsMMAMatrixType(broadcast);
  return elementwiseSupportsMMAMatrixType(op);
}

/// Return an unsorted slice, handling scf.for regions differently than
/// `getSlice`: inside scf.for we only want to include as part of the slice the
/// elements that are part of the use/def chain.
static SetVector<Operation *> getSliceContract(Operation *op,
                                               TransitiveFilter backwardFilter,
                                               TransitiveFilter forwardFilter) {
  SetVector<Operation *> slice;
  slice.insert(op);
  unsigned currentIndex = 0;
  SetVector<Operation *> backwardSlice;
  SetVector<Operation *> forwardSlice;
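  // Grow the slice to a fixed point: every operation newly added to the slice
  // is in turn used as a seed for further backward and forward slices.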
  while (currentIndex != slice.size()) {
    auto *currentOp = slice[currentIndex];
    // Compute and insert the backwardSlice starting from currentOp.
    backwardSlice.clear();
    getBackwardSlice(currentOp, &backwardSlice, backwardFilter);
    slice.insert(backwardSlice.begin(), backwardSlice.end());

    // Compute and insert the forwardSlice starting from currentOp.
    forwardSlice.clear();
    // Special case for ForOp: we don't want to include the whole region, only
    // the values using the region arguments.
    // TODO: We should refine this to only care about the region arguments being
    // converted to matrix type.
    if (auto forOp = dyn_cast<scf::ForOp>(currentOp)) {
      for (Value forOpResult : forOp.getResults())
        getForwardSlice(forOpResult, &forwardSlice, forwardFilter);
      for (BlockArgument &arg : forOp.getRegionIterArgs())
        getForwardSlice(arg, &forwardSlice, forwardFilter);
    } else {
      getForwardSlice(currentOp, &forwardSlice, forwardFilter);
    }
    slice.insert(forwardSlice.begin(), forwardSlice.end());
    ++currentIndex;
  }
  return slice;
}

// Analyze the slice of operations rooted at each contraction op to figure out
// if the whole slice can be converted to MMA operations.
static SetVector<Operation *> getOpToConvert(mlir::Operation *op) {
  auto hasVectorDest = [](Operation *op) {
    return llvm::any_of(op->getResultTypes(),
                        [](Type t) { return t.isa<VectorType>(); });
  };
  auto hasVectorSrc = [](Operation *op) {
    return llvm::any_of(op->getOperandTypes(),
                        [](Type t) { return t.isa<VectorType>(); });
  };
  SetVector<Operation *> opToConvert;
  op->walk([&](vector::ContractionOp contract) {
    if (opToConvert.contains(contract.getOperation()))
      return;
    SetVector<Operation *> dependentOps =
        getSliceContract(contract, hasVectorDest, hasVectorSrc);
    // If any instruction cannot use the MMA matrix type, drop the whole
    // chain. MMA matrices are stored in an opaque type so they cannot be used
    // by all operations.
    if (llvm::any_of(dependentOps,
                     [](Operation *op) { return !supportsMMAMatrixType(op); }))
      return;
    opToConvert.insert(dependentOps.begin(), dependentOps.end());
  });
  // Sort the operations so that we can convert them in topological order.
  return topologicalSort(opToConvert);
}

namespace {
// Transform a contraction into the (m, k) x (k, n) -> (m, n) form so that it
// can be converted to an MMA matmul.
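// For example, a contraction whose indexing maps are {(m, k), (n, k), (m, n)}
// is rewritten by transposing its rhs operand so that the maps become the
// canonical row-major matmul form {(m, k), (k, n), (m, n)}.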
struct PrepareContractToGPUMMA
    : public OpRewritePattern<vector::ContractionOp> {
  using OpRewritePattern<vector::ContractionOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ContractionOp op,
                                PatternRewriter &rewriter) const override {
    Location loc = op.getLoc();
    Value lhs = op.lhs(), rhs = op.rhs(), res = op.acc();

    // Set up the parallel/reduction structure in the right form.
    using MapList = ArrayRef<ArrayRef<AffineExpr>>;
    auto infer = [](MapList m) { return AffineMap::inferFromExprList(m); };
    AffineExpr m, n, k;
    bindDims(rewriter.getContext(), m, n, k);
    static constexpr std::array<int64_t, 2> perm = {1, 0};
    auto iteratorTypes = op.iterator_types().getValue();
    SmallVector<AffineMap, 4> maps = op.getIndexingMaps();
    if (!(isParallelIterator(iteratorTypes[0]) &&
          isParallelIterator(iteratorTypes[1]) &&
          isReductionIterator(iteratorTypes[2])))
      return failure();
    //
    // Two outer parallel, one inner reduction (matmat flavor).
    //
    if (maps == infer({{m, k}, {k, n}, {m, n}})) {
      // This is the classical row-major matmul, nothing to do.
      return failure();
    }
    if (maps == infer({{m, k}, {n, k}, {m, n}})) {
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
    } else if (maps == infer({{k, m}, {k, n}, {m, n}})) {
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{k, m}, {n, k}, {m, n}})) {
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{m, k}, {k, n}, {n, m}})) {
      std::swap(rhs, lhs);
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{m, k}, {n, k}, {n, m}})) {
      std::swap(rhs, lhs);
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
    } else if (maps == infer({{k, m}, {k, n}, {n, m}})) {
      std::swap(lhs, rhs);
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{k, m}, {n, k}, {n, m}})) {
      std::swap(lhs, rhs);
    } else {
      return failure();
    }
    rewriter.replaceOpWithNewOp<vector::ContractionOp>(
        op, lhs, rhs, res,
        rewriter.getAffineMapArrayAttr(infer({{m, k}, {k, n}, {m, n}})),
        op.iterator_types());
    return success();
  }
};

// Merge transpose op into the transfer read op. Transposes are not supported
// on MMA types, but the MMA load can transpose the matrix when loading.
struct CombineTransferReadOpTranspose final
    : public OpRewritePattern<vector::TransposeOp> {
  using OpRewritePattern<vector::TransposeOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::TransposeOp op,
                                PatternRewriter &rewriter) const override {
    auto transferReadOp = op.vector().getDefiningOp<vector::TransferReadOp>();
    if (!transferReadOp)
      return failure();

    // TODO: support 0-d corner case.
    if (transferReadOp.getTransferRank() == 0)
      return failure();

    if (transferReadOp.mask() || transferReadOp.hasOutOfBoundsDim())
      return failure();
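    // Fold the transpose into the read by composing its permutation with the
    // read's permutation map and emitting a new transfer_read with that map.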
    SmallVector<int64_t, 2> perm;
    op.getTransp(perm);
    SmallVector<unsigned, 2> permU;
    for (int64_t o : perm)
      permU.push_back(unsigned(o));
    AffineMap permutationMap =
        AffineMap::getPermutationMap(permU, op.getContext());
    AffineMap newMap = permutationMap.compose(transferReadOp.permutation_map());
    rewriter.replaceOpWithNewOp<vector::TransferReadOp>(
        op, op.getType(), transferReadOp.source(), transferReadOp.indices(),
        AffineMapAttr::get(newMap), transferReadOp.padding(),
        transferReadOp.mask(), transferReadOp.in_boundsAttr());
    return success();
  }
};

} // namespace

// MMA types have different layouts based on how they are used in matmul ops.
// Figure out the right layout to use by looking at the op's uses.
// TODO: Change the GPU dialect to abstract the layout at this level and only
// care about it during lowering to NVVM.
template <typename OpTy>
static const char *inferFragType(OpTy op) {
  for (Operation *user : op->getUsers()) {
    auto contract = dyn_cast<vector::ContractionOp>(user);
    if (!contract)
      continue;
    if (contract.lhs() == op.getResult())
      return "AOp";
    if (contract.rhs() == op.getResult())
      return "BOp";
  }
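  // Values that do not feed the lhs or rhs of any contraction are treated as
  // accumulators / results.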
  return "COp";
}

static void convertTransferReadOp(vector::TransferReadOp op,
                                  llvm::DenseMap<Value, Value> &valueMapping) {
  assert(op.getTransferRank() > 0 && "unexpected 0-d transfer");
  assert(transferReadSupportsMMAMatrixType(op));
  Optional<int64_t> stride =
      getMemrefConstantHorizontalStride(op.getShapedType());
  AffineMap map = op.permutation_map();
  // Handle broadcast by setting the stride to 0.
  if (map.getResult(0).isa<AffineConstantExpr>()) {
    assert(map.getResult(0).cast<AffineConstantExpr>().getValue() == 0);
    stride = 0;
  }
  assert(stride);
  const char *fragType = inferFragType(op);
  gpu::MMAMatrixType type =
      gpu::MMAMatrixType::get(op.getVectorType().getShape(),
                              op.getVectorType().getElementType(), fragType);
  OpBuilder b(op);
  Value load = b.create<gpu::SubgroupMmaLoadMatrixOp>(
      op.getLoc(), type, op.source(), op.indices(), b.getIndexAttr(*stride));
  valueMapping[op.getResult()] = load;
}

static void convertTransferWriteOp(vector::TransferWriteOp op,
                                   llvm::DenseMap<Value, Value> &valueMapping) {
  assert(transferWriteSupportsMMAMatrixType(op));
  Optional<int64_t> stride =
      getMemrefConstantHorizontalStride(op.getShapedType());
  assert(stride);
  OpBuilder b(op);
  Value matrix = valueMapping.find(op.vector())->second;
  b.create<gpu::SubgroupMmaStoreMatrixOp>(
      op.getLoc(), matrix, op.source(), op.indices(), b.getIndexAttr(*stride));
  op.erase();
}

static void convertContractOp(vector::ContractionOp op,
                              llvm::DenseMap<Value, Value> &valueMapping) {
  OpBuilder b(op);
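  // The operands must already have been converted; look up their MMA matrix
  // counterparts. The accumulator's MMA type is reused as the result type.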
  Value opA = valueMapping.find(op.lhs())->second;
  Value opB = valueMapping.find(op.rhs())->second;
  Value opC = valueMapping.find(op.acc())->second;
  Value matmul = b.create<gpu::SubgroupMmaComputeOp>(op.getLoc(), opC.getType(),
                                                     opA, opB, opC);
  valueMapping[op.getResult()] = matmul;
}

/// Convert a 2D splat ConstantOp to a SubgroupMmaConstantMatrix op.
static void convertConstantOp(arith::ConstantOp op,
                              llvm::DenseMap<Value, Value> &valueMapping) {
  assert(constantSupportsMMAMatrixType(op));
  OpBuilder b(op);
  Attribute splat =
      op.getValue().cast<SplatElementsAttr>().getSplatValue<Attribute>();
  auto scalarConstant =
      b.create<arith::ConstantOp>(op.getLoc(), splat.getType(), splat);
  const char *fragType = inferFragType(op);
  auto vecType = op.getType().cast<VectorType>();
  gpu::MMAMatrixType type = gpu::MMAMatrixType::get(
      vecType.getShape(), vecType.getElementType(), llvm::StringRef(fragType));
  auto matrix = b.create<gpu::SubgroupMmaConstantMatrixOp>(op.getLoc(), type,
                                                           scalarConstant);
  valueMapping[op.getResult()] = matrix;
}

/// Convert a vector.broadcast from scalar to a SubgroupMmaConstantMatrix op.
static void convertBroadcastOp(vector::BroadcastOp op,
                               llvm::DenseMap<Value, Value> &valueMapping) {
  assert(broadcastSupportsMMAMatrixType(op));
  OpBuilder b(op);
  const char *fragType = inferFragType(op);
  auto vecType = op.getVectorType();
  gpu::MMAMatrixType type = gpu::MMAMatrixType::get(
      vecType.getShape(), vecType.getElementType(), llvm::StringRef(fragType));
  auto matrix = b.create<gpu::SubgroupMmaConstantMatrixOp>(op.getLoc(), type,
                                                           op.source());
  valueMapping[op.getResult()] = matrix;
}

// Replace the ForOp with a new ForOp with extra operands. The YieldOp is not
// updated and needs to be updated separately for the loop to be correct.
static scf::ForOp replaceForOpWithNewSignature(OpBuilder &b, scf::ForOp loop,
                                               ValueRange newIterOperands) {
  // Create a new loop before the existing one, with the extra operands.
  OpBuilder::InsertionGuard g(b);
  b.setInsertionPoint(loop);
  auto operands = llvm::to_vector<4>(loop.getIterOperands());
  operands.append(newIterOperands.begin(), newIterOperands.end());
  scf::ForOp newLoop =
      b.create<scf::ForOp>(loop.getLoc(), loop.getLowerBound(),
                           loop.getUpperBound(), loop.getStep(), operands);
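  // Steal the body of the old loop, add block arguments for the extra iter
  // operands, and forward the old results to the corresponding new ones.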
  newLoop.getBody()->erase();
  newLoop.getLoopBody().getBlocks().splice(
      newLoop.getLoopBody().getBlocks().begin(),
      loop.getLoopBody().getBlocks());
  for (Value operand : newIterOperands)
    newLoop.getBody()->addArgument(operand.getType(), operand.getLoc());

  for (auto it : llvm::zip(loop.getResults(), newLoop.getResults().take_front(
                                                  loop.getNumResults())))
    std::get<0>(it).replaceAllUsesWith(std::get<1>(it));
  loop.erase();
  return newLoop;
}

static void convertForOp(scf::ForOp op,
                         llvm::DenseMap<Value, Value> &valueMapping) {
  SmallVector<Value> newOperands;
  SmallVector<std::pair<size_t, size_t>> argMapping;
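  // For every iter operand that has an MMA matrix counterpart, append that
  // matrix value as an extra iter operand and record the mapping from the
  // original position to the appended one.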
  for (const auto &operand : llvm::enumerate(op.getIterOperands())) {
    auto it = valueMapping.find(operand.value());
    if (it == valueMapping.end())
      continue;
    argMapping.push_back(std::make_pair(
        operand.index(), op.getNumIterOperands() + newOperands.size()));
    newOperands.push_back(it->second);
  }
  OpBuilder b(op);
  scf::ForOp newForOp = replaceForOpWithNewSignature(b, op, newOperands);
  Block &loopBody = *newForOp.getBody();
  for (auto mapping : argMapping) {
    valueMapping[newForOp.getResult(mapping.first)] =
        newForOp.getResult(mapping.second);
    valueMapping[loopBody.getArgument(mapping.first +
                                      newForOp.getNumInductionVars())] =
        loopBody.getArgument(mapping.second + newForOp.getNumInductionVars());
  }
}

static void convertYieldOp(scf::YieldOp op,
                           llvm::DenseMap<Value, Value> &valueMapping) {
  OpBuilder b(op);
  auto loop = cast<scf::ForOp>(op->getParentOp());
  auto yieldOperands = llvm::to_vector<4>(op.getOperands());
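  // For every yielded value with an MMA matrix counterpart, yield the matrix
  // value as an extra operand, matching the extra iter operands added by
  // convertForOp.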
  for (const auto &operand : llvm::enumerate(op.getOperands())) {
    auto it = valueMapping.find(operand.value());
    if (it == valueMapping.end())
      continue;
    // Replace the yield of the old value with the for op argument to make it
    // easier to remove the dead code.
    yieldOperands[operand.index()] = loop.getIterOperands()[operand.index()];
    yieldOperands.push_back(it->second);
  }
  b.create<scf::YieldOp>(op.getLoc(), yieldOperands);
  op.erase();
}

/// Convert an elementwise op to the equivalent elementwise op on MMA matrix
/// types.
static void convertElementwiseOp(Operation *op, gpu::MMAElementwiseOp opType,
                                 llvm::DenseMap<Value, Value> &valueMapping) {
  OpBuilder b(op);
  SmallVector<Value> matrixOperands;
  for (Value operand : op->getOperands())
    matrixOperands.push_back(valueMapping.find(operand)->second);
  Value newOp = b.create<gpu::SubgroupMmaElementwiseOp>(
      op->getLoc(), matrixOperands[0].getType(), matrixOperands, opType);
  valueMapping[op->getResult(0)] = newOp;
}

void mlir::populatePrepareVectorToMMAPatterns(RewritePatternSet &patterns) {
  patterns.add<PrepareContractToGPUMMA, CombineTransferReadOpTranspose>(
      patterns.getContext());
}

void mlir::convertVectorToMMAOps(Operation *rootOp) {
  SetVector<Operation *> ops = getOpToConvert(rootOp);
  llvm::DenseMap<Value, Value> valueMapping;
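  // Ops are visited in topological order, so the operands of each op have
  // already been mapped to MMA matrix values by the time the op is converted.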
  for (Operation *op : ops) {
    if (auto transferRead = dyn_cast<vector::TransferReadOp>(op)) {
      convertTransferReadOp(transferRead, valueMapping);
    } else if (auto transferWrite = dyn_cast<vector::TransferWriteOp>(op)) {
      convertTransferWriteOp(transferWrite, valueMapping);
    } else if (auto contractOp = dyn_cast<vector::ContractionOp>(op)) {
      convertContractOp(contractOp, valueMapping);
    } else if (auto constantOp = dyn_cast<arith::ConstantOp>(op)) {
      convertConstantOp(constantOp, valueMapping);
    } else if (auto broadcastOp = dyn_cast<vector::BroadcastOp>(op)) {
      convertBroadcastOp(broadcastOp, valueMapping);
    } else if (auto forOp = dyn_cast<scf::ForOp>(op)) {
      convertForOp(forOp, valueMapping);
    } else if (auto yieldOp = dyn_cast<scf::YieldOp>(op)) {
      convertYieldOp(yieldOp, valueMapping);
    } else if (auto elementwiseType = convertElementwiseOpToMMA(op)) {
      convertElementwiseOp(op, *elementwiseType, valueMapping);
    }
  }
}

namespace {

struct ConvertVectorToGPUPass
    : public ConvertVectorToGPUBase<ConvertVectorToGPUPass> {
  void runOnOperation() override {
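    // First run the preparation patterns that canonicalize contractions and
    // fold transposes into transfer reads, then convert the resulting vector
    // ops to MMA operations.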
    RewritePatternSet patterns(&getContext());
    populatePrepareVectorToMMAPatterns(patterns);
    (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));

    convertVectorToMMAOps(getOperation());
  }
};

} // namespace

std::unique_ptr<Pass> mlir::createConvertVectorToGPUPass() {
  return std::make_unique<ConvertVectorToGPUPass>();
}