//===- VectorToGPU.cpp - Convert vector to GPU dialect ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering of vector operations to GPU dialect ops.
//
//===----------------------------------------------------------------------===//

#include <type_traits>

#include "mlir/Conversion/VectorToGPU/VectorToGPU.h"

#include "../PassDetail.h"
#include "mlir/Analysis/SliceAnalysis.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/Utils/StructuredOpsUtils.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/Dialect/Vector/VectorUtils.h"
#include "mlir/IR/Builders.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "mlir/Transforms/Passes.h"

using namespace mlir;

// Return true if the contract op can be converted to an MMA matmul.
static bool contractSupportsMMAMatrixType(vector::ContractionOp contract) {
  if (llvm::size(contract.masks()) != 0)
    return false;

  using MapList = ArrayRef<ArrayRef<AffineExpr>>;
  auto infer = [](MapList m) { return AffineMap::inferFromExprList(m); };
  AffineExpr m, n, k;
  bindDims(contract.getContext(), m, n, k);
  auto iteratorTypes = contract.iterator_types().getValue();
  if (!(isParallelIterator(iteratorTypes[0]) &&
        isParallelIterator(iteratorTypes[1]) &&
        isReductionIterator(iteratorTypes[2])))
    return false;

  // The contract needs to represent a matmul to be convertible to an
  // MMAMatrix matmul.
  if (contract.getIndexingMaps() != infer({{m, k}, {k, n}, {m, n}}))
    return false;

  return true;
}

// Return the stride of dimension 0 of |type| if it is a memref with a
// constant stride.
static llvm::Optional<int64_t>
getMemrefConstantHorizontalStride(ShapedType type) {
  auto memrefType = type.dyn_cast<MemRefType>();
  if (!memrefType)
    return llvm::None;
  int64_t offset = 0;
  SmallVector<int64_t, 2> strides;
  if (failed(getStridesAndOffset(memrefType, strides, offset)))
    return llvm::None;
  if (strides[0] == ShapedType::kDynamicStrideOrOffset)
    return llvm::None;
  return strides[0];
}

// Return true if the transfer op can be converted to an MMA matrix load.
static bool transferReadSupportsMMAMatrixType(vector::TransferReadOp readOp) {
  if (readOp.mask() || readOp.hasOutOfBoundsDim() ||
      readOp.getVectorType().getRank() != 2)
    return false;
  if (!getMemrefConstantHorizontalStride(readOp.getShapedType()))
    return false;
  AffineMap map = readOp.permutation_map();
  OpBuilder b(readOp.getContext());
  AffineExpr innerDim = b.getAffineDimExpr(map.getNumDims() - 1);
  AffineExpr zero = b.getAffineConstantExpr(0);
  auto broadcastInnerDim = AffineMap::get(map.getNumDims(), 0, {zero, innerDim},
                                          readOp.getContext());
  // TODO: Support transpose once it is added to GPU dialect ops.
  // For now we only support (d0, d1) -> (d0, d1) and (d0, d1) -> (0, d1).
  if (!map.isMinorIdentity() && map != broadcastInnerDim)
    return false;
  return true;
}

// Return true if the transfer op can be converted to an MMA matrix store.
static bool
transferWriteSupportsMMAMatrixType(vector::TransferWriteOp writeOp) {
  // TODO: support 0-d corner case.
  if (writeOp.getTransferRank() == 0)
    return false;

  if (writeOp.mask() || writeOp.hasOutOfBoundsDim() ||
      writeOp.getVectorType().getRank() != 2)
    return false;
  if (!getMemrefConstantHorizontalStride(writeOp.getShapedType()))
    return false;
  // TODO: Support transpose once it is added to GPU dialect ops.
  if (!writeOp.permutation_map().isMinorIdentity())
    return false;
  return true;
}

/// Return true if the constant is a splat of a 2D vector so that it can be
/// converted to an MMA constant matrix op.
static bool constantSupportsMMAMatrixType(arith::ConstantOp constantOp) {
  auto vecType = constantOp.getType().dyn_cast<VectorType>();
  if (!vecType || vecType.getRank() != 2)
    return false;
  return constantOp.getValue().isa<SplatElementsAttr>();
}

/// Return true if this is a broadcast from scalar to a 2D vector.
static bool broadcastSupportsMMAMatrixType(vector::BroadcastOp broadcastOp) {
  return broadcastOp.getVectorType().getRank() == 2 &&
         broadcastOp.source().getType().isa<FloatType>();
}

/// Return the MMA elementwise enum associated with `op` if it is supported.
/// Return `llvm::None` otherwise.
static llvm::Optional<gpu::MMAElementwiseOp>
convertElementwiseOpToMMA(Operation *op) {
  if (isa<arith::AddFOp>(op))
    return gpu::MMAElementwiseOp::ADDF;
  if (isa<arith::MulFOp>(op))
    return gpu::MMAElementwiseOp::MULF;
  if (isa<arith::MaxFOp>(op))
    return gpu::MMAElementwiseOp::MAXF;
  if (isa<arith::MinFOp>(op))
    return gpu::MMAElementwiseOp::MINF;
  if (isa<arith::DivFOp>(op))
    return gpu::MMAElementwiseOp::DIVF;
  return llvm::None;
}

/// Return true if the op is supported as elementwise op on MMAMatrix type.
static bool elementwiseSupportsMMAMatrixType(Operation *op) {
  return convertElementwiseOpToMMA(op).hasValue();
}

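/// Return true if the operation can be converted to (or is transparent to the
/// conversion to) MMA matrix operations: scf.for and scf.yield are forwarded
/// as-is, all other ops are checked against their dedicated predicate.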
static bool supportsMMaMatrixType(Operation *op) {
  if (isa<scf::ForOp, scf::YieldOp>(op))
    return true;
  if (auto transferRead = dyn_cast<vector::TransferReadOp>(op))
    return transferReadSupportsMMAMatrixType(transferRead);
  if (auto transferWrite = dyn_cast<vector::TransferWriteOp>(op))
    return transferWriteSupportsMMAMatrixType(transferWrite);
  if (auto contract = dyn_cast<vector::ContractionOp>(op))
    return contractSupportsMMAMatrixType(contract);
  if (auto constant = dyn_cast<arith::ConstantOp>(op))
    return constantSupportsMMAMatrixType(constant);
  if (auto broadcast = dyn_cast<vector::BroadcastOp>(op))
    return broadcastSupportsMMAMatrixType(broadcast);
  return elementwiseSupportsMMAMatrixType(op);
}

/// Return an unsorted slice handling scf.for regions differently than
/// `getSlice`. In scf.for we only want to include as part of the slice the
/// elements that are part of the use/def chain.
static SetVector<Operation *> getSliceContract(Operation *op,
                                               TransitiveFilter backwardFilter,
                                               TransitiveFilter forwardFilter) {
  SetVector<Operation *> slice;
  slice.insert(op);
  unsigned currentIndex = 0;
  SetVector<Operation *> backwardSlice;
  SetVector<Operation *> forwardSlice;
  while (currentIndex != slice.size()) {
    auto *currentOp = slice[currentIndex];
    // Compute and insert the backwardSlice starting from currentOp.
    backwardSlice.clear();
    getBackwardSlice(currentOp, &backwardSlice, backwardFilter);
    slice.insert(backwardSlice.begin(), backwardSlice.end());

    // Compute and insert the forwardSlice starting from currentOp.
    forwardSlice.clear();
    // Special case for ForOp: we don't want to include the whole region, only
    // the values using the region arguments.
    // TODO: We should refine this to only care about the region arguments
    // being converted to matrix type.
    if (auto forOp = dyn_cast<scf::ForOp>(currentOp)) {
      for (Value forOpResult : forOp.getResults())
        getForwardSlice(forOpResult, &forwardSlice, forwardFilter);
      for (BlockArgument &arg : forOp.getRegionIterArgs())
        getForwardSlice(arg, &forwardSlice, forwardFilter);
    } else {
      getForwardSlice(currentOp, &forwardSlice, forwardFilter);
    }
    slice.insert(forwardSlice.begin(), forwardSlice.end());
    ++currentIndex;
  }
  return slice;
}

// Analyze the slice of operations rooted at each contraction op to figure out
// if the whole slice can be converted to MMA operations.
static SetVector<Operation *> getOpToConvert(mlir::Operation *op) {
  auto hasVectorDest = [](Operation *op) {
    return llvm::any_of(op->getResultTypes(),
                        [](Type t) { return t.isa<VectorType>(); });
  };
  auto hasVectorSrc = [](Operation *op) {
    return llvm::any_of(op->getOperandTypes(),
                        [](Type t) { return t.isa<VectorType>(); });
  };
  SetVector<Operation *> opToConvert;
  op->walk([&](vector::ContractionOp contract) {
    if (opToConvert.contains(contract.getOperation()))
      return;
    SetVector<Operation *> dependentOps =
        getSliceContract(contract, hasVectorDest, hasVectorSrc);
    // If any instruction cannot use the MMA matrix type, drop the whole
    // chain. MMA matrices are stored in an opaque type so they cannot be used
    // by all operations.
    if (llvm::any_of(dependentOps,
                     [](Operation *op) { return !supportsMMaMatrixType(op); }))
      return;
    opToConvert.insert(dependentOps.begin(), dependentOps.end());
  });
  // Sort the operations so that we can convert them in topological order.
  return topologicalSort(opToConvert);
}

namespace {
// Transform contract into (m, k)x(k, n)x(m, n) form so that it can be
// converted to an MMA matmul.
struct PrepareContractToGPUMMA
    : public OpRewritePattern<vector::ContractionOp> {
  using OpRewritePattern<vector::ContractionOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ContractionOp op,
                                PatternRewriter &rewriter) const override {
    Location loc = op.getLoc();
    Value lhs = op.lhs(), rhs = op.rhs(), res = op.acc();

    // Set up the parallel/reduction structure in the right form.
    using MapList = ArrayRef<ArrayRef<AffineExpr>>;
    auto infer = [](MapList m) { return AffineMap::inferFromExprList(m); };
    AffineExpr m, n, k;
    bindDims(rewriter.getContext(), m, n, k);
    static constexpr std::array<int64_t, 2> perm = {1, 0};
    auto iteratorTypes = op.iterator_types().getValue();
    SmallVector<AffineMap, 4> maps = op.getIndexingMaps();
    if (!(isParallelIterator(iteratorTypes[0]) &&
          isParallelIterator(iteratorTypes[1]) &&
          isReductionIterator(iteratorTypes[2])))
      return failure();
    //
    // Two outer parallel, one inner reduction (matmat flavor).
    //
    if (maps == infer({{m, k}, {k, n}, {m, n}})) {
      // This is the classical row-major matmul, nothing to do.
      return failure();
    }
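    // For all other indexing map combinations, transpose and/or swap the
    // operands so that the contract ends up in the canonical
    // (m, k) x (k, n) -> (m, n) row-major form expected by the MMA matmul.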
    if (maps == infer({{m, k}, {n, k}, {m, n}})) {
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
    } else if (maps == infer({{k, m}, {k, n}, {m, n}})) {
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{k, m}, {n, k}, {m, n}})) {
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{m, k}, {k, n}, {n, m}})) {
      std::swap(rhs, lhs);
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{m, k}, {n, k}, {n, m}})) {
      std::swap(rhs, lhs);
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
    } else if (maps == infer({{k, m}, {k, n}, {n, m}})) {
      std::swap(lhs, rhs);
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{k, m}, {n, k}, {n, m}})) {
      std::swap(lhs, rhs);
    } else {
      return failure();
    }
    rewriter.replaceOpWithNewOp<vector::ContractionOp>(
        op, lhs, rhs, res,
        rewriter.getAffineMapArrayAttr(infer({{m, k}, {k, n}, {m, n}})),
        op.iterator_types());
    return success();
  }
};

// Merge the transpose op into the transfer read op. Transposes are not
// supported on MMA types, but the MMA load can transpose the matrix when
// loading.
struct CombineTransferReadOpTranspose final
    : public OpRewritePattern<vector::TransposeOp> {
  using OpRewritePattern<vector::TransposeOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::TransposeOp op,
                                PatternRewriter &rewriter) const override {
    auto transferReadOp = op.vector().getDefiningOp<vector::TransferReadOp>();
    if (!transferReadOp)
      return failure();

    // TODO: support 0-d corner case.
    if (transferReadOp.getTransferRank() == 0)
      return failure();

    if (transferReadOp.mask() || transferReadOp.hasOutOfBoundsDim())
      return failure();
    SmallVector<int64_t, 2> perm;
    op.getTransp(perm);
    SmallVector<unsigned, 2> permU;
    for (int64_t o : perm)
      permU.push_back(unsigned(o));
    AffineMap permutationMap =
        AffineMap::getPermutationMap(permU, op.getContext());
    AffineMap newMap = permutationMap.compose(transferReadOp.permutation_map());
    rewriter.replaceOpWithNewOp<vector::TransferReadOp>(
        op, op.getType(), transferReadOp.source(), transferReadOp.indices(),
        AffineMapAttr::get(newMap), transferReadOp.padding(),
        transferReadOp.mask(), transferReadOp.in_boundsAttr());
    return success();
  }
};

} // namespace

// MMA types have a different layout depending on how they are used in matmul
// ops. Figure out the right layout to use by looking at op uses.
// TODO: Change the GPU dialect to abstract the layout at this level and only
// care about it during lowering to NVVM.
template <typename OpTy>
static const char *inferFragType(OpTy op) {
  for (Operation *user : op->getUsers()) {
    auto contract = dyn_cast<vector::ContractionOp>(user);
    if (!contract)
      continue;
    if (contract.lhs() == op.getResult())
      return "AOp";
    if (contract.rhs() == op.getResult())
      return "BOp";
  }
  return "COp";
}

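/// Convert a vector.transfer_read to a gpu.subgroup_mma_load_matrix op and
/// record the resulting MMA matrix value in `valueMapping`. A broadcast along
/// the outer dimension is encoded as a load with stride 0.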
static void convertTransferReadOp(vector::TransferReadOp op,
                                  llvm::DenseMap<Value, Value> &valueMapping) {
  assert(op.getTransferRank() > 0 && "unexpected 0-d transfer");
  assert(transferReadSupportsMMAMatrixType(op));
  Optional<int64_t> stride =
      getMemrefConstantHorizontalStride(op.getShapedType());
  AffineMap map = op.permutation_map();
  // Handle broadcast by setting the stride to 0.
  if (map.getResult(0).isa<AffineConstantExpr>()) {
    assert(map.getResult(0).cast<AffineConstantExpr>().getValue() == 0);
    stride = 0;
  }
  assert(stride);
  const char *fragType = inferFragType(op);
  gpu::MMAMatrixType type =
      gpu::MMAMatrixType::get(op.getVectorType().getShape(),
                              op.getVectorType().getElementType(), fragType);
  OpBuilder b(op);
  Value load = b.create<gpu::SubgroupMmaLoadMatrixOp>(
      op.getLoc(), type, op.source(), op.indices(), b.getIndexAttr(*stride));
  valueMapping[op.getResult()] = load;
}

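/// Convert a vector.transfer_write to a gpu.subgroup_mma_store_matrix op that
/// stores the MMA matrix previously mapped to the written vector, then erase
/// the original op.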
static void convertTransferWriteOp(vector::TransferWriteOp op,
                                   llvm::DenseMap<Value, Value> &valueMapping) {
  assert(transferWriteSupportsMMAMatrixType(op));
  Optional<int64_t> stride =
      getMemrefConstantHorizontalStride(op.getShapedType());
  assert(stride);
  OpBuilder b(op);
  Value matrix = valueMapping.find(op.vector())->second;
  b.create<gpu::SubgroupMmaStoreMatrixOp>(
      op.getLoc(), matrix, op.source(), op.indices(), b.getIndexAttr(*stride));
  op.erase();
}

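/// Convert a vector.contract to a gpu.subgroup_mma_compute op using the MMA
/// matrices previously mapped to its lhs, rhs and accumulator operands.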
static void convertContractOp(vector::ContractionOp op,
                              llvm::DenseMap<Value, Value> &valueMapping) {
  OpBuilder b(op);
  Value opA = valueMapping.find(op.lhs())->second;
  Value opB = valueMapping.find(op.rhs())->second;
  Value opC = valueMapping.find(op.acc())->second;
  Value matmul = b.create<gpu::SubgroupMmaComputeOp>(op.getLoc(), opC.getType(),
                                                     opA, opB, opC);
  valueMapping[op.getResult()] = matmul;
}

/// Convert a 2D splat ConstantOp to a SubgroupMmaConstantMatrix op.
static void convertConstantOp(arith::ConstantOp op,
                              llvm::DenseMap<Value, Value> &valueMapping) {
  assert(constantSupportsMMAMatrixType(op));
  OpBuilder b(op);
  Attribute splat =
      op.getValue().cast<SplatElementsAttr>().getSplatValue<Attribute>();
  auto scalarConstant =
      b.create<arith::ConstantOp>(op.getLoc(), splat.getType(), splat);
  const char *fragType = inferFragType(op);
  auto vecType = op.getType().cast<VectorType>();
  gpu::MMAMatrixType type = gpu::MMAMatrixType::get(
      vecType.getShape(), vecType.getElementType(), llvm::StringRef(fragType));
  auto matrix = b.create<gpu::SubgroupMmaConstantMatrixOp>(op.getLoc(), type,
                                                           scalarConstant);
  valueMapping[op.getResult()] = matrix;
}

/// Convert a vector.broadcast from scalar to a SubgroupMmaConstantMatrix op.
static void convertBroadcastOp(vector::BroadcastOp op,
                               llvm::DenseMap<Value, Value> &valueMapping) {
  assert(broadcastSupportsMMAMatrixType(op));
  OpBuilder b(op);
  const char *fragType = inferFragType(op);
  auto vecType = op.getVectorType();
  gpu::MMAMatrixType type = gpu::MMAMatrixType::get(
      vecType.getShape(), vecType.getElementType(), llvm::StringRef(fragType));
  auto matrix = b.create<gpu::SubgroupMmaConstantMatrixOp>(op.getLoc(), type,
                                                           op.source());
  valueMapping[op.getResult()] = matrix;
}

// Replace ForOp with a new ForOp with extra operands. The YieldOp is not
// updated and needs to be updated separately for the loop to be correct.
static scf::ForOp replaceForOpWithNewSignature(OpBuilder &b, scf::ForOp loop,
                                               ValueRange newIterOperands) {
  // Create a new loop before the existing one, with the extra operands.
  OpBuilder::InsertionGuard g(b);
  b.setInsertionPoint(loop);
  auto operands = llvm::to_vector<4>(loop.getIterOperands());
  operands.append(newIterOperands.begin(), newIterOperands.end());
  scf::ForOp newLoop =
      b.create<scf::ForOp>(loop.getLoc(), loop.getLowerBound(),
                           loop.getUpperBound(), loop.getStep(), operands);
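  // Transfer the body of the original loop into the new loop and add block
  // arguments for the newly appended iteration operands.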
  newLoop.getBody()->erase();
  newLoop.getLoopBody().getBlocks().splice(
      newLoop.getLoopBody().getBlocks().begin(),
      loop.getLoopBody().getBlocks());
  for (auto operand : newIterOperands)
    newLoop.getBody()->addArgument(operand.getType());

  for (auto it : llvm::zip(loop.getResults(), newLoop.getResults().take_front(
                                                  loop.getNumResults())))
    std::get<0>(it).replaceAllUsesWith(std::get<1>(it));
  loop.erase();
  return newLoop;
}

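/// Rewrite an scf.for op so that every iteration operand that has an MMA
/// matrix counterpart gets an extra iteration operand carrying that matrix,
/// and record the mapping for the corresponding new results and block
/// arguments.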
static void convertForOp(scf::ForOp op,
                         llvm::DenseMap<Value, Value> &valueMapping) {
  SmallVector<Value> newOperands;
  SmallVector<std::pair<size_t, size_t>> argMapping;
  for (auto operand : llvm::enumerate(op.getIterOperands())) {
    auto it = valueMapping.find(operand.value());
    if (it == valueMapping.end())
      continue;
    argMapping.push_back(std::make_pair(
        operand.index(), op.getNumIterOperands() + newOperands.size()));
    newOperands.push_back(it->second);
  }
  OpBuilder b(op);
  scf::ForOp newForOp = replaceForOpWithNewSignature(b, op, newOperands);
  Block &loopBody = *newForOp.getBody();
  for (auto mapping : argMapping) {
    valueMapping[newForOp.getResult(mapping.first)] =
        newForOp.getResult(mapping.second);
    valueMapping[loopBody.getArgument(mapping.first +
                                      newForOp.getNumInductionVars())] =
        loopBody.getArgument(mapping.second + newForOp.getNumInductionVars());
  }
}

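/// Rewrite the scf.yield of a converted loop: for each yielded value that has
/// an MMA counterpart, yield the loop's corresponding init operand instead
/// (so the old vector chain becomes dead) and append the MMA matrix value for
/// the extra iteration operand.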
static void convertYieldOp(scf::YieldOp op,
                           llvm::DenseMap<Value, Value> &valueMapping) {
  OpBuilder b(op);
  auto loop = cast<scf::ForOp>(op->getParentOp());
  auto yieldOperands = llvm::to_vector<4>(op.getOperands());
  for (auto operand : llvm::enumerate(op.getOperands())) {
    auto it = valueMapping.find(operand.value());
    if (it == valueMapping.end())
      continue;
    // Replace the yield of the old value with the for op argument to make it
    // easier to remove the dead code.
    yieldOperands[operand.index()] = loop.getIterOperands()[operand.index()];
    yieldOperands.push_back(it->second);
  }
  b.create<scf::YieldOp>(op.getLoc(), yieldOperands);
  op.erase();
}

/// Convert an elementwise op to the equivalent elementwise op on MMA matrix.
static void convertElementwiseOp(Operation *op, gpu::MMAElementwiseOp opType,
                                 llvm::DenseMap<Value, Value> &valueMapping) {
  OpBuilder b(op);
  SmallVector<Value> matrixOperands;
  for (Value operand : op->getOperands())
    matrixOperands.push_back(valueMapping.find(operand)->second);
  Value newOp = b.create<gpu::SubgroupMmaElementwiseOp>(
      op->getLoc(), matrixOperands[0].getType(), matrixOperands, opType);
  valueMapping[op->getResult(0)] = newOp;
}

namespace mlir {

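/// Populate the patterns that prepare vector ops for the MMA conversion:
/// canonicalize vector.contract into row-major matmul form and fold
/// vector.transpose into vector.transfer_read.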
void populatePrepareVectorToMMAPatterns(RewritePatternSet &patterns) {
  patterns.add<PrepareContractToGPUMMA, CombineTransferReadOpTranspose>(
      patterns.getContext());
}

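/// Convert all the supported slices of vector ops in `funcOp` to GPU MMA ops,
/// visiting the ops in topological order and threading the vector-to-matrix
/// mapping through `valueMapping`.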
void convertVectorToMMAOps(FuncOp funcOp) {
  SetVector<Operation *> ops = getOpToConvert(funcOp);
  llvm::DenseMap<Value, Value> valueMapping;
  for (Operation *op : ops) {
    if (auto transferRead = dyn_cast<vector::TransferReadOp>(op)) {
      convertTransferReadOp(transferRead, valueMapping);
    } else if (auto transferWrite = dyn_cast<vector::TransferWriteOp>(op)) {
      convertTransferWriteOp(transferWrite, valueMapping);
    } else if (auto contractOp = dyn_cast<vector::ContractionOp>(op)) {
      convertContractOp(contractOp, valueMapping);
    } else if (auto constantOp = dyn_cast<arith::ConstantOp>(op)) {
      convertConstantOp(constantOp, valueMapping);
    } else if (auto broadcastOp = dyn_cast<vector::BroadcastOp>(op)) {
      convertBroadcastOp(broadcastOp, valueMapping);
    } else if (auto forOp = dyn_cast<scf::ForOp>(op)) {
      convertForOp(forOp, valueMapping);
    } else if (auto yieldOp = dyn_cast<scf::YieldOp>(op)) {
      convertYieldOp(yieldOp, valueMapping);
    } else if (auto elementwiseType = convertElementwiseOpToMMA(op)) {
      convertElementwiseOp(op, *elementwiseType, valueMapping);
    }
  }
}

} // namespace mlir

namespace {

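/// Pass that runs the preparation patterns and then converts the supported
/// vector ops to GPU MMA operations.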
struct ConvertVectorToGPUPass
    : public ConvertVectorToGPUBase<ConvertVectorToGPUPass> {
  void runOnFunction() override {
    RewritePatternSet patterns(getFunction().getContext());
    populatePrepareVectorToMMAPatterns(patterns);
    (void)applyPatternsAndFoldGreedily(getFunction(), std::move(patterns));

    convertVectorToMMAOps(getFunction());
  }
};

} // namespace

std::unique_ptr<Pass> mlir::createConvertVectorToGPUPass() {
  return std::make_unique<ConvertVectorToGPUPass>();
}