//===- VectorToGPU.cpp - Convert vector to GPU dialect ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering of vector operations to GPU dialect ops.
//
//===----------------------------------------------------------------------===//

#include <type_traits>

#include "mlir/Conversion/VectorToGPU/VectorToGPU.h"

#include "../PassDetail.h"
#include "mlir/Analysis/SliceAnalysis.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/Utils/StructuredOpsUtils.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/Dialect/Vector/VectorUtils.h"
#include "mlir/IR/Builders.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "mlir/Transforms/Passes.h"

using namespace mlir;

// Return true if the contract op can be converted to an MMA matmul.
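// For example (illustrative IR; operand names and the exact vector types are
// assumptions), a contract of the following shape maps onto a natively
// supported 16x16x16 f16 MMA matmul:
//   %d = vector.contract {
//          indexing_maps = [affine_map<(m, n, k) -> (m, k)>,
//                           affine_map<(m, n, k) -> (k, n)>,
//                           affine_map<(m, n, k) -> (m, n)>],
//          iterator_types = ["parallel", "parallel", "reduction"]}
//        %a, %b, %c
//       : vector<16x16xf16>, vector<16x16xf16> into vector<16x16xf16>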
static bool contractSupportsMMAMatrixType(vector::ContractionOp contract) {
  if (llvm::size(contract.masks()) != 0)
    return false;

  using MapList = ArrayRef<ArrayRef<AffineExpr>>;
  auto infer = [](MapList m) { return AffineMap::inferFromExprList(m); };
  AffineExpr m, n, k;
  bindDims(contract.getContext(), m, n, k);
  auto iteratorTypes = contract.iterator_types().getValue();
  if (!(isParallelIterator(iteratorTypes[0]) &&
        isParallelIterator(iteratorTypes[1]) &&
        isReductionIterator(iteratorTypes[2])))
    return false;

  // The contract needs to represent a matmul to be able to convert to
  // MMAMatrix matmul.
  if (contract.getIndexingMaps() != infer({{m, k}, {k, n}, {m, n}}))
    return false;

  // Check that the size matches what is natively supported.
  VectorType lhsType = contract.lhs().getType().cast<VectorType>();
  VectorType rhsType = contract.rhs().getType().cast<VectorType>();
  VectorType accType = contract.acc().getType().cast<VectorType>();

  std::tuple<int, int, int> dim(lhsType.getDimSize(0), rhsType.getDimSize(1),
                                lhsType.getDimSize(1));
  if (lhsType.getElementType().isInteger(8) &&
      rhsType.getElementType().isInteger(8) &&
      accType.getElementType().isInteger(32) &&
      (dim == std::make_tuple(8, 8, 32) || dim == std::make_tuple(16, 16, 32) ||
       dim == std::make_tuple(16, 8, 32)))
    return true;

  if (lhsType.getElementType().isF16() && rhsType.getElementType().isF16() &&
      (accType.getElementType().isF16() || accType.getElementType().isF32()) &&
      (dim == std::make_tuple(8, 8, 16) || dim == std::make_tuple(16, 16, 16) ||
       dim == std::make_tuple(16, 8, 16)))
    return true;
  return false;
}

// Return the stride for dimension 0 of |type| if it is a memref and has a
// constant stride.
static llvm::Optional<int64_t>
getMemrefConstantHorizontalStride(ShapedType type) {
  auto memrefType = type.dyn_cast<MemRefType>();
  if (!memrefType)
    return llvm::None;
  int64_t offset = 0;
  SmallVector<int64_t, 2> strides;
  if (failed(getStridesAndOffset(memrefType, strides, offset)))
    return llvm::None;
  if (strides[0] == ShapedType::kDynamicStrideOrOffset)
    return llvm::None;
  return strides[0];
}

// Return true if the transfer op can be converted to an MMA matrix load.
static bool transferReadSupportsMMAMatrixType(vector::TransferReadOp readOp) {
  if (readOp.mask() || readOp.hasOutOfBoundsDim() ||
      readOp.getVectorType().getRank() != 2)
    return false;
  if (!getMemrefConstantHorizontalStride(readOp.getShapedType()))
    return false;
  // TODO: Support transpose once it is added to GPU dialect ops.
  if (!readOp.permutation_map().isMinorIdentity())
    return false;
  return true;
}

// Return true if the transfer op can be converted to an MMA matrix store.
static bool
transferWriteSupportsMMAMatrixType(vector::TransferWriteOp writeOp) {
  if (writeOp.mask() || writeOp.hasOutOfBoundsDim() ||
      writeOp.getVectorType().getRank() != 2)
    return false;
  if (!getMemrefConstantHorizontalStride(writeOp.getShapedType()))
    return false;
  // TODO: Support transpose once it is added to GPU dialect ops.
  if (!writeOp.permutation_map().isMinorIdentity())
    return false;
  return true;
}

/// Return true if the constant is a splat of a 2D vector so that it can be
/// converted to an MMA constant matrix op.
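/// For example (illustrative; the exact type is an assumption):
///   %cst = arith.constant dense<0.000000e+00> : vector<16x16xf16>
/// qualifies, since its splat value can seed a gpu.subgroup_mma_constant_matrix.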
static bool constantSupportsMMAMatrixType(arith::ConstantOp constantOp) {
  auto vecType = constantOp.getType().dyn_cast<VectorType>();
  if (!vecType || vecType.getRank() != 2)
    return false;
  return constantOp.value().isa<SplatElementsAttr>();
}

/// Return true if this is a broadcast from scalar to a 2D vector.
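/// For example (illustrative; names and types are assumptions):
///   %v = vector.broadcast %f : f16 to vector<16x16xf16>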
static bool broadcastSupportsMMAMatrixType(vector::BroadcastOp broadcastOp) {
  return broadcastOp.getVectorType().getRank() == 2 &&
         broadcastOp.source().getType().isa<FloatType>();
}

static bool supportsMMaMatrixType(Operation *op) {
  if (isa<scf::ForOp, scf::YieldOp>(op))
    return true;
  if (auto transferRead = dyn_cast<vector::TransferReadOp>(op))
    return transferReadSupportsMMAMatrixType(transferRead);
  if (auto transferWrite = dyn_cast<vector::TransferWriteOp>(op))
    return transferWriteSupportsMMAMatrixType(transferWrite);
  if (auto contract = dyn_cast<vector::ContractionOp>(op))
    return contractSupportsMMAMatrixType(contract);
  if (auto constant = dyn_cast<arith::ConstantOp>(op))
    return constantSupportsMMAMatrixType(constant);
  if (auto broadcast = dyn_cast<vector::BroadcastOp>(op))
    return broadcastSupportsMMAMatrixType(broadcast);
  return false;
}

// Analyze the slice of operations anchored on each vector.contract op to
// figure out if the whole slice can be converted to MMA operations.
static SetVector<Operation *> getOpToConvert(mlir::Operation *op) {
  auto hasVectorDest = [](Operation *op) {
    return llvm::any_of(op->getResultTypes(),
                        [](Type t) { return t.isa<VectorType>(); });
  };
  auto hasVectorSrc = [](Operation *op) {
    return llvm::any_of(op->getOperandTypes(),
                        [](Type t) { return t.isa<VectorType>(); });
  };
  SetVector<Operation *> opToConvert;
  op->walk([&](vector::ContractionOp contract) {
    if (opToConvert.contains(contract.getOperation()))
      return;
    SetVector<Operation *> dependentOps =
        getSlice(contract, hasVectorDest, hasVectorSrc);
    // If any instruction cannot use the MMA matrix type, drop the whole
    // chain. MMA matrices are stored in an opaque type so they cannot be used
    // by all operations.
    if (llvm::any_of(dependentOps,
                     [](Operation *op) { return !supportsMMaMatrixType(op); }))
      return;
    opToConvert.insert(dependentOps.begin(), dependentOps.end());
  });
  return opToConvert;
}

namespace {
// Transform contract into (m, k)x(k, n)x(m, n) form so that it can be converted
// to MMA matmul.
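// For instance (illustrative), a contract whose indexing maps are
// {(m, k), (n, k), (m, n)}, i.e. a row-major LHS multiplied by a transposed
// RHS, gets a vector.transpose inserted on its RHS so that the rewritten
// contract uses the canonical {(m, k), (k, n), (m, n)} maps expected by the
// MMA lowering.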
struct PrepareContractToGPUMMA
    : public OpRewritePattern<vector::ContractionOp> {
  using OpRewritePattern<vector::ContractionOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ContractionOp op,
                                PatternRewriter &rewriter) const override {
    Location loc = op.getLoc();
    Value lhs = op.lhs(), rhs = op.rhs(), res = op.acc();

    // Set up the parallel/reduction structure in the right form.
    using MapList = ArrayRef<ArrayRef<AffineExpr>>;
    auto infer = [](MapList m) { return AffineMap::inferFromExprList(m); };
    AffineExpr m, n, k;
    bindDims(rewriter.getContext(), m, n, k);
    static constexpr std::array<int64_t, 2> perm = {1, 0};
    auto iteratorTypes = op.iterator_types().getValue();
    SmallVector<AffineMap, 4> maps = op.getIndexingMaps();
    if (!(isParallelIterator(iteratorTypes[0]) &&
          isParallelIterator(iteratorTypes[1]) &&
          isReductionIterator(iteratorTypes[2])))
      return failure();
    //
    // Two outer parallel, one inner reduction (matmat flavor).
    //
    if (maps == infer({{m, k}, {k, n}, {m, n}})) {
      // This is the classical row-major matmul, nothing to do.
      return failure();
    }
    if (maps == infer({{m, k}, {n, k}, {m, n}})) {
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
    } else if (maps == infer({{k, m}, {k, n}, {m, n}})) {
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{k, m}, {n, k}, {m, n}})) {
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{m, k}, {k, n}, {n, m}})) {
      std::swap(rhs, lhs);
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{m, k}, {n, k}, {n, m}})) {
      std::swap(rhs, lhs);
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
    } else if (maps == infer({{k, m}, {k, n}, {n, m}})) {
      std::swap(lhs, rhs);
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{k, m}, {n, k}, {n, m}})) {
      std::swap(lhs, rhs);
    } else {
      return failure();
    }
    rewriter.replaceOpWithNewOp<vector::ContractionOp>(
        op, lhs, rhs, res,
        rewriter.getAffineMapArrayAttr(infer({{m, k}, {k, n}, {m, n}})),
        op.iterator_types());
    return success();
  }
};

// Merge a transpose op into the transfer read op. Transposes are not supported
// on MMA types but the MMA load can transpose the matrix when loading.
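// For example (illustrative; memref and vector types are assumptions):
//   %0 = vector.transfer_read %src[%i, %j], %pad
//        : memref<?x?xf16>, vector<16x16xf16>
//   %1 = vector.transpose %0, [1, 0] : vector<16x16xf16> to vector<16x16xf16>
// becomes a single vector.transfer_read whose permutation_map composes the
// transpose permutation.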
struct CombineTransferReadOpTranspose final
    : public OpRewritePattern<vector::TransposeOp> {
  using OpRewritePattern<vector::TransposeOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::TransposeOp op,
                                PatternRewriter &rewriter) const override {
    auto transferReadOp = op.vector().getDefiningOp<vector::TransferReadOp>();
    if (!transferReadOp)
      return failure();
    if (transferReadOp.mask() || transferReadOp.hasOutOfBoundsDim())
      return failure();
    SmallVector<int64_t, 2> perm;
    op.getTransp(perm);
    SmallVector<unsigned, 2> permU;
    for (int64_t o : perm)
      permU.push_back(unsigned(o));
    AffineMap permutationMap =
        AffineMap::getPermutationMap(permU, op.getContext());
    AffineMap newMap = permutationMap.compose(transferReadOp.permutation_map());
    rewriter.replaceOpWithNewOp<vector::TransferReadOp>(
        op, op.getType(), transferReadOp.source(), transferReadOp.indices(),
        newMap, transferReadOp.padding(), transferReadOp.mask(),
        transferReadOp.in_boundsAttr());
    return success();
  }
};

} // namespace

// MMA types have different layouts based on how they are used in matmul ops.
// Figure out the right layout to use by looking at op uses.
// TODO: Change the GPU dialect to abstract the layout at this level and only
// care about it during lowering to NVVM.
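// For example, a transfer_read feeding the LHS of a vector.contract is given
// the "AOp" fragment type, one feeding the RHS gets "BOp", and any value only
// used as an accumulator (or not used by a contract at all) defaults to "COp".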
template <typename OpTy>
static const char *inferFragType(OpTy op) {
  for (Operation *users : op->getUsers()) {
    auto contract = dyn_cast<vector::ContractionOp>(users);
    if (!contract)
      continue;
    if (contract.lhs() == op.getResult())
      return "AOp";
    if (contract.rhs() == op.getResult())
      return "BOp";
  }
  return "COp";
}

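/// Convert a 2D vector.transfer_read into a gpu.subgroup_mma_load_matrix.
/// A sketch of the resulting op (illustrative; types and the stride value are
/// assumptions):
///   %m = gpu.subgroup_mma_load_matrix %src[%i, %j] {leadDimension = 32 : index}
///        : memref<32x32xf16> -> !gpu.mma_matrix<16x16xf16, "AOp">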
static void convertTransferReadOp(vector::TransferReadOp op,
                                  llvm::DenseMap<Value, Value> &valueMapping) {
  assert(transferReadSupportsMMAMatrixType(op));
  Optional<int64_t> stride =
      getMemrefConstantHorizontalStride(op.getShapedType());
  assert(stride);
  const char *fragType = inferFragType(op);
  gpu::MMAMatrixType type =
      gpu::MMAMatrixType::get(op.getVectorType().getShape(),
                              op.getVectorType().getElementType(), fragType);
  OpBuilder b(op);
  Value load = b.create<gpu::SubgroupMmaLoadMatrixOp>(
      op.getLoc(), type, op.source(), op.indices(), b.getIndexAttr(*stride));
  valueMapping[op.getResult()] = load;
}

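/// Convert a 2D vector.transfer_write into a gpu.subgroup_mma_store_matrix.
/// A sketch of the resulting op (illustrative; types and the stride value are
/// assumptions):
///   gpu.subgroup_mma_store_matrix %m, %dst[%i, %j] {leadDimension = 32 : index}
///       : !gpu.mma_matrix<16x16xf16, "COp">, memref<32x32xf16>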
static void convertTransferWriteOp(vector::TransferWriteOp op,
                                   llvm::DenseMap<Value, Value> &valueMapping) {
  assert(transferWriteSupportsMMAMatrixType(op));
  Optional<int64_t> stride =
      getMemrefConstantHorizontalStride(op.getShapedType());
  assert(stride);
  OpBuilder b(op);
  Value matrix = valueMapping.find(op.vector())->second;
  b.create<gpu::SubgroupMmaStoreMatrixOp>(
      op.getLoc(), matrix, op.source(), op.indices(), b.getIndexAttr(*stride));
  op.erase();
}

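/// Convert a vector.contract into a gpu.subgroup_mma_compute that multiplies
/// and accumulates the previously mapped "AOp", "BOp" and "COp" MMA matrices,
/// producing a new "COp" matrix that replaces the contract result in the
/// value mapping.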
static void convertContractOp(vector::ContractionOp op,
                              llvm::DenseMap<Value, Value> &valueMapping) {
  OpBuilder b(op);
  Value opA = valueMapping.find(op.lhs())->second;
  Value opB = valueMapping.find(op.rhs())->second;
  Value opC = valueMapping.find(op.acc())->second;
  Value matmul = b.create<gpu::SubgroupMmaComputeOp>(op.getLoc(), opC.getType(),
                                                     opA, opB, opC);
  valueMapping[op.getResult()] = matmul;
}

/// Convert a 2D splat ConstantOp to a SubgroupMmaConstantMatrix op.
static void convertConstantOp(arith::ConstantOp op,
                              llvm::DenseMap<Value, Value> &valueMapping) {
  assert(constantSupportsMMAMatrixType(op));
  OpBuilder b(op);
  Attribute splat = op.value().cast<SplatElementsAttr>().getSplatValue();
  auto scalarConstant =
      b.create<arith::ConstantOp>(op.getLoc(), splat.getType(), splat);
  const char *fragType = inferFragType(op);
  auto vecType = op.getType().cast<VectorType>();
  gpu::MMAMatrixType type = gpu::MMAMatrixType::get(
      vecType.getShape(), vecType.getElementType(), llvm::StringRef(fragType));
  auto matrix = b.create<gpu::SubgroupMmaConstantMatrixOp>(op.getLoc(), type,
                                                           scalarConstant);
  valueMapping[op.getResult()] = matrix;
}

/// Convert a vector.broadcast from scalar to a SubgroupMmaConstantMatrix op.
static void convertBroadcastOp(vector::BroadcastOp op,
                               llvm::DenseMap<Value, Value> &valueMapping) {
  assert(broadcastSupportsMMAMatrixType(op));
  OpBuilder b(op);
  const char *fragType = inferFragType(op);
  auto vecType = op.getVectorType();
  gpu::MMAMatrixType type = gpu::MMAMatrixType::get(
      vecType.getShape(), vecType.getElementType(), llvm::StringRef(fragType));
  auto matrix = b.create<gpu::SubgroupMmaConstantMatrixOp>(op.getLoc(), type,
                                                           op.source());
  valueMapping[op.getResult()] = matrix;
}

// Replace the ForOp with a new ForOp with extra operands. The YieldOp is not
// updated and needs to be updated separately for the loop to be correct.
static scf::ForOp replaceForOpWithNewSignature(OpBuilder &b, scf::ForOp loop,
                                               ValueRange newIterOperands) {
  // Create a new loop before the existing one, with the extra operands.
  OpBuilder::InsertionGuard g(b);
  b.setInsertionPoint(loop);
  auto operands = llvm::to_vector<4>(loop.getIterOperands());
  operands.append(newIterOperands.begin(), newIterOperands.end());
  scf::ForOp newLoop =
      b.create<scf::ForOp>(loop.getLoc(), loop.lowerBound(), loop.upperBound(),
                           loop.step(), operands);
  newLoop.getBody()->erase();
  newLoop.getLoopBody().getBlocks().splice(
      newLoop.getLoopBody().getBlocks().begin(),
      loop.getLoopBody().getBlocks());
  for (auto operand : newIterOperands)
    newLoop.getBody()->addArgument(operand.getType());

  for (auto it : llvm::zip(loop.getResults(), newLoop.getResults().take_front(
                                                  loop.getNumResults())))
    std::get<0>(it).replaceAllUsesWith(std::get<1>(it));
  loop.erase();
  return newLoop;
}

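/// Rewrite an scf.for loop so that iter_args whose values were mapped to MMA
/// matrices also carry the mapped MMA values as extra iter_args. The original
/// results and block arguments are then mapped to the newly appended ones so
/// that later conversions (e.g. of the yield) can look them up.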
static void convertForOp(scf::ForOp op,
                         llvm::DenseMap<Value, Value> &valueMapping) {
  SmallVector<Value> newOperands;
  SmallVector<std::pair<size_t, size_t>> argMapping;
  for (auto operand : llvm::enumerate(op.getIterOperands())) {
    auto it = valueMapping.find(operand.value());
    if (it == valueMapping.end())
      continue;
    argMapping.push_back(std::make_pair(
        operand.index(), op.getNumIterOperands() + newOperands.size()));
    newOperands.push_back(it->second);
  }
  OpBuilder b(op);
  scf::ForOp newForOp = replaceForOpWithNewSignature(b, op, newOperands);
  Block &loopBody = *newForOp.getBody();
  for (auto mapping : argMapping) {
    valueMapping[newForOp.getResult(mapping.first)] =
        newForOp.getResult(mapping.second);
    valueMapping[loopBody.getArgument(mapping.first +
                                      newForOp.getNumInductionVars())] =
        loopBody.getArgument(mapping.second + newForOp.getNumInductionVars());
  }
}

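/// Rewrite the scf.yield so that values converted to MMA matrices are yielded
/// through the extra iter_args added by convertForOp; the original vector
/// operands are replaced by the loop's incoming iter operands to make the dead
/// vector chain easy to remove afterwards.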
static void convertYieldOp(scf::YieldOp op,
                           llvm::DenseMap<Value, Value> &valueMapping) {
  OpBuilder b(op);
  auto loop = cast<scf::ForOp>(op->getParentOp());
  auto yieldOperands = llvm::to_vector<4>(op.getOperands());
  for (auto operand : llvm::enumerate(op.getOperands())) {
    auto it = valueMapping.find(operand.value());
    if (it == valueMapping.end())
      continue;
    // Replace the yield of the old value with the for op argument to make it
    // easier to remove the dead code.
    yieldOperands[operand.index()] = loop.getIterOperands()[operand.index()];
    yieldOperands.push_back(it->second);
  }
  b.create<scf::YieldOp>(op.getLoc(), yieldOperands);
  op.erase();
}

namespace mlir {

void populatePrepareVectorToMMAPatterns(RewritePatternSet &patterns) {
  patterns.add<PrepareContractToGPUMMA, CombineTransferReadOpTranspose>(
      patterns.getContext());
}

void convertVectorToMMAOps(FuncOp funcOp) {
  SetVector<Operation *> ops = getOpToConvert(funcOp);
  llvm::DenseMap<Value, Value> valueMapping;
  for (Operation *op : ops) {
    if (auto transferRead = dyn_cast<vector::TransferReadOp>(op)) {
      convertTransferReadOp(transferRead, valueMapping);
    } else if (auto transferWrite = dyn_cast<vector::TransferWriteOp>(op)) {
      convertTransferWriteOp(transferWrite, valueMapping);
    } else if (auto contractOp = dyn_cast<vector::ContractionOp>(op)) {
      convertContractOp(contractOp, valueMapping);
    } else if (auto constantOp = dyn_cast<arith::ConstantOp>(op)) {
      convertConstantOp(constantOp, valueMapping);
    } else if (auto broadcastOp = dyn_cast<vector::BroadcastOp>(op)) {
      convertBroadcastOp(broadcastOp, valueMapping);
    } else if (auto forOp = dyn_cast<scf::ForOp>(op)) {
      convertForOp(forOp, valueMapping);
    } else if (auto yieldOp = dyn_cast<scf::YieldOp>(op)) {
      convertYieldOp(yieldOp, valueMapping);
    }
  }
}

} // namespace mlir

namespace {

struct ConvertVectorToGPUPass
    : public ConvertVectorToGPUBase<ConvertVectorToGPUPass> {
  void runOnFunction() override {
    RewritePatternSet patterns(getFunction().getContext());
    populatePrepareVectorToMMAPatterns(patterns);
    (void)applyPatternsAndFoldGreedily(getFunction(), std::move(patterns));

    convertVectorToMMAOps(getFunction());
  }
};

} // namespace

std::unique_ptr<Pass> mlir::createConvertVectorToGPUPass() {
  return std::make_unique<ConvertVectorToGPUPass>();
}