//===- VectorToGPU.cpp - Convert vector to GPU dialect ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering of vector operations to GPU dialect ops.
//
//===----------------------------------------------------------------------===//

#include <type_traits>

#include "mlir/Conversion/VectorToGPU/VectorToGPU.h"

#include "../PassDetail.h"
#include "mlir/Analysis/SliceAnalysis.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/Utils/StructuredOpsUtils.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/Dialect/Vector/VectorUtils.h"
#include "mlir/IR/Builders.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "mlir/Transforms/Passes.h"

using namespace mlir;

// Return true if the contract op can be converted to an MMA matmul.
static bool contractSupportsMMAMatrixType(vector::ContractionOp contract) {
  if (llvm::size(contract.masks()) != 0)
    return false;

  using MapList = ArrayRef<ArrayRef<AffineExpr>>;
  auto infer = [](MapList m) { return AffineMap::inferFromExprList(m); };
  AffineExpr m, n, k;
  bindDims(contract.getContext(), m, n, k);
  auto iteratorTypes = contract.iterator_types().getValue();
  if (!(isParallelIterator(iteratorTypes[0]) &&
        isParallelIterator(iteratorTypes[1]) &&
        isReductionIterator(iteratorTypes[2])))
    return false;

  // The contraction needs to represent a matmul to be convertible to an
  // MMAMatrix matmul.
  if (contract.getIndexingMaps() != infer({{m, k}, {k, n}, {m, n}}))
    return false;

  // Check that the size matches what is natively supported.
  VectorType lhsType = contract.lhs().getType().cast<VectorType>();
  VectorType rhsType = contract.rhs().getType().cast<VectorType>();
  VectorType accType = contract.acc().getType().cast<VectorType>();

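  // The matmul shape as an (M, N, K) tuple: M and N are the outer dimensions
  // of the lhs and rhs, K is the shared contraction dimension.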
  std::tuple<int, int, int> dim(lhsType.getDimSize(0), rhsType.getDimSize(1),
                                lhsType.getDimSize(1));
  if (lhsType.getElementType().isInteger(8) &&
      rhsType.getElementType().isInteger(8) &&
      accType.getElementType().isInteger(32) &&
      (dim == std::make_tuple(8, 8, 32) || dim == std::make_tuple(16, 16, 32) ||
       dim == std::make_tuple(16, 8, 32)))
    return true;

  if (lhsType.getElementType().isF16() && rhsType.getElementType().isF16() &&
      (accType.getElementType().isF16() || accType.getElementType().isF32()) &&
      (dim == std::make_tuple(8, 8, 16) || dim == std::make_tuple(16, 16, 16) ||
       dim == std::make_tuple(16, 8, 16)))
    return true;
  return false;
}

// Return the stride for dimension 0 of |type| if it is a memref and has a
// constant stride.
static llvm::Optional<int64_t>
getMemrefConstantHorizontalStride(ShapedType type) {
  auto memrefType = type.dyn_cast<MemRefType>();
  if (!memrefType)
    return llvm::None;
  int64_t offset = 0;
  SmallVector<int64_t, 2> strides;
  if (failed(getStridesAndOffset(memrefType, strides, offset)))
    return llvm::None;
  if (strides[0] == ShapedType::kDynamicStrideOrOffset)
    return llvm::None;
  return strides[0];
}

// Return true if the transfer op can be converted to an MMA matrix load.
static bool transferReadSupportsMMAMatrixType(vector::TransferReadOp readOp) {
  if (readOp.mask() || readOp.hasOutOfBoundsDim() ||
      readOp.getVectorType().getRank() != 2)
    return false;
  if (!getMemrefConstantHorizontalStride(readOp.getShapedType()))
    return false;
  // TODO: Support transpose once it is added to GPU dialect ops.
  if (!readOp.permutation_map().isMinorIdentity())
    return false;
  return true;
}

// Return true if the transfer op can be converted to an MMA matrix store.
static bool
transferWriteSupportsMMAMatrixType(vector::TransferWriteOp writeOp) {
  if (writeOp.mask() || writeOp.hasOutOfBoundsDim() ||
      writeOp.getVectorType().getRank() != 2)
    return false;
  if (!getMemrefConstantHorizontalStride(writeOp.getShapedType()))
    return false;
  // TODO: Support transpose once it is added to GPU dialect ops.
  if (!writeOp.permutation_map().isMinorIdentity())
    return false;
  return true;
}

static bool supportsMMaMatrixType(Operation *op) {
  if (auto transferRead = dyn_cast<vector::TransferReadOp>(op))
    return transferReadSupportsMMAMatrixType(transferRead);
  if (auto transferWrite = dyn_cast<vector::TransferWriteOp>(op))
    return transferWriteSupportsMMAMatrixType(transferWrite);
  if (auto contract = dyn_cast<vector::ContractionOp>(op))
    return contractSupportsMMAMatrixType(contract);
  return false;
}

// Analyze the slice of operations anchored on each contraction op to figure
// out whether the whole slice can be converted to MMA operations.
static SetVector<Operation *> getOpToConvert(mlir::Operation *op) {
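  // An op is part of the slice if it has no results or produces at least one
  // vector-typed result; only such ops are followed when computing the slice.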
  auto hasVectorDest = [](Operation *op) {
    return op->getNumResults() == 0 ||
           llvm::any_of(op->getResultTypes(),
                        [](Type t) { return t.isa<VectorType>(); });
  };
  SetVector<Operation *> opToConvert;
  op->walk([&](vector::ContractionOp contract) {
    if (opToConvert.contains(contract.getOperation()))
      return;
    SetVector<Operation *> dependentOps =
        getSlice(contract, hasVectorDest, hasVectorDest);
    // If any instruction cannot use the MMA matrix type, drop the whole
    // chain. MMA matrices are stored in an opaque type, so they cannot be
    // used by all operations.
    if (llvm::any_of(dependentOps,
                     [](Operation *op) { return !supportsMMaMatrixType(op); }))
      return;
    opToConvert.insert(dependentOps.begin(), dependentOps.end());
  });
  return opToConvert;
}

149 
150 namespace {
151 // Transform contract into (m, k)x(k, n)x(m, n) form so that it can be converted
152 // to MMA matmul.
153 struct PrepareContractToGPUMMA
154     : public OpRewritePattern<vector::ContractionOp> {
155   using OpRewritePattern<vector::ContractionOp>::OpRewritePattern;
156 
157   LogicalResult matchAndRewrite(vector::ContractionOp op,
158                                 PatternRewriter &rewriter) const override {
159     Location loc = op.getLoc();
160     Value lhs = op.lhs(), rhs = op.rhs(), res = op.acc();
161 
162     // Set up the parallel/reduction structure in right form.
163     using MapList = ArrayRef<ArrayRef<AffineExpr>>;
164     auto infer = [](MapList m) { return AffineMap::inferFromExprList(m); };
165     AffineExpr m, n, k;
166     bindDims(rewriter.getContext(), m, n, k);
167     static constexpr std::array<int64_t, 2> perm = {1, 0};
168     auto iteratorTypes = op.iterator_types().getValue();
169     SmallVector<AffineMap, 4> maps = op.getIndexingMaps();
170     if (!(isParallelIterator(iteratorTypes[0]) &&
171           isParallelIterator(iteratorTypes[1]) &&
172           isReductionIterator(iteratorTypes[2])))
173       return failure();
174     //
175     // Two outer parallel, one inner reduction (matmat flavor).
176     //
177     if (maps == infer({{m, k}, {k, n}, {m, n}})) {
178       // This is the classical row-major matmul, nothing to do.
179       return failure();
180     }
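    // The remaining cases differ from the canonical form by transposed
    // operands and/or a transposed result; insert vector.transpose ops (and
    // swap lhs/rhs where needed) to recover the (m, k) x (k, n) -> (m, n)
    // form.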
    if (maps == infer({{m, k}, {n, k}, {m, n}})) {
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
    } else if (maps == infer({{k, m}, {k, n}, {m, n}})) {
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{k, m}, {n, k}, {m, n}})) {
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{m, k}, {k, n}, {n, m}})) {
      std::swap(rhs, lhs);
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{m, k}, {n, k}, {n, m}})) {
      std::swap(rhs, lhs);
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
    } else if (maps == infer({{k, m}, {k, n}, {n, m}})) {
      std::swap(lhs, rhs);
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{k, m}, {n, k}, {n, m}})) {
      std::swap(lhs, rhs);
    } else {
      return failure();
    }
    rewriter.replaceOpWithNewOp<vector::ContractionOp>(
        op, lhs, rhs, res,
        rewriter.getAffineMapArrayAttr(infer({{m, k}, {k, n}, {m, n}})),
        op.iterator_types());
    return success();
  }
};

// Merge the transpose op into the transfer read op. Transposes are not
// supported on MMA types, but the MMA load can transpose the matrix when
// loading.
struct CombineTransferReadOpTranspose final
    : public OpRewritePattern<vector::TransposeOp> {
  using OpRewritePattern<vector::TransposeOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::TransposeOp op,
                                PatternRewriter &rewriter) const override {
    auto transferReadOp = op.vector().getDefiningOp<vector::TransferReadOp>();
    if (!transferReadOp)
      return failure();
    if (transferReadOp.mask() || transferReadOp.hasOutOfBoundsDim())
      return failure();
    SmallVector<int64_t, 2> perm;
    op.getTransp(perm);
    SmallVector<unsigned, 2> permU;
    for (int64_t o : perm)
      permU.push_back(unsigned(o));
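    // Compose the transpose permutation with the read's permutation map so
    // that the new transfer_read produces the transposed vector directly.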
    AffineMap permutationMap =
        AffineMap::getPermutationMap(permU, op.getContext());
    AffineMap newMap = permutationMap.compose(transferReadOp.permutation_map());
    rewriter.replaceOpWithNewOp<vector::TransferReadOp>(
        op, op.getType(), transferReadOp.source(), transferReadOp.indices(),
        newMap, transferReadOp.padding(), transferReadOp.mask(),
        transferReadOp.in_boundsAttr());
    return success();
  }
};

} // namespace

// MMA types have a different layout based on how they are used in matmul ops.
// Figure out the right layout to use by looking at transfer op uses.
// TODO: Change the GPU dialect to abstract the layout at this level and only
// care about it during lowering to NVVM.
static const char *inferFragType(vector::TransferReadOp op) {
  for (Operation *user : op->getUsers()) {
    auto contract = dyn_cast<vector::ContractionOp>(user);
    if (!contract)
      continue;
    if (contract.lhs() == op.getResult())
      return "AOp";
    if (contract.rhs() == op.getResult())
      return "BOp";
  }
  return "COp";
}

static void convertTransferReadOp(vector::TransferReadOp op,
                                  llvm::DenseMap<Value, Value> &valueMapping) {
  assert(transferReadSupportsMMAMatrixType(op));
  Optional<int64_t> stride =
      getMemrefConstantHorizontalStride(op.getShapedType());
  assert(stride);
  const char *fragType = inferFragType(op);
  gpu::MMAMatrixType type =
      gpu::MMAMatrixType::get(op.getVectorType().getShape(),
                              op.getVectorType().getElementType(), fragType);
  OpBuilder b(op);
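  // The constant stride of the memref's outer dimension is passed as the
  // leading dimension of the MMA load.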
  Value load = b.create<gpu::SubgroupMmaLoadMatrixOp>(
      op.getLoc(), type, op.source(), op.indices(), b.getIndexAttr(*stride));
  valueMapping[op.getResult()] = load;
}

static void convertTransferWriteOp(vector::TransferWriteOp op,
                                   llvm::DenseMap<Value, Value> &valueMapping) {
  assert(transferWriteSupportsMMAMatrixType(op));
  Optional<int64_t> stride =
      getMemrefConstantHorizontalStride(op.getShapedType());
  assert(stride);
  OpBuilder b(op);
  Value matrix = valueMapping.find(op.vector())->second;
  b.create<gpu::SubgroupMmaStoreMatrixOp>(
      op.getLoc(), matrix, op.source(), op.indices(), b.getIndexAttr(*stride));
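  // The store produces no results, so there is nothing to record in
  // valueMapping; simply erase the original transfer_write.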
  op.erase();
}

static void convertContractOp(vector::ContractionOp op,
                              llvm::DenseMap<Value, Value> &valueMapping) {
  OpBuilder b(op);
  Value opA = valueMapping.find(op.lhs())->second;
  Value opB = valueMapping.find(op.rhs())->second;
  Value opC = valueMapping.find(op.acc())->second;
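  // The compute op produces a fragment with the same MMA type as the
  // accumulator.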
  Value matmul = b.create<gpu::SubgroupMmaComputeOp>(op.getLoc(), opC.getType(),
                                                     opA, opB, opC);
  valueMapping[op.getResult()] = matmul;
}

namespace mlir {

void populatePrepareVectorToMMAPatterns(RewritePatternSet &patterns) {
  patterns.add<PrepareContractToGPUMMA, CombineTransferReadOpTranspose>(
      patterns.getContext());
}

void convertVectorToMMAOps(FuncOp funcOp) {
  SetVector<Operation *> ops = getOpToConvert(funcOp);
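  // Maps the original vector-typed SSA values to their gpu.mma_matrix
  // replacements; later conversions look up their MMA operands here.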
  llvm::DenseMap<Value, Value> valueMapping;
  for (Operation *op : ops) {
    if (auto transferRead = dyn_cast<vector::TransferReadOp>(op)) {
      convertTransferReadOp(transferRead, valueMapping);
    } else if (auto transferWrite = dyn_cast<vector::TransferWriteOp>(op)) {
      convertTransferWriteOp(transferWrite, valueMapping);
    } else if (auto contractOp = dyn_cast<vector::ContractionOp>(op)) {
      convertContractOp(contractOp, valueMapping);
    }
  }
}

} // namespace mlir

namespace {

struct ConvertVectorToGPUPass
    : public ConvertVectorToGPUBase<ConvertVectorToGPUPass> {
  void runOnFunction() override {
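    // First put contractions into row-major matmul form and fold transposes
    // into transfer reads, then convert the resulting vector ops to GPU MMA
    // operations.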
    RewritePatternSet patterns(getFunction().getContext());
    populatePrepareVectorToMMAPatterns(patterns);
    (void)applyPatternsAndFoldGreedily(getFunction(), std::move(patterns));

    convertVectorToMMAOps(getFunction());
  }
};

} // namespace

std::unique_ptr<Pass> mlir::createConvertVectorToGPUPass() {
  return std::make_unique<ConvertVectorToGPUPass>();
}