//===- VectorToGPU.cpp - Convert vector to GPU dialect ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering of vector operations to GPU dialect ops.
//
//===----------------------------------------------------------------------===//

#include <type_traits>

#include "NvGpuSupport.h"
#include "mlir/Conversion/VectorToGPU/VectorToGPU.h"

#include "../PassDetail.h"
#include "mlir/Analysis/SliceAnalysis.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/NVGPU/IR/NVGPUDialect.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/Utils/StructuredOpsUtils.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/Dialect/Vector/Utils/VectorUtils.h"
#include "mlir/IR/Builders.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "mlir/Transforms/Passes.h"
#include "llvm/ADT/TypeSwitch.h"

using namespace mlir;

/// For a vector TransferOpType `xferOp`, an empty `indices` vector, and an
/// AffineMap representing offsets to apply to indices, the function fills
/// `indices` with the original indices plus the offsets. The offsets are
/// applied by taking into account the permutation map of the transfer op. If
/// the `offsetMap` has dimension placeholders, those should be provided in
/// `dimValues`.
template <typename TransferOpType>
static void getXferIndices(OpBuilder &b, TransferOpType xferOp,
                           AffineMap offsetMap, ArrayRef<Value> dimValues,
                           SmallVector<Value, 4> &indices) {
  indices.append(xferOp.getIndices().begin(), xferOp.getIndices().end());
  Location loc = xferOp.getLoc();
  unsigned offsetsIdx = 0;
  for (auto expr : xferOp.getPermutationMap().getResults()) {
    if (auto dim = expr.template dyn_cast<AffineDimExpr>()) {
      Value prevIdx = indices[dim.getPosition()];
      SmallVector<Value, 3> dims(dimValues.begin(), dimValues.end());
      dims.push_back(prevIdx);
      AffineExpr d0 = b.getAffineDimExpr(offsetMap.getNumDims());
      indices[dim.getPosition()] = makeComposedAffineApply(
          b, loc, d0 + offsetMap.getResult(offsetsIdx++), dims);
      continue;
    }
  }
}
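
// Illustrative sketch (hypothetical values, not from a test): for a transfer
// op with indices (%i, %j), an `offsetMap` (d0, d1) -> (d0 mod 8, d1 * 2)
// over `dimValues` (%laneId, %logicalId), the index for the first permuted
// dimension becomes
//   affine.apply affine_map<(d0, d1, d2) -> (d2 + d0 mod 8)>
//       (%laneId, %logicalId, %i)
// i.e. the original index plus the offset expression, with the original index
// appended as the trailing dimension of the composed map.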

// Return true if the contract op can be converted to an MMA matmul.
static bool contractSupportsMMAMatrixType(vector::ContractionOp contract,
                                          bool useNvGpu) {
  if (llvm::size(contract.getMasks()) != 0)
    return false;

  using MapList = ArrayRef<ArrayRef<AffineExpr>>;
  auto infer = [](MapList m) { return AffineMap::inferFromExprList(m); };
  AffineExpr m, n, k;
  bindDims(contract.getContext(), m, n, k);
  auto iteratorTypes = contract.getIteratorTypes().getValue();
  if (!(isParallelIterator(iteratorTypes[0]) &&
        isParallelIterator(iteratorTypes[1]) &&
        isReductionIterator(iteratorTypes[2])))
    return false;

  // The contract needs to represent a matmul to be able to convert to
  // MMAMatrix matmul.
  if (!useNvGpu &&
      contract.getIndexingMapsArray() != infer({{m, k}, {k, n}, {m, n}}))
    return false;
  if (useNvGpu &&
      contract.getIndexingMapsArray() != infer({{m, k}, {n, k}, {m, n}}))
    return false;

  return true;
}
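
// For reference, a contract accepted on the !useNvGpu path looks like this
// hand-written sketch (row-major matmul maps, (m, k) x (k, n) -> (m, n)):
//   %d = vector.contract {
//          indexing_maps = [affine_map<(m, n, k) -> (m, k)>,
//                           affine_map<(m, n, k) -> (k, n)>,
//                           affine_map<(m, n, k) -> (m, n)>],
//          iterator_types = ["parallel", "parallel", "reduction"],
//          kind = #vector.kind<add>}
//        %a, %b, %c
//        : vector<16x16xf16>, vector<16x16xf16> into vector<16x16xf16>
// The useNvGpu path instead expects the RHS transposed, (m, k) x (n, k).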

// Return the stride for the dimension 0 of |type| if it is a memref and has a
// constant stride.
static llvm::Optional<int64_t>
getMemrefConstantHorizontalStride(ShapedType type) {
  auto memrefType = type.dyn_cast<MemRefType>();
  if (!memrefType)
    return llvm::None;
  // If the memref is 0 or 1D the horizontal stride is 0.
  if (memrefType.getRank() < 2)
    return 0;
  int64_t offset = 0;
  SmallVector<int64_t, 2> strides;
  if (failed(getStridesAndOffset(memrefType, strides, offset)) ||
      strides.back() != 1)
    return llvm::None;
  int64_t stride = strides[strides.size() - 2];
  if (stride == ShapedType::kDynamicStrideOrOffset)
    return llvm::None;
  return stride;
}
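
// Example (sketch): a row-major memref<16x32xf16> has strides [32, 1], so the
// horizontal stride returned is 32. A memref whose leading stride is dynamic
// (e.g. produced by a non-contiguous subview) yields llvm::None, and the
// transfer op is then rejected by the checks below.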

// Return true if the transfer op can be converted to a MMA matrix load.
static bool transferReadSupportsMMAMatrixType(vector::TransferReadOp readOp,
                                              bool useNvGpu) {
  if (readOp.getMask() || readOp.hasOutOfBoundsDim() ||
      readOp.getVectorType().getRank() != 2)
    return false;
  if (!getMemrefConstantHorizontalStride(readOp.getShapedType()))
    return false;
  AffineMap map = readOp.getPermutationMap();
  OpBuilder b(readOp.getContext());
  AffineExpr innerDim = b.getAffineDimExpr(map.getNumDims() - 1);
  AffineExpr zero = b.getAffineConstantExpr(0);
  auto broadcastInnerDim = AffineMap::get(map.getNumDims(), 0, {zero, innerDim},
                                          readOp.getContext());

  if (!useNvGpu) {
    // TODO: Support transpose once it is added to GPU dialect ops.
    // For now we only support (d0, d1) -> (d0, d1) and (d0, d1) -> (0, d1).
    return map.isMinorIdentity() || map == broadcastInnerDim;
  }

  return true;
}

// Return true if the transfer op can be converted to a MMA matrix store.
static bool
transferWriteSupportsMMAMatrixType(vector::TransferWriteOp writeOp) {
  // TODO: support 0-d corner case.
  if (writeOp.getTransferRank() == 0)
    return false;

  if (writeOp.getMask() || writeOp.hasOutOfBoundsDim() ||
      writeOp.getVectorType().getRank() != 2)
    return false;
  if (!getMemrefConstantHorizontalStride(writeOp.getShapedType()))
    return false;
  // TODO: Support transpose once it is added to GPU dialect ops.
  if (!writeOp.getPermutationMap().isMinorIdentity())
    return false;
  return true;
}

/// Return true if the constant is a splat to a 2D vector so that it can be
/// converted to a MMA constant matrix op.
static bool constantSupportsMMAMatrixType(arith::ConstantOp constantOp) {
  auto vecType = constantOp.getType().dyn_cast<VectorType>();
  if (!vecType || vecType.getRank() != 2)
    return false;
  return constantOp.getValue().isa<SplatElementsAttr>();
}

/// Return true if this is a broadcast from scalar to a 2D vector.
static bool broadcastSupportsMMAMatrixType(vector::BroadcastOp broadcastOp) {
  return broadcastOp.getVectorType().getRank() == 2 &&
         broadcastOp.getSource().getType().isa<FloatType>();
}

/// Return the MMA elementwise enum associated with `op` if it is supported.
/// Return `llvm::None` otherwise.
static llvm::Optional<gpu::MMAElementwiseOp>
convertElementwiseOpToMMA(Operation *op) {
  if (isa<arith::AddFOp>(op))
    return gpu::MMAElementwiseOp::ADDF;
  if (isa<arith::MulFOp>(op))
    return gpu::MMAElementwiseOp::MULF;
  if (isa<arith::MaxFOp>(op))
    return gpu::MMAElementwiseOp::MAXF;
  if (isa<arith::MinFOp>(op))
    return gpu::MMAElementwiseOp::MINF;
  if (isa<arith::DivFOp>(op))
    return gpu::MMAElementwiseOp::DIVF;
  return llvm::None;
}

/// Return true if the op is supported as elementwise op on MMAMatrix type.
static bool elementwiseSupportsMMAMatrixType(Operation *op) {
  return convertElementwiseOpToMMA(op).has_value();
}

static bool supportsMMaMatrixType(Operation *op, bool useNvGpu) {
  if (isa<scf::ForOp, scf::YieldOp>(op))
    return true;
  if (auto transferRead = dyn_cast<vector::TransferReadOp>(op))
    return transferReadSupportsMMAMatrixType(transferRead, useNvGpu);
  if (auto transferWrite = dyn_cast<vector::TransferWriteOp>(op))
    return transferWriteSupportsMMAMatrixType(transferWrite);
  if (auto contract = dyn_cast<vector::ContractionOp>(op))
    return contractSupportsMMAMatrixType(contract, useNvGpu);
  if (auto constant = dyn_cast<arith::ConstantOp>(op))
    return constantSupportsMMAMatrixType(constant);
  if (auto broadcast = dyn_cast<vector::BroadcastOp>(op))
    return broadcastSupportsMMAMatrixType(broadcast);
  return elementwiseSupportsMMAMatrixType(op);
}

/// Return an unsorted slice handling scf.for region differently than
/// `getSlice`. In scf.for we only want to include as part of the slice
/// elements that are part of the use/def chain.
static SetVector<Operation *> getSliceContract(Operation *op,
                                               TransitiveFilter backwardFilter,
                                               TransitiveFilter forwardFilter) {
  SetVector<Operation *> slice;
  slice.insert(op);
  unsigned currentIndex = 0;
  SetVector<Operation *> backwardSlice;
  SetVector<Operation *> forwardSlice;
  while (currentIndex != slice.size()) {
    auto *currentOp = (slice)[currentIndex];
    // Compute and insert the backwardSlice starting from currentOp.
    backwardSlice.clear();
    getBackwardSlice(currentOp, &backwardSlice, backwardFilter);
    slice.insert(backwardSlice.begin(), backwardSlice.end());

    // Compute and insert the forwardSlice starting from currentOp.
    forwardSlice.clear();
    // Special case for ForOp: we don't want to include the whole region but
    // only the values using the region arguments.
    // TODO: We should refine this to only care about the region arguments
    // being converted to matrix type.
    if (auto forOp = dyn_cast<scf::ForOp>(currentOp)) {
      for (Value forOpResult : forOp.getResults())
        getForwardSlice(forOpResult, &forwardSlice, forwardFilter);
      for (BlockArgument &arg : forOp.getRegionIterArgs())
        getForwardSlice(arg, &forwardSlice, forwardFilter);
    } else {
      getForwardSlice(currentOp, &forwardSlice, forwardFilter);
    }
    slice.insert(forwardSlice.begin(), forwardSlice.end());
    ++currentIndex;
  }
  return slice;
}

// Analyze the slice of operations rooted at each contract op to figure out if
// the whole slice can be converted to MMA operations.
static SetVector<Operation *> getOpToConvert(mlir::Operation *op,
                                             bool useNvGpu) {
  auto hasVectorDest = [](Operation *op) {
    return llvm::any_of(op->getResultTypes(),
                        [](Type t) { return t.isa<VectorType>(); });
  };
  auto hasVectorSrc = [](Operation *op) {
    return llvm::any_of(op->getOperandTypes(),
                        [](Type t) { return t.isa<VectorType>(); });
  };
  SetVector<Operation *> opToConvert;
  op->walk([&](vector::ContractionOp contract) {
    if (opToConvert.contains(contract.getOperation()))
      return;
    SetVector<Operation *> dependentOps =
        getSliceContract(contract, hasVectorDest, hasVectorSrc);
    // If any instruction cannot use the MMA matrix type, drop the whole
    // chain. MMA matrices are stored in an opaque type so they cannot be used
    // by all operations.
    if (llvm::any_of(dependentOps, [useNvGpu](Operation *op) {
          return !supportsMMaMatrixType(op, useNvGpu);
        }))
      return;
    opToConvert.insert(dependentOps.begin(), dependentOps.end());
  });
  // Sort the operations so that we can convert them in topological order.
  return topologicalSort(opToConvert);
}

namespace {
// Transform the contract into (m, k) x (k, n) x (m, n) form so that it can be
// converted to an MMA matmul.
struct PrepareContractToGPUMMA
    : public OpRewritePattern<vector::ContractionOp> {
  using OpRewritePattern<vector::ContractionOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ContractionOp op,
                                PatternRewriter &rewriter) const override {
    Location loc = op.getLoc();
    Value lhs = op.getLhs(), rhs = op.getRhs(), res = op.getAcc();

    // Set up the parallel/reduction structure in the right form.
    using MapList = ArrayRef<ArrayRef<AffineExpr>>;
    auto infer = [](MapList m) { return AffineMap::inferFromExprList(m); };
    AffineExpr m, n, k;
    bindDims(rewriter.getContext(), m, n, k);
    static constexpr std::array<int64_t, 2> perm = {1, 0};
    auto iteratorTypes = op.getIteratorTypes().getValue();
    SmallVector<AffineMap, 4> maps = op.getIndexingMapsArray();
    if (!(isParallelIterator(iteratorTypes[0]) &&
          isParallelIterator(iteratorTypes[1]) &&
          isReductionIterator(iteratorTypes[2])))
      return failure();
    //
    // Two outer parallel, one inner reduction (matmat flavor).
    //
    if (maps == infer({{m, k}, {k, n}, {m, n}})) {
      // This is the classical row-major matmul, nothing to do.
      return failure();
    }
    if (maps == infer({{m, k}, {n, k}, {m, n}})) {
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
    } else if (maps == infer({{k, m}, {k, n}, {m, n}})) {
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{k, m}, {n, k}, {m, n}})) {
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{m, k}, {k, n}, {n, m}})) {
      std::swap(rhs, lhs);
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{m, k}, {n, k}, {n, m}})) {
      std::swap(rhs, lhs);
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
    } else if (maps == infer({{k, m}, {k, n}, {n, m}})) {
      std::swap(lhs, rhs);
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{k, m}, {n, k}, {n, m}})) {
      std::swap(lhs, rhs);
    } else {
      return failure();
    }
    rewriter.replaceOpWithNewOp<vector::ContractionOp>(
        op, lhs, rhs, res,
        rewriter.getAffineMapArrayAttr(infer({{m, k}, {k, n}, {m, n}})),
        op.getIteratorTypes());
    return success();
  }
};
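
// Illustrative sketch of one case above: with maps {{m, k}, {n, k}, {m, n}}
// (transposed-B matmul), the pattern emits
//   %rhsT = vector.transpose %rhs, [1, 0]
//           : vector<8x16xf16> to vector<16x8xf16>
// and rebuilds the contract with the canonical {{m, k}, {k, n}, {m, n}} maps
// over (%lhs, %rhsT, %acc). The vector sizes here are hypothetical.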

// Merge the transpose op into the transfer read op. Transposes are not
// supported on MMA types, but the MMA load can transpose the matrix when
// loading.
struct CombineTransferReadOpTranspose final
    : public OpRewritePattern<vector::TransposeOp> {
  using OpRewritePattern<vector::TransposeOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::TransposeOp op,
                                PatternRewriter &rewriter) const override {
    auto transferReadOp =
        op.getVector().getDefiningOp<vector::TransferReadOp>();
    if (!transferReadOp)
      return failure();

    // TODO: support 0-d corner case.
    if (transferReadOp.getTransferRank() == 0)
      return failure();

    if (transferReadOp.getMask() || transferReadOp.hasOutOfBoundsDim())
      return failure();
    SmallVector<int64_t, 2> perm;
    op.getTransp(perm);
    SmallVector<unsigned, 2> permU;
    for (int64_t o : perm)
      permU.push_back(unsigned(o));
    AffineMap permutationMap =
        AffineMap::getPermutationMap(permU, op.getContext());
    AffineMap newMap =
        permutationMap.compose(transferReadOp.getPermutationMap());
    rewriter.replaceOpWithNewOp<vector::TransferReadOp>(
        op, op.getType(), transferReadOp.getSource(),
        transferReadOp.getIndices(), AffineMapAttr::get(newMap),
        transferReadOp.getPadding(), transferReadOp.getMask(),
        transferReadOp.getInBoundsAttr());
    return success();
  }
};
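
// Illustrative sketch of the folding (hypothetical shapes):
//   %r = vector.transfer_read %mem[%i, %j], %pad
//        {permutation_map = affine_map<(d0, d1) -> (d0, d1)>}
//        : memref<16x16xf16>, vector<16x16xf16>
//   %t = vector.transpose %r, [1, 0] : vector<16x16xf16> to vector<16x16xf16>
// becomes a single transfer_read with
//   permutation_map = affine_map<(d0, d1) -> (d1, d0)>
// i.e. the transpose permutation composed with the read's permutation map.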

} // namespace

// MMA types have a different layout based on how they are used in matmul ops.
// Figure out the right layout to use by looking at op uses.
// TODO: Change the GPU dialect to abstract the layout at this level and
// only care about it during lowering to NVVM.
template <typename OpTy>
static const char *inferFragType(OpTy op) {
  for (Operation *users : op->getUsers()) {
    auto contract = dyn_cast<vector::ContractionOp>(users);
    if (!contract)
      continue;
    if (contract.getLhs() == op.getResult())
      return "AOp";
    if (contract.getRhs() == op.getResult())
      return "BOp";
  }
  return "COp";
}

static void convertTransferReadOp(vector::TransferReadOp op,
                                  llvm::DenseMap<Value, Value> &valueMapping) {
  assert(op.getTransferRank() > 0 && "unexpected 0-d transfer");
  assert(transferReadSupportsMMAMatrixType(op, /*useNvGpu=*/false));
  Optional<int64_t> stride =
      getMemrefConstantHorizontalStride(op.getShapedType());
  AffineMap map = op.getPermutationMap();
  // Handle broadcast by setting the stride to 0.
  if (map.getResult(0).isa<AffineConstantExpr>()) {
    assert(map.getResult(0).cast<AffineConstantExpr>().getValue() == 0);
    stride = 0;
  }
  assert(stride);
  const char *fragType = inferFragType(op);
  gpu::MMAMatrixType type =
      gpu::MMAMatrixType::get(op.getVectorType().getShape(),
                              op.getVectorType().getElementType(), fragType);
  OpBuilder b(op);
  Value load = b.create<gpu::SubgroupMmaLoadMatrixOp>(
      op.getLoc(), type, op.getSource(), op.getIndices(),
      b.getIndexAttr(*stride));
  valueMapping[op.getResult()] = load;
}
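
// Illustrative sketch of the rewrite (hypothetical shapes and indices):
//   %v = vector.transfer_read %mem[%i, %j], %pad
//        : memref<16x16xf16>, vector<16x16xf16>
// becomes
//   %v = gpu.subgroup_mma_load_matrix %mem[%i, %j]
//        {leadDimension = 16 : index}
//        : memref<16x16xf16> -> !gpu.mma_matrix<16x16xf16, "AOp">
// with the fragment kind ("AOp"/"BOp"/"COp") inferred from the contract that
// consumes the loaded value.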

static void convertTransferWriteOp(vector::TransferWriteOp op,
                                   llvm::DenseMap<Value, Value> &valueMapping) {
  assert(transferWriteSupportsMMAMatrixType(op));
  Optional<int64_t> stride =
      getMemrefConstantHorizontalStride(op.getShapedType());
  assert(stride);
  OpBuilder b(op);
  Value matrix = valueMapping.find(op.getVector())->second;
  b.create<gpu::SubgroupMmaStoreMatrixOp>(op.getLoc(), matrix, op.getSource(),
                                          op.getIndices(),
                                          b.getIndexAttr(*stride));
  op.erase();
}
430edd9515bSthomasraoux
4311ca772edSChristopher Bate /// Returns the vector type which represents a matrix fragment.
4321ca772edSChristopher Bate static VectorType
getMmaSyncVectorOperandType(const nvgpu::FragmentElementInfo & regInfo)4331ca772edSChristopher Bate getMmaSyncVectorOperandType(const nvgpu::FragmentElementInfo ®Info) {
4341ca772edSChristopher Bate SmallVector<int64_t> shape{regInfo.numRegistersPerFragment,
4351ca772edSChristopher Bate regInfo.elementsPerRegister};
4361ca772edSChristopher Bate Type elType = regInfo.registerLLVMType;
4371ca772edSChristopher Bate if (auto vecType = elType.dyn_cast<VectorType>())
4381ca772edSChristopher Bate elType = vecType.getElementType();
4391ca772edSChristopher Bate return VectorType::get(shape, elType);
4401ca772edSChristopher Bate }
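
// For example (a sketch; the exact numbers come from getMmaSyncRegisterType):
// an f16 operand whose fragment occupies 4 registers of vector<2xf16> each
// maps to the per-thread type vector<4x2xf16>, i.e. registers x elements,
// with the register-level vector type unwrapped to its element type.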

/// Convert a 2D splat ConstantOp to a splat constant of the per-thread
/// fragment vector type used by nvgpu.mma.sync operands.
static LogicalResult
convertConstantOpMmaSync(arith::ConstantOp op,
                         llvm::DenseMap<Value, Value> &valueMapping) {
  OpBuilder b(op);
  FailureOr<nvgpu::WarpMatrixInfo> warpMatrixInfo =
      nvgpu::getWarpMatrixInfo(op);
  if (failed(warpMatrixInfo))
    return failure();

  FailureOr<nvgpu::FragmentElementInfo> regInfo =
      nvgpu::getMmaSyncRegisterType(*warpMatrixInfo);
  if (failed(regInfo))
    return failure();

  VectorType vectorType = getMmaSyncVectorOperandType(*regInfo);
  auto dense = op.getValue().dyn_cast<SplatElementsAttr>();
  if (!dense)
    return failure();
  Value result = b.create<arith::ConstantOp>(
      op.getLoc(), vectorType,
      DenseElementsAttr::get(vectorType, dense.getSplatValue<Attribute>()));
  valueMapping[op.getResult()] = result;
  return success();
}

static LogicalResult
createLdMatrixCompatibleLoads(vector::TransferReadOp op, OpBuilder &builder,
                              llvm::DenseMap<Value, Value> &valueMapping) {
  Location loc = op->getLoc();

  FailureOr<nvgpu::WarpMatrixInfo> warpMatrixInfo =
      nvgpu::getWarpMatrixInfo(op);
  if (failed(warpMatrixInfo))
    return failure();

  FailureOr<nvgpu::FragmentElementInfo> regInfo =
      nvgpu::getMmaSyncRegisterType(*warpMatrixInfo);
  if (failed(regInfo))
    return failure();

  FailureOr<nvgpu::LdMatrixParams> params = nvgpu::getLdMatrixParams(
      *warpMatrixInfo,
      /*transpose=*/!op.getPermutationMap().isMinorIdentity());
  if (failed(params)) {
    return op->emitError()
           << "failed to convert vector.transfer_read to ldmatrix; this op "
              "likely should not be converted to a nvgpu.ldmatrix call.";
  }

  // Adjust the load offset.
  auto laneId = builder.create<gpu::LaneIdOp>(loc);
  FailureOr<AffineMap> offsets =
      nvgpu::getLaneIdToLdMatrixMatrixCoord(loc, builder, *params);
  if (failed(offsets))
    return failure();

  VectorType vectorType = getMmaSyncVectorOperandType(*regInfo);

  SmallVector<Value, 4> indices;
  getXferIndices<vector::TransferReadOp>(builder, op, *offsets, {laneId},
                                         indices);
  nvgpu::LdMatrixOp newOp = builder.create<nvgpu::LdMatrixOp>(
      loc, vectorType, op.getSource(), indices,
      !op.getPermutationMap().isMinorIdentity(), params->numTiles);
  valueMapping[op] = newOp->getResult(0);
  return success();
}
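
// Illustrative sketch of the emitted load (hypothetical shapes, shared-memory
// operand in address space 3):
//   %lane = gpu.lane_id
//   %frag = nvgpu.ldmatrix %src[%x, %y]
//           {numTiles = 4 : i32, transpose = false}
//           : memref<16x16xf16, 3> -> vector<4x2xf16>
// where (%x, %y) are the transfer indices offset by the per-lane matrix
// coordinate computed from %lane.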

static LogicalResult
createNonLdMatrixLoads(vector::TransferReadOp op, OpBuilder &builder,
                       llvm::DenseMap<Value, Value> &valueMapping) {
  Location loc = op.getLoc();
  FailureOr<nvgpu::WarpMatrixInfo> warpMatrixInfo =
      nvgpu::getWarpMatrixInfo(op);
  if (failed(warpMatrixInfo))
    return failure();
  FailureOr<nvgpu::FragmentElementInfo> regInfo =
      nvgpu::getMmaSyncRegisterType(*warpMatrixInfo);
  if (failed(regInfo)) {
    op->emitError() << "failed to deduce register fragment type during "
                       "conversion to distributed non-ldmatrix compatible load";
    return failure();
  }

  Value laneId = builder.create<gpu::LaneIdOp>(loc);
  SmallVector<Value, 4> elements;

  // This is the individual element type.
  Type loadedElType = regInfo->registerLLVMType;
  VectorType vectorType = getMmaSyncVectorOperandType(*regInfo);

  Value fill = builder.create<arith::ConstantOp>(
      op.getLoc(), vectorType.getElementType(),
      builder.getZeroAttr(vectorType.getElementType()));
  Value result = builder.create<vector::SplatOp>(op.getLoc(), fill, vectorType);

  bool isTransposeLoad = !op.getPermutationMap().isMinorIdentity();

  // If we are not transposing, then we can use vectorized loads. Otherwise, we
  // must load each element individually.
  if (!isTransposeLoad) {
    if (!loadedElType.isa<VectorType>()) {
      loadedElType = VectorType::get({1}, loadedElType);
    }

    for (int i = 0; i < vectorType.getShape()[0]; i++) {
      FailureOr<AffineMap> coords = nvgpu::getLaneIdAndValueIdToOperandCoord(
          op.getLoc(), builder, *warpMatrixInfo);
      if (failed(coords))
        return failure();
      Value logicalValueId = builder.create<arith::ConstantOp>(
          loc, builder.getIndexType(),
          builder.getIndexAttr(i * regInfo->elementsPerRegister));
      SmallVector<Value, 4> newIndices;
      getXferIndices<vector::TransferReadOp>(
          builder, op, *coords, {laneId, logicalValueId}, newIndices);

      Value el = builder.create<vector::LoadOp>(loc, loadedElType,
                                                op.getSource(), newIndices);
      result = builder.create<vector::InsertOp>(loc, el, result,
                                                builder.getI64ArrayAttr(i));
    }
  } else {
    if (auto vecType = loadedElType.dyn_cast<VectorType>()) {
      loadedElType = vecType.getElementType();
    }
    for (int i = 0; i < vectorType.getShape()[0]; i++) {
      for (unsigned innerIdx = 0; innerIdx < vectorType.getShape()[1];
           innerIdx++) {

        Value logicalValueId = builder.create<arith::ConstantOp>(
            loc, builder.getIndexType(),
            builder.getIndexAttr(i * regInfo->elementsPerRegister + innerIdx));
        FailureOr<AffineMap> coords = nvgpu::getLaneIdAndValueIdToOperandCoord(
            op.getLoc(), builder, *warpMatrixInfo);
        if (failed(coords))
          return failure();

        SmallVector<Value, 4> newIndices;
        getXferIndices<vector::TransferReadOp>(
            builder, op, *coords, {laneId, logicalValueId}, newIndices);
        Value el = builder.create<memref::LoadOp>(op.getLoc(), loadedElType,
                                                  op.getSource(), newIndices);
        result = builder.create<vector::InsertOp>(
            op.getLoc(), el, result, builder.getI64ArrayAttr({i, innerIdx}));
      }
    }
  }

  valueMapping[op.getResult()] = result;
  return success();
}

/// Converts a `vector.transfer_read` operation directly to either a
/// `vector.load` or a `nvgpu.ldmatrix` operation. This function should only be
/// used when converting to `nvgpu.mma.sync` operations.
static LogicalResult
convertTransferReadToLoads(vector::TransferReadOp op,
                           llvm::DenseMap<Value, Value> &valueMapping) {
  OpBuilder b(op);

  FailureOr<nvgpu::WarpMatrixInfo> warpMatrixInfo =
      nvgpu::getWarpMatrixInfo(op);
  if (failed(warpMatrixInfo))
    return failure();

  bool isLdMatrixCompatible =
      op.getSource().getType().cast<MemRefType>().getMemorySpaceAsInt() == 3 &&
      nvgpu::inferTileWidthInBits(*warpMatrixInfo) == 128;

  VectorType vecTy = op.getVectorType();
  int64_t bitWidth = vecTy.getElementType().getIntOrFloatBitWidth();

  // When we are transposing the B operand, ldmatrix will only work if we have
  // at least 8 rows to read and the width to read for the transpose is 128
  // bits.
  if (!op.getPermutationMap().isMinorIdentity() &&
      (bitWidth != 16 || vecTy.getDimSize(1) < 8 ||
       vecTy.getDimSize(0) * bitWidth < 128))
    isLdMatrixCompatible = false;

  if (!isLdMatrixCompatible)
    return createNonLdMatrixLoads(op, b, valueMapping);

  return createLdMatrixCompatibleLoads(op, b, valueMapping);
}

static LogicalResult
convertTransferWriteToStores(vector::TransferWriteOp op,
                             llvm::DenseMap<Value, Value> &valueMapping) {
  OpBuilder b(op);
  Location loc = op->getLoc();
  Value matrix = valueMapping.find(op.getVector())->second;

  FailureOr<nvgpu::WarpMatrixInfo> warpMatrixInfo =
      nvgpu::getWarpMatrixInfo(op);
  if (failed(warpMatrixInfo))
    return failure();
  FailureOr<nvgpu::FragmentElementInfo> regInfo =
      nvgpu::getMmaSyncRegisterType(*warpMatrixInfo);
  if (failed(regInfo))
    return failure();

  VectorType vectorType = getMmaSyncVectorOperandType(*regInfo);
  Value laneId = b.create<gpu::LaneIdOp>(loc);

  for (unsigned i = 0; i < vectorType.getShape()[0]; i++) {
    Value logicalValueId = b.create<arith::ConstantOp>(
        loc, b.getIndexType(),
        b.getIndexAttr(i * regInfo->elementsPerRegister));
    FailureOr<AffineMap> coords = nvgpu::getLaneIdAndValueIdToOperandCoord(
        op.getLoc(), b, *warpMatrixInfo);
    if (failed(coords))
      return failure();

    Value el = b.create<vector::ExtractOp>(loc, matrix, ArrayRef<int64_t>{i});
    SmallVector<Value, 4> newIndices;
    getXferIndices<vector::TransferWriteOp>(
        b, op, *coords, {laneId, logicalValueId}, newIndices);
    b.create<vector::StoreOp>(loc, el, op.getSource(), newIndices);
  }
  op->erase();
  return success();
}

static void convertContractOp(vector::ContractionOp op,
                              llvm::DenseMap<Value, Value> &valueMapping) {
  OpBuilder b(op);
  Value opA = valueMapping.find(op.getLhs())->second;
  Value opB = valueMapping.find(op.getRhs())->second;
  Value opC = valueMapping.find(op.getAcc())->second;
  Value matmul = b.create<gpu::SubgroupMmaComputeOp>(op.getLoc(), opC.getType(),
                                                     opA, opB, opC);
  valueMapping[op.getResult()] = matmul;
}
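
// Illustrative sketch of the result (hypothetical 16x16x16 shape):
//   %d = gpu.subgroup_mma_compute %a, %b, %c
//        : !gpu.mma_matrix<16x16xf16, "AOp">,
//          !gpu.mma_matrix<16x16xf16, "BOp">
//        -> !gpu.mma_matrix<16x16xf16, "COp">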

static LogicalResult
convertContractOpToMmaSync(vector::ContractionOp op,
                           llvm::DenseMap<Value, Value> &valueMapping) {
  OpBuilder b(op);
  Value opA = valueMapping.find(op.getLhs())->second;
  Value opB = valueMapping.find(op.getRhs())->second;
  Value opC = valueMapping.find(op.getAcc())->second;
  int64_t m = op.getLhs().getType().cast<VectorType>().getShape()[0];
  int64_t n = op.getRhs().getType().cast<VectorType>().getShape()[0];
  int64_t k = op.getLhs().getType().cast<VectorType>().getShape()[1];
  Value matmul = b.create<nvgpu::MmaSyncOp>(
      op.getLoc(), opC.getType(), opA, opB, opC, b.getI64ArrayAttr({m, n, k}));
  valueMapping[op.getResult()] = matmul;
  return success();
}
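
// Illustrative sketch (hypothetical m16n8k16 f16 fragments):
//   %d = nvgpu.mma.sync(%a, %b, %c) {mmaShape = [16, 8, 16]}
//        : (vector<4x2xf16>, vector<2x2xf16>, vector<2x2xf16>)
//        -> vector<2x2xf16>
// The mmaShape is taken from the original vector.contract operand shapes,
// while the operands are the already-distributed per-thread fragments.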

/// Convert a 2D splat ConstantOp to a SubgroupMmaConstantMatrix op.
static void convertConstantOp(arith::ConstantOp op,
                              llvm::DenseMap<Value, Value> &valueMapping) {
  assert(constantSupportsMMAMatrixType(op));
  OpBuilder b(op);
  Attribute splat =
      op.getValue().cast<SplatElementsAttr>().getSplatValue<Attribute>();
  auto scalarConstant =
      b.create<arith::ConstantOp>(op.getLoc(), splat.getType(), splat);
  const char *fragType = inferFragType(op);
  auto vecType = op.getType().cast<VectorType>();
  gpu::MMAMatrixType type = gpu::MMAMatrixType::get(
      vecType.getShape(), vecType.getElementType(), llvm::StringRef(fragType));
  auto matrix = b.create<gpu::SubgroupMmaConstantMatrixOp>(op.getLoc(), type,
                                                           scalarConstant);
  valueMapping[op.getResult()] = matrix;
}

/// Convert a vector.broadcast from scalar to a SubgroupMmaConstantMatrix op.
static void convertBroadcastOp(vector::BroadcastOp op,
                               llvm::DenseMap<Value, Value> &valueMapping) {
  assert(broadcastSupportsMMAMatrixType(op));
  OpBuilder b(op);
  const char *fragType = inferFragType(op);
  auto vecType = op.getVectorType();
  gpu::MMAMatrixType type = gpu::MMAMatrixType::get(
      vecType.getShape(), vecType.getElementType(), llvm::StringRef(fragType));
  auto matrix = b.create<gpu::SubgroupMmaConstantMatrixOp>(op.getLoc(), type,
                                                           op.getSource());
  valueMapping[op.getResult()] = matrix;
}

// Replace the ForOp with a new ForOp with extra operands. The YieldOp is not
// updated and needs to be updated separately for the loop to be correct.
static scf::ForOp replaceForOpWithNewSignature(OpBuilder &b, scf::ForOp loop,
                                               ValueRange newIterOperands) {
  // Create a new loop before the existing one, with the extra operands.
  OpBuilder::InsertionGuard g(b);
  b.setInsertionPoint(loop);
  auto operands = llvm::to_vector<4>(loop.getIterOperands());
  operands.append(newIterOperands.begin(), newIterOperands.end());
  scf::ForOp newLoop =
      b.create<scf::ForOp>(loop.getLoc(), loop.getLowerBound(),
                           loop.getUpperBound(), loop.getStep(), operands);
  newLoop.getBody()->erase();
  newLoop.getLoopBody().getBlocks().splice(
      newLoop.getLoopBody().getBlocks().begin(),
      loop.getLoopBody().getBlocks());
  for (Value operand : newIterOperands)
    newLoop.getBody()->addArgument(operand.getType(), operand.getLoc());

  for (auto it : llvm::zip(loop.getResults(), newLoop.getResults().take_front(
                                                  loop.getNumResults())))
    std::get<0>(it).replaceAllUsesWith(std::get<1>(it));
  loop.erase();
  return newLoop;
}

static void convertForOp(scf::ForOp op,
                         llvm::DenseMap<Value, Value> &valueMapping) {
  SmallVector<Value> newOperands;
  SmallVector<std::pair<size_t, size_t>> argMapping;
  for (const auto &operand : llvm::enumerate(op.getIterOperands())) {
    auto it = valueMapping.find(operand.value());
    if (it == valueMapping.end())
      continue;
    argMapping.push_back(std::make_pair(
        operand.index(), op.getNumIterOperands() + newOperands.size()));
    newOperands.push_back(it->second);
  }
  OpBuilder b(op);
  scf::ForOp newForOp = replaceForOpWithNewSignature(b, op, newOperands);
  Block &loopBody = *newForOp.getBody();
  for (auto mapping : argMapping) {
    valueMapping[newForOp.getResult(mapping.first)] =
        newForOp.getResult(mapping.second);
    valueMapping[loopBody.getArgument(mapping.first +
                                      newForOp.getNumInductionVars())] =
        loopBody.getArgument(mapping.second + newForOp.getNumInductionVars());
  }
}

static void convertYieldOp(scf::YieldOp op,
                           llvm::DenseMap<Value, Value> &valueMapping) {
  OpBuilder b(op);
  auto loop = cast<scf::ForOp>(op->getParentOp());
  auto yieldOperands = llvm::to_vector<4>(op.getOperands());
  for (const auto &operand : llvm::enumerate(op.getOperands())) {
    auto it = valueMapping.find(operand.value());
    if (it == valueMapping.end())
      continue;
    // Replace the yield of the old value with the for op argument to make it
    // easier to remove the dead code.
    yieldOperands[operand.index()] = loop.getIterOperands()[operand.index()];
    yieldOperands.push_back(it->second);
  }
  b.create<scf::YieldOp>(op.getLoc(), yieldOperands);
  op.erase();
}

/// Convert an elementwise op to the equivalent elementwise op on MMA matrix.
static void convertElementwiseOp(Operation *op, gpu::MMAElementwiseOp opType,
                                 llvm::DenseMap<Value, Value> &valueMapping) {
  OpBuilder b(op);
  SmallVector<Value> matrixOperands;
  for (Value operand : op->getOperands())
    matrixOperands.push_back(valueMapping.find(operand)->second);
  Value newOp = b.create<gpu::SubgroupMmaElementwiseOp>(
      op->getLoc(), matrixOperands[0].getType(), matrixOperands, opType);
  valueMapping[op->getResult(0)] = newOp;
}
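
// Illustrative sketch (hypothetical operands): an arith.addf on two mapped
// values becomes
//   %r = gpu.subgroup_mma_elementwise addf %a, %b
//        : (!gpu.mma_matrix<16x16xf16, "COp">,
//           !gpu.mma_matrix<16x16xf16, "COp">)
//        -> !gpu.mma_matrix<16x16xf16, "COp">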

void mlir::populatePrepareVectorToMMAPatterns(RewritePatternSet &patterns,
                                              bool useNvGpu) {
  if (!useNvGpu) {
    patterns.add<PrepareContractToGPUMMA, CombineTransferReadOpTranspose>(
        patterns.getContext());
    return;
  }
  patterns
      .add<nvgpu::PrepareContractToGPUMMASync, CombineTransferReadOpTranspose>(
          patterns.getContext());
}

void mlir::convertVectorToMMAOps(Operation *rootOp) {
  SetVector<Operation *> ops = getOpToConvert(rootOp, /*useNvGpu=*/false);
  llvm::DenseMap<Value, Value> valueMapping;
  for (Operation *op : ops) {
    if (auto transferRead = dyn_cast<vector::TransferReadOp>(op)) {
      convertTransferReadOp(transferRead, valueMapping);
    } else if (auto transferWrite = dyn_cast<vector::TransferWriteOp>(op)) {
      convertTransferWriteOp(transferWrite, valueMapping);
    } else if (auto contractOp = dyn_cast<vector::ContractionOp>(op)) {
      convertContractOp(contractOp, valueMapping);
    } else if (auto constantOp = dyn_cast<arith::ConstantOp>(op)) {
      convertConstantOp(constantOp, valueMapping);
    } else if (auto broadcastOp = dyn_cast<vector::BroadcastOp>(op)) {
      convertBroadcastOp(broadcastOp, valueMapping);
    } else if (auto forOp = dyn_cast<scf::ForOp>(op)) {
      convertForOp(forOp, valueMapping);
    } else if (auto yieldOp = dyn_cast<scf::YieldOp>(op)) {
      convertYieldOp(yieldOp, valueMapping);
    } else if (auto elementwiseType = convertElementwiseOpToMMA(op)) {
      convertElementwiseOp(op, *elementwiseType, valueMapping);
    }
  }
}

LogicalResult mlir::convertVectorToNVVMCompatibleMMASync(Operation *rootOp) {
  SetVector<Operation *> ops = getOpToConvert(rootOp, /*useNvGpu=*/true);
  llvm::DenseMap<Value, Value> valueMapping;
  for (Operation *op : ops) {
    if (llvm::TypeSwitch<Operation *, LogicalResult>(op)
            .Case([&](vector::TransferReadOp transferReadOp) {
              return convertTransferReadToLoads(transferReadOp, valueMapping);
            })
            .Case([&](vector::TransferWriteOp transferWriteOp) {
              return convertTransferWriteToStores(transferWriteOp,
                                                  valueMapping);
            })
            .Case([&](vector::ContractionOp contractionOp) {
              return convertContractOpToMmaSync(contractionOp, valueMapping);
            })
            .Case([&](scf::ForOp forOp) {
              convertForOp(forOp, valueMapping);
              return success();
            })
            .Case([&](scf::YieldOp yieldOp) {
              convertYieldOp(yieldOp, valueMapping);
              return success();
            })
            .Case([&](arith::ConstantOp constOp) {
              return convertConstantOpMmaSync(constOp, valueMapping);
            })
            .Default([&](Operation *op) {
              op->emitError() << "unhandled vector to mma type: " << *op;
              return failure();
            })
            .failed()) {
      op->emitError() << "failed to convert op " << *op;
      return failure();
    }
  }
  return success();
}

namespace {

struct ConvertVectorToGPUPass
    : public ConvertVectorToGPUBase<ConvertVectorToGPUPass> {

  explicit ConvertVectorToGPUPass(bool useNvGpu_) {
    useNvGpu.setValue(useNvGpu_);
  }

  void runOnOperation() override {
    RewritePatternSet patterns(&getContext());
    populatePrepareVectorToMMAPatterns(patterns, useNvGpu.getValue());
    if (failed(
            applyPatternsAndFoldGreedily(getOperation(), std::move(patterns))))
      return signalPassFailure();

    if (useNvGpu.getValue()) {
      if (failed(convertVectorToNVVMCompatibleMMASync(getOperation())))
        return signalPassFailure();
    }

    convertVectorToMMAOps(getOperation());
  }
};

} // namespace

std::unique_ptr<Pass> mlir::createConvertVectorToGPUPass(bool useNvGpu) {
  return std::make_unique<ConvertVectorToGPUPass>(useNvGpu);
}