//===- VectorToGPU.cpp - Convert vector to GPU dialect ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering of vector operations to GPU dialect ops.
//
//===----------------------------------------------------------------------===//

#include <type_traits>

#include "mlir/Conversion/VectorToGPU/VectorToGPU.h"

#include "../PassDetail.h"
#include "mlir/Analysis/SliceAnalysis.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/Utils/StructuredOpsUtils.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/Dialect/Vector/Utils/VectorUtils.h"
#include "mlir/IR/Builders.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "mlir/Transforms/Passes.h"

using namespace mlir;

// Return true if the contract op can be converted to an MMA matmul.
static bool contractSupportsMMAMatrixType(vector::ContractionOp contract) {
  if (llvm::size(contract.getMasks()) != 0)
    return false;

  using MapList = ArrayRef<ArrayRef<AffineExpr>>;
  auto infer = [](MapList m) { return AffineMap::inferFromExprList(m); };
  AffineExpr m, n, k;
  bindDims(contract.getContext(), m, n, k);
  auto iteratorTypes = contract.getIteratorTypes().getValue();
  if (!(isParallelIterator(iteratorTypes[0]) &&
        isParallelIterator(iteratorTypes[1]) &&
        isReductionIterator(iteratorTypes[2])))
    return false;

  // The contract needs to represent a matmul to be able to convert to
  // MMAMatrix matmul.
  if (contract.getIndexingMaps() != infer({{m, k}, {k, n}, {m, n}}))
    return false;

  return true;
}
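// For illustration, a contraction in the canonical row-major matmul form that
// the predicate above accepts looks like this (hypothetical IR; shapes and
// value names are made up for the example):
//
//   %d = vector.contract {
//            indexing_maps = [affine_map<(m, n, k) -> (m, k)>,
//                             affine_map<(m, n, k) -> (k, n)>,
//                             affine_map<(m, n, k) -> (m, n)>],
//            iterator_types = ["parallel", "parallel", "reduction"],
//            kind = #vector.kind<add>}
//        %a, %b, %c
//     : vector<16x16xf16>, vector<16x16xf16> into vector<16x16xf16>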
// Return the stride of the second-to-last dimension of |type| if it is a
// memref and has a constant stride.
static llvm::Optional<int64_t>
getMemrefConstantHorizontalStride(ShapedType type) {
  auto memrefType = type.dyn_cast<MemRefType>();
  if (!memrefType)
    return llvm::None;
  // If the memref is 0-D or 1-D the horizontal stride is 0.
  if (memrefType.getRank() < 2)
    return 0;
  int64_t offset = 0;
  SmallVector<int64_t, 2> strides;
  if (failed(getStridesAndOffset(memrefType, strides, offset)) ||
      strides.back() != 1)
    return llvm::None;
  int64_t stride = strides[strides.size() - 2];
  if (stride == ShapedType::kDynamicStrideOrOffset)
    return llvm::None;
  return stride;
}

// Return true if the transfer op can be converted to an MMA matrix load.
static bool transferReadSupportsMMAMatrixType(vector::TransferReadOp readOp) {
  if (readOp.getMask() || readOp.hasOutOfBoundsDim() ||
      readOp.getVectorType().getRank() != 2)
    return false;
  if (!getMemrefConstantHorizontalStride(readOp.getShapedType()))
    return false;
  AffineMap map = readOp.getPermutationMap();
  OpBuilder b(readOp.getContext());
  AffineExpr innerDim = b.getAffineDimExpr(map.getNumDims() - 1);
  AffineExpr zero = b.getAffineConstantExpr(0);
  auto broadcastInnerDim = AffineMap::get(map.getNumDims(), 0, {zero, innerDim},
                                          readOp.getContext());
  // TODO: Support transpose once it is added to GPU dialect ops.
  // For now we only support (d0, d1) -> (d0, d1) and (d0, d1) -> (0, d1).
  return map.isMinorIdentity() || map == broadcastInnerDim;
}

// Return true if the transfer op can be converted to an MMA matrix store.
static bool
transferWriteSupportsMMAMatrixType(vector::TransferWriteOp writeOp) {
  // TODO: support 0-d corner case.
  if (writeOp.getTransferRank() == 0)
    return false;

  if (writeOp.getMask() || writeOp.hasOutOfBoundsDim() ||
      writeOp.getVectorType().getRank() != 2)
    return false;
  if (!getMemrefConstantHorizontalStride(writeOp.getShapedType()))
    return false;
  // TODO: Support transpose once it is added to GPU dialect ops.
  if (!writeOp.getPermutationMap().isMinorIdentity())
    return false;
  return true;
}
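// For illustration, both reads below satisfy transferReadSupportsMMAMatrixType
// (hypothetical IR; names and shapes are made up): the first uses the default
// minor-identity map, the second broadcasts along the outer dimension.
//
//   %r0 = vector.transfer_read %mem[%c0, %c0], %pad {in_bounds = [true, true]}
//       : memref<32x32xf16>, vector<16x16xf16>
//   %r1 = vector.transfer_read %mem[%c0, %c0], %pad
//       {in_bounds = [true, true],
//        permutation_map = affine_map<(d0, d1) -> (0, d1)>}
//       : memref<32x32xf16>, vector<16x16xf16>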
/// Return true if the constant is a splat to a 2D vector so that it can be
/// converted to an MMA constant matrix op.
static bool constantSupportsMMAMatrixType(arith::ConstantOp constantOp) {
  auto vecType = constantOp.getType().dyn_cast<VectorType>();
  if (!vecType || vecType.getRank() != 2)
    return false;
  return constantOp.getValue().isa<SplatElementsAttr>();
}

/// Return true if this is a broadcast from scalar to a 2D vector.
static bool broadcastSupportsMMAMatrixType(vector::BroadcastOp broadcastOp) {
  return broadcastOp.getVectorType().getRank() == 2 &&
         broadcastOp.getSource().getType().isa<FloatType>();
}

/// Return the MMA elementwise enum associated with `op` if it is supported.
/// Return `llvm::None` otherwise.
static llvm::Optional<gpu::MMAElementwiseOp>
convertElementwiseOpToMMA(Operation *op) {
  if (isa<arith::AddFOp>(op))
    return gpu::MMAElementwiseOp::ADDF;
  if (isa<arith::MulFOp>(op))
    return gpu::MMAElementwiseOp::MULF;
  if (isa<arith::MaxFOp>(op))
    return gpu::MMAElementwiseOp::MAXF;
  if (isa<arith::MinFOp>(op))
    return gpu::MMAElementwiseOp::MINF;
  if (isa<arith::DivFOp>(op))
    return gpu::MMAElementwiseOp::DIVF;
  return llvm::None;
}

/// Return true if the op is supported as an elementwise op on MMAMatrix type.
static bool elementwiseSupportsMMAMatrixType(Operation *op) {
  return convertElementwiseOpToMMA(op).hasValue();
}

static bool supportsMMaMatrixType(Operation *op) {
  if (isa<scf::ForOp, scf::YieldOp>(op))
    return true;
  if (auto transferRead = dyn_cast<vector::TransferReadOp>(op))
    return transferReadSupportsMMAMatrixType(transferRead);
  if (auto transferWrite = dyn_cast<vector::TransferWriteOp>(op))
    return transferWriteSupportsMMAMatrixType(transferWrite);
  if (auto contract = dyn_cast<vector::ContractionOp>(op))
    return contractSupportsMMAMatrixType(contract);
  if (auto constant = dyn_cast<arith::ConstantOp>(op))
    return constantSupportsMMAMatrixType(constant);
  if (auto broadcast = dyn_cast<vector::BroadcastOp>(op))
    return broadcastSupportsMMAMatrixType(broadcast);
  return elementwiseSupportsMMAMatrixType(op);
}
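// For illustration, both of the following qualify (hypothetical IR) and are
// later turned into gpu.subgroup_mma_constant_matrix ops:
//
//   %cst = arith.constant dense<0.000000e+00> : vector<16x16xf16>
//   %bc  = vector.broadcast %scalar : f16 to vector<16x16xf16>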
/// Return an unsorted slice handling scf.for region differently than
/// `getSlice`. In scf.for we only want to include as part of the slice
/// elements that are part of the use/def chain.
static SetVector<Operation *> getSliceContract(Operation *op,
                                               TransitiveFilter backwardFilter,
                                               TransitiveFilter forwardFilter) {
  SetVector<Operation *> slice;
  slice.insert(op);
  unsigned currentIndex = 0;
  SetVector<Operation *> backwardSlice;
  SetVector<Operation *> forwardSlice;
  while (currentIndex != slice.size()) {
    auto *currentOp = slice[currentIndex];
    // Compute and insert the backwardSlice starting from currentOp.
    backwardSlice.clear();
    getBackwardSlice(currentOp, &backwardSlice, backwardFilter);
    slice.insert(backwardSlice.begin(), backwardSlice.end());

    // Compute and insert the forwardSlice starting from currentOp.
    forwardSlice.clear();
    // Special case for ForOp: we don't want to include the whole region but
    // only the values using the region arguments.
    // TODO: We should refine this to only care about the region arguments
    // being converted to matrix type.
    if (auto forOp = dyn_cast<scf::ForOp>(currentOp)) {
      for (Value forOpResult : forOp.getResults())
        getForwardSlice(forOpResult, &forwardSlice, forwardFilter);
      for (BlockArgument &arg : forOp.getRegionIterArgs())
        getForwardSlice(arg, &forwardSlice, forwardFilter);
    } else {
      getForwardSlice(currentOp, &forwardSlice, forwardFilter);
    }
    slice.insert(forwardSlice.begin(), forwardSlice.end());
    ++currentIndex;
  }
  return slice;
}
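// For illustration (hypothetical IR), given a matmul accumulated through loop
// iter_args:
//
//   %res = scf.for %i = %c0 to %n step %c1 iter_args(%acc = %init)
//       -> (vector<16x16xf32>) {
//     %a = vector.transfer_read ... : memref<...>, vector<16x16xf16>
//     %b = vector.transfer_read ... : memref<...>, vector<16x16xf16>
//     %d = vector.contract ... %a, %b, %acc : ...
//     scf.yield %d : vector<16x16xf32>
//   }
//   vector.transfer_write %res, ...
//
// the slice rooted at the contract contains the two reads, the loop, the
// yield and the final write, so the analysis below accepts or rejects the
// whole chain as a unit.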
// Analyze slice of operations based on convert op to figure out if the whole
// slice can be converted to MMA operations.
static SetVector<Operation *> getOpToConvert(mlir::Operation *op) {
  auto hasVectorDest = [](Operation *op) {
    return llvm::any_of(op->getResultTypes(),
                        [](Type t) { return t.isa<VectorType>(); });
  };
  auto hasVectorSrc = [](Operation *op) {
    return llvm::any_of(op->getOperandTypes(),
                        [](Type t) { return t.isa<VectorType>(); });
  };
  SetVector<Operation *> opToConvert;
  op->walk([&](vector::ContractionOp contract) {
    if (opToConvert.contains(contract.getOperation()))
      return;
    SetVector<Operation *> dependentOps =
        getSliceContract(contract, hasVectorDest, hasVectorSrc);
    // If any instruction cannot use MMA matrix type drop the whole
    // chain. MMA matrices are stored in an opaque type so they cannot be used
    // by all operations.
    if (llvm::any_of(dependentOps,
                     [](Operation *op) { return !supportsMMaMatrixType(op); }))
      return;
    opToConvert.insert(dependentOps.begin(), dependentOps.end());
  });
  // Sort the operations so that we can convert them in topological order.
  return topologicalSort(opToConvert);
}

namespace {
// Transform contract into (m, k)x(k, n)x(m, n) form so that it can be
// converted to MMA matmul.
struct PrepareContractToGPUMMA
    : public OpRewritePattern<vector::ContractionOp> {
  using OpRewritePattern<vector::ContractionOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ContractionOp op,
                                PatternRewriter &rewriter) const override {
    Location loc = op.getLoc();
    Value lhs = op.getLhs(), rhs = op.getRhs(), res = op.getAcc();

    // Set up the parallel/reduction structure in the right form.
    using MapList = ArrayRef<ArrayRef<AffineExpr>>;
    auto infer = [](MapList m) { return AffineMap::inferFromExprList(m); };
    AffineExpr m, n, k;
    bindDims(rewriter.getContext(), m, n, k);
    static constexpr std::array<int64_t, 2> perm = {1, 0};
    auto iteratorTypes = op.getIteratorTypes().getValue();
    SmallVector<AffineMap, 4> maps = op.getIndexingMaps();
    if (!(isParallelIterator(iteratorTypes[0]) &&
          isParallelIterator(iteratorTypes[1]) &&
          isReductionIterator(iteratorTypes[2])))
      return failure();
    //
    // Two outer parallel, one inner reduction (matmat flavor).
    //
    if (maps == infer({{m, k}, {k, n}, {m, n}})) {
      // This is the classical row-major matmul; nothing to do.
      return failure();
    }
    if (maps == infer({{m, k}, {n, k}, {m, n}})) {
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
    } else if (maps == infer({{k, m}, {k, n}, {m, n}})) {
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{k, m}, {n, k}, {m, n}})) {
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{m, k}, {k, n}, {n, m}})) {
      std::swap(rhs, lhs);
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{m, k}, {n, k}, {n, m}})) {
      std::swap(rhs, lhs);
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
    } else if (maps == infer({{k, m}, {k, n}, {n, m}})) {
      std::swap(lhs, rhs);
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{k, m}, {n, k}, {n, m}})) {
      std::swap(lhs, rhs);
    } else {
      return failure();
    }
    rewriter.replaceOpWithNewOp<vector::ContractionOp>(
        op, lhs, rhs, res,
        rewriter.getAffineMapArrayAttr(infer({{m, k}, {k, n}, {m, n}})),
        op.getIteratorTypes());
    return success();
  }
};
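// For illustration (hypothetical IR), a contraction whose RHS is indexed as
// (n, k) is brought into the canonical form by transposing the RHS:
//
//   %d = vector.contract {indexing_maps = [affine_map<(m, n, k) -> (m, k)>,
//                                          affine_map<(m, n, k) -> (n, k)>,
//                                          affine_map<(m, n, k) -> (m, n)>],
//                         ...} %a, %b, %c : ...
// becomes
//   %bt = vector.transpose %b, [1, 0] : vector<16x16xf16> to vector<16x16xf16>
//   %d  = vector.contract {indexing_maps = [affine_map<(m, n, k) -> (m, k)>,
//                                           affine_map<(m, n, k) -> (k, n)>,
//                                           affine_map<(m, n, k) -> (m, n)>],
//                          ...} %a, %bt, %c : ...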
// Merge transpose op into the transfer read op. Transpose is not supported on
// MMA types but the MMA load can transpose the matrix when loading.
struct CombineTransferReadOpTranspose final
    : public OpRewritePattern<vector::TransposeOp> {
  using OpRewritePattern<vector::TransposeOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::TransposeOp op,
                                PatternRewriter &rewriter) const override {
    auto transferReadOp =
        op.getVector().getDefiningOp<vector::TransferReadOp>();
    if (!transferReadOp)
      return failure();

    // TODO: support 0-d corner case.
    if (transferReadOp.getTransferRank() == 0)
      return failure();

    if (transferReadOp.getMask() || transferReadOp.hasOutOfBoundsDim())
      return failure();
    SmallVector<int64_t, 2> perm;
    op.getTransp(perm);
    SmallVector<unsigned, 2> permU;
    for (int64_t o : perm)
      permU.push_back(unsigned(o));
    AffineMap permutationMap =
        AffineMap::getPermutationMap(permU, op.getContext());
    AffineMap newMap =
        permutationMap.compose(transferReadOp.getPermutationMap());
    rewriter.replaceOpWithNewOp<vector::TransferReadOp>(
        op, op.getType(), transferReadOp.getSource(),
        transferReadOp.getIndices(), AffineMapAttr::get(newMap),
        transferReadOp.getPadding(), transferReadOp.getMask(),
        transferReadOp.getInBoundsAttr());
    return success();
  }
};

} // namespace
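// For illustration (hypothetical IR), the pattern above folds
//
//   %r = vector.transfer_read %mem[%x, %y], %pad {in_bounds = [true, true]}
//       : memref<32x32xf16>, vector<16x16xf16>
//   %t = vector.transpose %r, [1, 0] : vector<16x16xf16> to vector<16x16xf16>
//
// into a single transfer_read whose permutation map composes the transpose:
//
//   %t = vector.transfer_read %mem[%x, %y], %pad
//       {in_bounds = [true, true],
//        permutation_map = affine_map<(d0, d1) -> (d1, d0)>}
//       : memref<32x32xf16>, vector<16x16xf16>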
// MMA types have different layouts based on how they are used in matmul ops.
// Figure out the right layout to use by looking at op uses.
// TODO: Change the GPU dialect to abstract the layout at this level and
// only care about it during lowering to NVVM.
template <typename OpTy>
static const char *inferFragType(OpTy op) {
  for (Operation *users : op->getUsers()) {
    auto contract = dyn_cast<vector::ContractionOp>(users);
    if (!contract)
      continue;
    if (contract.getLhs() == op.getResult())
      return "AOp";
    if (contract.getRhs() == op.getResult())
      return "BOp";
  }
  return "COp";
}

static void convertTransferReadOp(vector::TransferReadOp op,
                                  llvm::DenseMap<Value, Value> &valueMapping) {
  assert(op.getTransferRank() > 0 && "unexpected 0-d transfer");
  assert(transferReadSupportsMMAMatrixType(op));
  Optional<int64_t> stride =
      getMemrefConstantHorizontalStride(op.getShapedType());
  AffineMap map = op.getPermutationMap();
  // Handle broadcast by setting the stride to 0.
  if (map.getResult(0).isa<AffineConstantExpr>()) {
    assert(map.getResult(0).cast<AffineConstantExpr>().getValue() == 0);
    stride = 0;
  }
  assert(stride);
  const char *fragType = inferFragType(op);
  gpu::MMAMatrixType type =
      gpu::MMAMatrixType::get(op.getVectorType().getShape(),
                              op.getVectorType().getElementType(), fragType);
  OpBuilder b(op);
  Value load = b.create<gpu::SubgroupMmaLoadMatrixOp>(
      op.getLoc(), type, op.getSource(), op.getIndices(),
      b.getIndexAttr(*stride));
  valueMapping[op.getResult()] = load;
}

static void convertTransferWriteOp(vector::TransferWriteOp op,
                                   llvm::DenseMap<Value, Value> &valueMapping) {
  assert(transferWriteSupportsMMAMatrixType(op));
  Optional<int64_t> stride =
      getMemrefConstantHorizontalStride(op.getShapedType());
  assert(stride);
  OpBuilder b(op);
  Value matrix = valueMapping.find(op.getVector())->second;
  b.create<gpu::SubgroupMmaStoreMatrixOp>(op.getLoc(), matrix, op.getSource(),
                                          op.getIndices(),
                                          b.getIndexAttr(*stride));
  op.erase();
}

static void convertContractOp(vector::ContractionOp op,
                              llvm::DenseMap<Value, Value> &valueMapping) {
  OpBuilder b(op);
  Value opA = valueMapping.find(op.getLhs())->second;
  Value opB = valueMapping.find(op.getRhs())->second;
  Value opC = valueMapping.find(op.getAcc())->second;
  Value matmul = b.create<gpu::SubgroupMmaComputeOp>(op.getLoc(), opC.getType(),
                                                     opA, opB, opC);
  valueMapping[op.getResult()] = matmul;
}
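// For illustration (hypothetical IR), the three conversions above map a
// read/contract/write chain such as
//
//   %a = vector.transfer_read ... : memref<32x32xf16>, vector<16x16xf16>
//   %d = vector.contract ... %a, %b, %c : ...
//   vector.transfer_write %d, %out[%x, %y] ...
//
// onto the GPU dialect MMA ops, roughly:
//
//   %A = gpu.subgroup_mma_load_matrix %mem[%x, %y] {leadDimension = 32 : index}
//       : memref<32x32xf16> -> !gpu.mma_matrix<16x16xf16, "AOp">
//   %D = gpu.subgroup_mma_compute %A, %B, %C : ...
//   gpu.subgroup_mma_store_matrix %D, %out[%x, %y] {leadDimension = 32 : index}
//       : !gpu.mma_matrix<16x16xf16, "COp">, memref<32x32xf16>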
/// Convert a 2D splat ConstantOp to a SubgroupMmaConstantMatrix op.
static void convertConstantOp(arith::ConstantOp op,
                              llvm::DenseMap<Value, Value> &valueMapping) {
  assert(constantSupportsMMAMatrixType(op));
  OpBuilder b(op);
  Attribute splat =
      op.getValue().cast<SplatElementsAttr>().getSplatValue<Attribute>();
  auto scalarConstant =
      b.create<arith::ConstantOp>(op.getLoc(), splat.getType(), splat);
  const char *fragType = inferFragType(op);
  auto vecType = op.getType().cast<VectorType>();
  gpu::MMAMatrixType type = gpu::MMAMatrixType::get(
      vecType.getShape(), vecType.getElementType(), llvm::StringRef(fragType));
  auto matrix = b.create<gpu::SubgroupMmaConstantMatrixOp>(op.getLoc(), type,
                                                           scalarConstant);
  valueMapping[op.getResult()] = matrix;
}

/// Convert a vector.broadcast from scalar to a SubgroupMmaConstantMatrix op.
static void convertBroadcastOp(vector::BroadcastOp op,
                               llvm::DenseMap<Value, Value> &valueMapping) {
  assert(broadcastSupportsMMAMatrixType(op));
  OpBuilder b(op);
  const char *fragType = inferFragType(op);
  auto vecType = op.getVectorType();
  gpu::MMAMatrixType type = gpu::MMAMatrixType::get(
      vecType.getShape(), vecType.getElementType(), llvm::StringRef(fragType));
  auto matrix = b.create<gpu::SubgroupMmaConstantMatrixOp>(op.getLoc(), type,
                                                           op.getSource());
  valueMapping[op.getResult()] = matrix;
}
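// For illustration (hypothetical IR), a splat constant becomes a single
// scalar constant feeding a gpu.subgroup_mma_constant_matrix:
//
//   %cst = arith.constant dense<0.000000e+00> : vector<16x16xf16>
// becomes
//   %s = arith.constant 0.000000e+00 : f16
//   %m = gpu.subgroup_mma_constant_matrix %s
//       : !gpu.mma_matrix<16x16xf16, "COp">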
// Replace ForOp with a new ForOp with extra operands. The YieldOp is not
// updated and needs to be updated separately for the loop to be correct.
static scf::ForOp replaceForOpWithNewSignature(OpBuilder &b, scf::ForOp loop,
                                               ValueRange newIterOperands) {
  // Create a new loop before the existing one, with the extra operands.
  OpBuilder::InsertionGuard g(b);
  b.setInsertionPoint(loop);
  auto operands = llvm::to_vector<4>(loop.getIterOperands());
  operands.append(newIterOperands.begin(), newIterOperands.end());
  scf::ForOp newLoop =
      b.create<scf::ForOp>(loop.getLoc(), loop.getLowerBound(),
                           loop.getUpperBound(), loop.getStep(), operands);
  newLoop.getBody()->erase();
  newLoop.getLoopBody().getBlocks().splice(
      newLoop.getLoopBody().getBlocks().begin(),
      loop.getLoopBody().getBlocks());
  for (Value operand : newIterOperands)
    newLoop.getBody()->addArgument(operand.getType(), operand.getLoc());

  for (auto it : llvm::zip(loop.getResults(), newLoop.getResults().take_front(
                                                  loop.getNumResults())))
    std::get<0>(it).replaceAllUsesWith(std::get<1>(it));
  loop.erase();
  return newLoop;
}

static void convertForOp(scf::ForOp op,
                         llvm::DenseMap<Value, Value> &valueMapping) {
  SmallVector<Value> newOperands;
  SmallVector<std::pair<size_t, size_t>> argMapping;
  for (const auto &operand : llvm::enumerate(op.getIterOperands())) {
    auto it = valueMapping.find(operand.value());
    if (it == valueMapping.end())
      continue;
    argMapping.push_back(std::make_pair(
        operand.index(), op.getNumIterOperands() + newOperands.size()));
    newOperands.push_back(it->second);
  }
  OpBuilder b(op);
  scf::ForOp newForOp = replaceForOpWithNewSignature(b, op, newOperands);
  Block &loopBody = *newForOp.getBody();
  for (auto mapping : argMapping) {
    valueMapping[newForOp.getResult(mapping.first)] =
        newForOp.getResult(mapping.second);
    valueMapping[loopBody.getArgument(mapping.first +
                                      newForOp.getNumInductionVars())] =
        loopBody.getArgument(mapping.second + newForOp.getNumInductionVars());
  }
}
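// For illustration (hypothetical IR), a loop whose iter_arg is mapped to an
// MMA matrix gets a twin iter_arg appended:
//
//   scf.for %i = ... iter_args(%acc = %init) -> (vector<16x16xf32>)
// becomes
//   scf.for %i = ... iter_args(%acc = %init, %accM = %initM)
//       -> (vector<16x16xf32>, !gpu.mma_matrix<16x16xf32, "COp">)
//
// The original vector iter_arg is kept but becomes dead once the yield is
// rewritten below, and is expected to be removed by later dead-code cleanup.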
static void convertYieldOp(scf::YieldOp op,
                           llvm::DenseMap<Value, Value> &valueMapping) {
  OpBuilder b(op);
  auto loop = cast<scf::ForOp>(op->getParentOp());
  auto yieldOperands = llvm::to_vector<4>(op.getOperands());
  for (const auto &operand : llvm::enumerate(op.getOperands())) {
    auto it = valueMapping.find(operand.value());
    if (it == valueMapping.end())
      continue;
    // Replace the yield of the old value with the for op argument to make it
    // easier to remove the dead code.
    yieldOperands[operand.index()] = loop.getIterOperands()[operand.index()];
    yieldOperands.push_back(it->second);
  }
  b.create<scf::YieldOp>(op.getLoc(), yieldOperands);
  op.erase();
}

/// Convert an elementwise op to the equivalent elementwise op on MMA matrix.
static void convertElementwiseOp(Operation *op, gpu::MMAElementwiseOp opType,
                                 llvm::DenseMap<Value, Value> &valueMapping) {
  OpBuilder b(op);
  SmallVector<Value> matrixOperands;
  for (Value operand : op->getOperands())
    matrixOperands.push_back(valueMapping.find(operand)->second);
  Value newOp = b.create<gpu::SubgroupMmaElementwiseOp>(
      op->getLoc(), matrixOperands[0].getType(), matrixOperands, opType);
  valueMapping[op->getResult(0)] = newOp;
}
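// For illustration (hypothetical IR), an elementwise op whose operands are
// already mapped to MMA matrices, e.g.
//
//   %e = arith.addf %d, %bias : vector<16x16xf32>
// becomes
//   %e = gpu.subgroup_mma_elementwise addf %D, %Bias
//       : (!gpu.mma_matrix<16x16xf32, "COp">, !gpu.mma_matrix<16x16xf32, "COp">)
//       -> !gpu.mma_matrix<16x16xf32, "COp">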
void mlir::populatePrepareVectorToMMAPatterns(RewritePatternSet &patterns) {
  patterns.add<PrepareContractToGPUMMA, CombineTransferReadOpTranspose>(
      patterns.getContext());
}

void mlir::convertVectorToMMAOps(Operation *rootOp) {
  SetVector<Operation *> ops = getOpToConvert(rootOp);
  llvm::DenseMap<Value, Value> valueMapping;
  for (Operation *op : ops) {
    if (auto transferRead = dyn_cast<vector::TransferReadOp>(op)) {
      convertTransferReadOp(transferRead, valueMapping);
    } else if (auto transferWrite = dyn_cast<vector::TransferWriteOp>(op)) {
      convertTransferWriteOp(transferWrite, valueMapping);
    } else if (auto contractOp = dyn_cast<vector::ContractionOp>(op)) {
      convertContractOp(contractOp, valueMapping);
    } else if (auto constantOp = dyn_cast<arith::ConstantOp>(op)) {
      convertConstantOp(constantOp, valueMapping);
    } else if (auto broadcastOp = dyn_cast<vector::BroadcastOp>(op)) {
      convertBroadcastOp(broadcastOp, valueMapping);
    } else if (auto forOp = dyn_cast<scf::ForOp>(op)) {
      convertForOp(forOp, valueMapping);
    } else if (auto yieldOp = dyn_cast<scf::YieldOp>(op)) {
      convertYieldOp(yieldOp, valueMapping);
    } else if (auto elementwiseType = convertElementwiseOpToMMA(op)) {
      convertElementwiseOp(op, *elementwiseType, valueMapping);
    }
  }
}

namespace {

struct ConvertVectorToGPUPass
    : public ConvertVectorToGPUBase<ConvertVectorToGPUPass> {
  void runOnOperation() override {
    RewritePatternSet patterns(&getContext());
    populatePrepareVectorToMMAPatterns(patterns);
    (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));

    convertVectorToMMAOps(getOperation());
  }
};

} // namespace

std::unique_ptr<Pass> mlir::createConvertVectorToGPUPass() {
  return std::make_unique<ConvertVectorToGPUPass>();
}
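// Usage sketch (assuming the usual tablegen pass registration for this file,
// with the pass argument "convert-vector-to-gpu"): the pass can be exercised
// from the command line with
//
//   mlir-opt --convert-vector-to-gpu input.mlir
//
// or added to a pass manager programmatically:
//
//   pm.addPass(createConvertVectorToGPUPass());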