//===- LowerGpuOpsToNVVMOps.cpp - MLIR GPU to NVVM lowering passes --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a pass to generate NVVM IR operations for higher-level
// GPU operations.
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/GPUToNVVM/GPUToNVVMPass.h"

#include "mlir/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.h"
#include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h"
#include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h"
#include "mlir/Conversion/LLVMCommon/ConversionTarget.h"
#include "mlir/Conversion/LLVMCommon/LoweringOptions.h"
#include "mlir/Conversion/LLVMCommon/TypeConverter.h"
#include "mlir/Conversion/MemRefToLLVM/MemRefToLLVM.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/ControlFlow/IR/ControlFlow.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/GPU/Transforms/Passes.h"
#include "mlir/Dialect/LLVMIR/NVVMDialect.h"
#include "mlir/Dialect/Math/IR/Math.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "llvm/Support/FormatVariadic.h"

#include "../GPUCommon/GPUOpsLowering.h"
#include "../GPUCommon/IndexIntrinsicsOpLowering.h"
#include "../GPUCommon/OpToFuncCallLowering.h"
#include "../PassDetail.h"

using namespace mlir;

namespace {

/// Converts a gpu dialect shuffle mode enum to the equivalent NVVM one.
static NVVM::ShflKind convertShflKind(gpu::ShuffleMode mode) {
  switch (mode) {
  case gpu::ShuffleMode::XOR:
    return NVVM::ShflKind::bfly;
  case gpu::ShuffleMode::UP:
    return NVVM::ShflKind::up;
  case gpu::ShuffleMode::DOWN:
    return NVVM::ShflKind::down;
  case gpu::ShuffleMode::IDX:
    return NVVM::ShflKind::idx;
  }
  llvm_unreachable("unknown shuffle mode");
}

struct GPUShuffleOpLowering : public ConvertOpToLLVMPattern<gpu::ShuffleOp> {
  using ConvertOpToLLVMPattern<gpu::ShuffleOp>::ConvertOpToLLVMPattern;

  /// Lowers a shuffle to the corresponding NVVM op.
  ///
  /// Converts the `width` argument into an activeMask (a bitmask which
  /// specifies which threads participate in the shuffle) and a maskAndClamp
  /// (specifying the highest lane which participates in the shuffle). For the
  /// `up` mode, maskAndClamp is instead `32 - width`, as computed below. For
  /// example, the XOR ("butterfly") mode lowers to:
  ///
  ///     %one = llvm.mlir.constant(1 : i32) : i32
  ///     %minus_one = llvm.mlir.constant(-1 : i32) : i32
  ///     %thirty_two = llvm.mlir.constant(32 : i32) : i32
  ///     %num_lanes = llvm.sub %thirty_two, %width : i32
  ///     %active_mask = llvm.lshr %minus_one, %num_lanes : i32
  ///     %mask_and_clamp = llvm.sub %width, %one : i32
  ///     %shfl = nvvm.shfl.sync bfly %active_mask, %value, %offset,
  ///         %mask_and_clamp : !llvm.struct<(f32, i1)>
  ///     %shfl_value = llvm.extractvalue %shfl[0 : index] :
  ///         !llvm.struct<(f32, i1)>
  ///     %shfl_pred = llvm.extractvalue %shfl[1 : index] :
  ///         !llvm.struct<(f32, i1)>
  LogicalResult
  matchAndRewrite(gpu::ShuffleOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Location loc = op->getLoc();

    auto valueTy = adaptor.value().getType();
    auto int32Type = IntegerType::get(rewriter.getContext(), 32);
    auto predTy = IntegerType::get(rewriter.getContext(), 1);
    auto resultTy = LLVM::LLVMStructType::getLiteral(rewriter.getContext(),
                                                     {valueTy, predTy});

    Value one = rewriter.create<LLVM::ConstantOp>(
        loc, int32Type, rewriter.getI32IntegerAttr(1));
    Value minusOne = rewriter.create<LLVM::ConstantOp>(
        loc, int32Type, rewriter.getI32IntegerAttr(-1));
    Value thirtyTwo = rewriter.create<LLVM::ConstantOp>(
        loc, int32Type, rewriter.getI32IntegerAttr(32));
    Value numLeadInactiveLane = rewriter.create<LLVM::SubOp>(
        loc, int32Type, thirtyTwo, adaptor.width());
    // Bit mask of active lanes: `(-1) >> (32 - activeWidth)`.
    Value activeMask = rewriter.create<LLVM::LShrOp>(loc, int32Type, minusOne,
                                                     numLeadInactiveLane);
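    // Worked example (illustrative): for a shuffle of width 8,
    // numLeadInactiveLane = 24 and activeMask = 0xffffffff >> 24 = 0x000000ff,
    // i.e. only the lowest 8 lanes of the warp participate.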
    Value maskAndClamp;
    if (op.mode() == gpu::ShuffleMode::UP) {
      // Clamp lane: `32 - activeWidth`
      maskAndClamp = numLeadInactiveLane;
    } else {
      // Clamp lane: `activeWidth - 1`
      maskAndClamp =
          rewriter.create<LLVM::SubOp>(loc, int32Type, adaptor.width(), one);
    }

    auto returnValueAndIsValidAttr = rewriter.getUnitAttr();
    Value shfl = rewriter.create<NVVM::ShflOp>(
        loc, resultTy, activeMask, adaptor.value(), adaptor.offset(),
        maskAndClamp, convertShflKind(op.mode()), returnValueAndIsValidAttr);
    Value shflValue = rewriter.create<LLVM::ExtractValueOp>(
        loc, valueTy, shfl, rewriter.getIndexArrayAttr(0));
    Value isActiveSrcLane = rewriter.create<LLVM::ExtractValueOp>(
        loc, predTy, shfl, rewriter.getIndexArrayAttr(1));

    rewriter.replaceOp(op, {shflValue, isActiveSrcLane});
    return success();
  }
};

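/// Lowers gpu.lane_id to the NVVM lane-id intrinsic, extending or truncating
/// the i32 result to the configured index bitwidth. Sketch of the expected
/// output for a 64-bit index type:
///
///     %0 = nvvm.read.ptx.sreg.laneid : i32
///     %1 = llvm.sext %0 : i32 to i64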
struct GPULaneIdOpToNVVM : ConvertOpToLLVMPattern<gpu::LaneIdOp> {
  using ConvertOpToLLVMPattern<gpu::LaneIdOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(gpu::LaneIdOp op, gpu::LaneIdOp::Adaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op->getLoc();
    MLIRContext *context = rewriter.getContext();
    Value newOp = rewriter.create<NVVM::LaneIdOp>(loc, rewriter.getI32Type());
    // Truncate or extend the result depending on the index bitwidth specified
    // by the LLVMTypeConverter options.
    const unsigned indexBitwidth = getTypeConverter()->getIndexTypeBitwidth();
    if (indexBitwidth > 32) {
      newOp = rewriter.create<LLVM::SExtOp>(
          loc, IntegerType::get(context, indexBitwidth), newOp);
    } else if (indexBitwidth < 32) {
      newOp = rewriter.create<LLVM::TruncOp>(
          loc, IntegerType::get(context, indexBitwidth), newOp);
    }
    rewriter.replaceOp(op, {newOp});
    return success();
  }
};

/// Import the GPU Ops to NVVM Patterns.
#include "GPUToNVVM.cpp.inc"

/// A pass that replaces all occurrences of GPU device operations with their
/// corresponding NVVM equivalent.
///
/// This pass only handles device code and is not meant to be run on GPU host
/// code.
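///
/// For example (illustrative; the flag name assumes the usual registration in
/// Passes.td), the pass can be invoked from the command line as:
///
///     mlir-opt -convert-gpu-to-nvvm kernel.mlir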
struct LowerGpuOpsToNVVMOpsPass
    : public ConvertGpuOpsToNVVMOpsBase<LowerGpuOpsToNVVMOpsPass> {
  LowerGpuOpsToNVVMOpsPass() = default;
  LowerGpuOpsToNVVMOpsPass(unsigned indexBitwidth) {
    this->indexBitwidth = indexBitwidth;
  }

  void runOnOperation() override {
    gpu::GPUModuleOp m = getOperation();

    // Request C wrapper emission.
    for (auto func : m.getOps<func::FuncOp>()) {
      func->setAttr(LLVM::LLVMDialect::getEmitCWrapperAttrName(),
                    UnitAttr::get(&getContext()));
    }

    // Customize the bitwidth used for the device side index computations.
    LowerToLLVMOptions options(
        m.getContext(),
        DataLayout(cast<DataLayoutOpInterface>(m.getOperation())));
    if (indexBitwidth != kDeriveIndexBitwidthFromDataLayout)
      options.overrideIndexBitwidth(indexBitwidth);

    // MemRef conversion for GPU to NVVM lowering. The GPU dialect uses memory
    // space 5 for private memory attributions, but NVVM represents private
    // memory allocations as local `alloca`s in the default address space. This
    // converter drops the private memory space to support the use case above.
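    // For example (illustrative): a private attribution typed
    // `memref<4xf32, 5>` is converted as if it were `memref<4xf32>`, so the
    // resulting descriptor uses generic (addrspace-0) pointers.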
    LLVMTypeConverter converter(m.getContext(), options);
    converter.addConversion([&](MemRefType type) -> Optional<Type> {
      if (type.getMemorySpaceAsInt() !=
          gpu::GPUDialect::getPrivateAddressSpace())
        return llvm::None;
      return converter.convertType(MemRefType::Builder(type).setMemorySpace(0));
    });
    // Lowering for MMAMatrixType.
    converter.addConversion([&](gpu::MMAMatrixType type) -> Type {
      return convertMMAToLLVMType(type);
    });
    RewritePatternSet patterns(m.getContext());
    RewritePatternSet llvmPatterns(m.getContext());

    // Apply in-dialect lowering first. In-dialect lowering will replace ops
    // which need to be lowered further, which is not supported by a single
    // conversion pass.
    populateGpuRewritePatterns(patterns);
    (void)applyPatternsAndFoldGreedily(m, std::move(patterns));

    arith::populateArithmeticToLLVMConversionPatterns(converter, llvmPatterns);
    cf::populateControlFlowToLLVMConversionPatterns(converter, llvmPatterns);
    populateFuncToLLVMConversionPatterns(converter, llvmPatterns);
    populateMemRefToLLVMConversionPatterns(converter, llvmPatterns);
    populateGpuToNVVMConversionPatterns(converter, llvmPatterns);
    populateGpuWMMAToNVVMConversionPatterns(converter, llvmPatterns);
    LLVMConversionTarget target(getContext());
    configureGpuToNVVMConversionLegality(target);
    if (failed(applyPartialConversion(m, target, std::move(llvmPatterns))))
      signalPassFailure();
  }
};

} // namespace

void mlir::configureGpuToNVVMConversionLegality(ConversionTarget &target) {
  target.addIllegalOp<func::FuncOp>();
  target.addLegalDialect<::mlir::LLVM::LLVMDialect>();
  target.addLegalDialect<::mlir::NVVM::NVVMDialect>();
  target.addIllegalDialect<gpu::GPUDialect>();
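  // The LLVM math intrinsics below typically have no NVPTX lowering, so mark
  // them illegal to force rewriting into libdevice calls instead (see
  // populateGpuToNVVMConversionPatterns).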
  target.addIllegalOp<LLVM::CosOp, LLVM::ExpOp, LLVM::Exp2Op, LLVM::FAbsOp,
                      LLVM::FCeilOp, LLVM::FFloorOp, LLVM::LogOp, LLVM::Log10Op,
                      LLVM::Log2Op, LLVM::PowOp, LLVM::SinOp, LLVM::SqrtOp>();

  // TODO: Remove once we support replacing non-root ops.
  target.addLegalOp<gpu::YieldOp, gpu::GPUModuleOp, gpu::ModuleEndOp>();
}

void mlir::populateGpuToNVVMConversionPatterns(LLVMTypeConverter &converter,
                                               RewritePatternSet &patterns) {
  populateWithGenerated(patterns);
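  // Each GPU index op maps to the NVVM special-register read for the matching
  // dimension, e.g. (illustrative): `gpu.thread_id x` becomes
  // `nvvm.read.ptx.sreg.tid.x`, extended or truncated to the index bitwidth.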
  patterns
      .add<GPUIndexIntrinsicOpLowering<gpu::ThreadIdOp, NVVM::ThreadIdXOp,
                                       NVVM::ThreadIdYOp, NVVM::ThreadIdZOp>,
           GPUIndexIntrinsicOpLowering<gpu::BlockDimOp, NVVM::BlockDimXOp,
                                       NVVM::BlockDimYOp, NVVM::BlockDimZOp>,
           GPUIndexIntrinsicOpLowering<gpu::BlockIdOp, NVVM::BlockIdXOp,
                                       NVVM::BlockIdYOp, NVVM::BlockIdZOp>,
           GPUIndexIntrinsicOpLowering<gpu::GridDimOp, NVVM::GridDimXOp,
                                       NVVM::GridDimYOp, NVVM::GridDimZOp>,
           GPULaneIdOpToNVVM, GPUShuffleOpLowering, GPUReturnOpLowering>(
          converter);

  // Explicitly drop memory space when lowering private memory
  // attributions since NVVM models it as `alloca`s in the default
  // memory space and does not support `alloca`s with addrspace(5).
  patterns.add<GPUFuncOpLowering>(
      converter, /*allocaAddrSpace=*/0,
      StringAttr::get(&converter.getContext(),
                      NVVM::NVVMDialect::getKernelFuncAttrName()));

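  // Math ops lower to calls into NVIDIA's libdevice, picking the f32 or f64
  // variant by operand type, e.g. (illustrative): `math.sqrt %x : f32` becomes
  // `llvm.call @__nv_sqrtf(%x) : (f32) -> f32`.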
  patterns.add<OpToFuncCallLowering<math::AbsOp>>(converter, "__nv_fabsf",
                                                  "__nv_fabs");
  patterns.add<OpToFuncCallLowering<math::AtanOp>>(converter, "__nv_atanf",
                                                   "__nv_atan");
  patterns.add<OpToFuncCallLowering<math::Atan2Op>>(converter, "__nv_atan2f",
                                                    "__nv_atan2");
  patterns.add<OpToFuncCallLowering<math::CeilOp>>(converter, "__nv_ceilf",
                                                   "__nv_ceil");
  patterns.add<OpToFuncCallLowering<math::CosOp>>(converter, "__nv_cosf",
                                                  "__nv_cos");
  patterns.add<OpToFuncCallLowering<math::ExpOp>>(converter, "__nv_expf",
                                                  "__nv_exp");
  patterns.add<OpToFuncCallLowering<math::Exp2Op>>(converter, "__nv_exp2f",
                                                   "__nv_exp2");
  patterns.add<OpToFuncCallLowering<math::ExpM1Op>>(converter, "__nv_expm1f",
                                                    "__nv_expm1");
  patterns.add<OpToFuncCallLowering<math::FloorOp>>(converter, "__nv_floorf",
                                                    "__nv_floor");
  patterns.add<OpToFuncCallLowering<math::LogOp>>(converter, "__nv_logf",
                                                  "__nv_log");
  patterns.add<OpToFuncCallLowering<math::Log1pOp>>(converter, "__nv_log1pf",
                                                    "__nv_log1p");
  patterns.add<OpToFuncCallLowering<math::Log10Op>>(converter, "__nv_log10f",
                                                    "__nv_log10");
  patterns.add<OpToFuncCallLowering<math::Log2Op>>(converter, "__nv_log2f",
                                                   "__nv_log2");
  patterns.add<OpToFuncCallLowering<math::PowFOp>>(converter, "__nv_powf",
                                                   "__nv_pow");
  patterns.add<OpToFuncCallLowering<math::RsqrtOp>>(converter, "__nv_rsqrtf",
                                                    "__nv_rsqrt");
  patterns.add<OpToFuncCallLowering<math::SinOp>>(converter, "__nv_sinf",
                                                  "__nv_sin");
  patterns.add<OpToFuncCallLowering<math::SqrtOp>>(converter, "__nv_sqrtf",
                                                   "__nv_sqrt");
  patterns.add<OpToFuncCallLowering<math::TanhOp>>(converter, "__nv_tanhf",
                                                   "__nv_tanh");
}

std::unique_ptr<OperationPass<gpu::GPUModuleOp>>
mlir::createLowerGpuOpsToNVVMOpsPass(unsigned indexBitwidth) {
  return std::make_unique<LowerGpuOpsToNVVMOpsPass>(indexBitwidth);
}