//===- KernelOutlining.cpp - Implementation of GPU kernel outlining -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the GPU dialect kernel outlining pass.
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/ControlFlow/IR/ControlFlowOps.h"
#include "mlir/Dialect/DLTI/DLTI.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/GPU/Passes.h"
#include "mlir/Dialect/GPU/Utils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/RegionUtils.h"

using namespace mlir;

template <typename OpTy>
static void createForAllDimensions(OpBuilder &builder, Location loc,
                                   SmallVectorImpl<Value> &values) {
  for (auto dim : {gpu::Dimension::x, gpu::Dimension::y, gpu::Dimension::z})
    values.push_back(builder.create<OpTy>(loc, builder.getIndexType(), dim));
}
/// Adds operations generating block/thread ids and grid/block dimensions at the
/// beginning of the `launchFuncOpBody` region. Also adds a mapping from each
/// argument of the entry block of `launchOpBody` to the corresponding result of
/// the added operations.
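///
/// As an illustrative sketch (simplified, hypothetical IR): the entry block of
/// a `gpu.launch` body carries twelve leading index arguments,
///
///   ^bb0(%bx: index, %by: index, %bz: index, %tx: index, %ty: index, ...):
///
/// and each of them is mapped to a freshly created `gpu.block_id`,
/// `gpu.thread_id`, `gpu.grid_dim`, or `gpu.block_dim` operation inserted at
/// the start of `launchFuncOpBody`.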
static void injectGpuIndexOperations(Location loc, Region &launchFuncOpBody,
                                     Region &launchOpBody,
                                     BlockAndValueMapping &map) {
  OpBuilder builder(loc->getContext());
  Block &firstBlock = launchOpBody.front();
  builder.setInsertionPointToStart(&launchFuncOpBody.front());
  SmallVector<Value, 12> indexOps;
  createForAllDimensions<gpu::BlockIdOp>(builder, loc, indexOps);
  createForAllDimensions<gpu::ThreadIdOp>(builder, loc, indexOps);
  createForAllDimensions<gpu::GridDimOp>(builder, loc, indexOps);
  createForAllDimensions<gpu::BlockDimOp>(builder, loc, indexOps);
  // Map the leading 12 block arguments of the launch body to the results of
  // the index operations just created.
  for (const auto &indexOp : enumerate(indexOps))
    map.map(firstBlock.getArgument(indexOp.index()), indexOp.value());
}

/// Identifies operations that are beneficial to sink into kernels. These
/// operations must not have side effects, since sinking (and hence
/// duplicating) them would otherwise be illegal.
static bool isLikelyAnIndexComputation(Operation *op) {
  return matchPattern(op, m_Constant()) ||
         isa<memref::DimOp, arith::SelectOp, arith::CmpIOp>(op);
}

/// For a given operation `op`, computes whether it is beneficial to sink the
/// operation into the kernel. An operation can be sunk if doing so does not
/// introduce new kernel arguments. Whether a value is already available in the
/// kernel (and hence does not introduce new arguments) is checked by
/// querying `existingDependencies` and `availableValues`.
/// If an operand is not yet available, we recursively check whether it can be
/// made available by sinking its defining op.
/// Operations that are identified for sinking are added to `beneficiaryOps` in
/// the order they should appear in the kernel. Furthermore, `availableValues`
/// is updated with results that will be available after sinking the identified
/// ops.
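///
/// For instance (an illustrative sketch, not verbatim pass output): given
///
///   %c0 = arith.constant 0 : index
///   %size = memref.dim %mem, %c0 : memref<?xf32>
///   gpu.launch ... { "use"(%mem, %size) ... }
///
/// the `memref.dim` can be sunk: `%mem` is already a dependency of the launch
/// and `%c0` can be made available by sinking the constant, so both ops end up
/// in `beneficiaryOps` (the constant first).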
static bool extractBeneficiaryOps(
    Operation *op, const SetVector<Value> &existingDependencies,
    SetVector<Operation *> &beneficiaryOps,
    llvm::SmallPtrSetImpl<Value> &availableValues,
    llvm::function_ref<bool(Operation *)> isSinkingBeneficiary) {
  if (beneficiaryOps.count(op))
    return true;

  if (!isSinkingBeneficiary(op))
    return false;

  for (Value operand : op->getOperands()) {
    // It is already visible in the kernel, keep going.
    if (availableValues.count(operand))
      continue;
    // Else check whether it can be made available via sinking or already is a
    // dependency.
    Operation *definingOp = operand.getDefiningOp();
    if ((!definingOp || !extractBeneficiaryOps(definingOp, existingDependencies,
                                               beneficiaryOps, availableValues,
                                               isSinkingBeneficiary)) &&
        !existingDependencies.count(operand))
      return false;
  }
  // We will sink the operation, mark its results as now available.
  beneficiaryOps.insert(op);
  for (Value result : op->getResults())
    availableValues.insert(result);
  return true;
}

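/// A hedged before/after sketch of the overall sinking (simplified,
/// hypothetical IR):
///
///   %c1 = arith.constant 1 : index
///   gpu.launch blocks(...) threads(...) {
///     "use"(%c1) : (index) -> ()
///     gpu.terminator
///   }
///
/// becomes
///
///   gpu.launch blocks(...) threads(...) {
///     %0 = arith.constant 1 : index
///     "use"(%0) : (index) -> ()
///     gpu.terminator
///   }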
LogicalResult mlir::sinkOperationsIntoLaunchOp(
    gpu::LaunchOp launchOp,
    llvm::function_ref<bool(Operation *)> isSinkingBeneficiary) {
  assert(isSinkingBeneficiary);
  Region &launchOpBody = launchOp.body();

  // Identify uses from values defined outside of the scope of the launch
  // operation.
  SetVector<Value> sinkCandidates;
  getUsedValuesDefinedAbove(launchOpBody, sinkCandidates);

  SetVector<Operation *> toBeSunk;
  llvm::SmallPtrSet<Value, 4> availableValues;
  for (Value operand : sinkCandidates) {
    Operation *operandOp = operand.getDefiningOp();
    if (!operandOp)
      continue;
    extractBeneficiaryOps(operandOp, sinkCandidates, toBeSunk, availableValues,
                          isSinkingBeneficiary);
  }

  // Insert operations so that the defs get cloned before uses.
  BlockAndValueMapping map;
  OpBuilder builder(launchOpBody);
  for (Operation *op : toBeSunk) {
    Operation *clonedOp = builder.clone(*op, map);
    // Only replace uses within the launch op.
    for (auto pair : llvm::zip(op->getResults(), clonedOp->getResults()))
      replaceAllUsesInRegionWith(std::get<0>(pair), std::get<1>(pair),
                                 launchOp.body());
  }
  return success();
}

/// Outline the `gpu.launch` operation body into a kernel function. Replace
/// `gpu.terminator` operations with `gpu.return` in the generated function.
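///
/// As an illustrative sketch (simplified, hypothetical IR), a launch such as
///
///   gpu.launch blocks(%bx, %by, %bz) in (...) threads(%tx, %ty, %tz) in (...) {
///     "use"(%tx, %val) : (index, f32) -> ()
///     gpu.terminator
///   }
///
/// is outlined into
///
///   gpu.func @kernelFnName(%val: f32) kernel {
///     %tx = gpu.thread_id x
///     ...
///     gpu.return
///   }
///
/// where `%val` stands for any value used inside the region but defined above
/// it; all such values are collected into `operands`.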
static gpu::GPUFuncOp outlineKernelFuncImpl(gpu::LaunchOp launchOp,
                                            StringRef kernelFnName,
                                            SetVector<Value> &operands) {
  Location loc = launchOp.getLoc();
  // Create a builder with no insertion point; insertion will happen separately
  // due to symbol table manipulation.
  OpBuilder builder(launchOp.getContext());
  Region &launchOpBody = launchOp.body();

  // Identify uses from values defined outside of the scope of the launch
  // operation.
  getUsedValuesDefinedAbove(launchOpBody, operands);

  // Create the gpu.func operation.
  SmallVector<Type, 4> kernelOperandTypes;
  kernelOperandTypes.reserve(operands.size());
  for (Value operand : operands) {
    kernelOperandTypes.push_back(operand.getType());
  }
  FunctionType type =
      FunctionType::get(launchOp.getContext(), kernelOperandTypes, {});
  auto outlinedFunc = builder.create<gpu::GPUFuncOp>(loc, kernelFnName, type);
  outlinedFunc->setAttr(gpu::GPUDialect::getKernelFuncAttrName(),
                        builder.getUnitAttr());
  BlockAndValueMapping map;

  // Map the arguments corresponding to the launch parameters like blockIdx,
  // threadIdx, etc.
  Region &outlinedFuncBody = outlinedFunc.body();
  injectGpuIndexOperations(loc, outlinedFuncBody, launchOpBody, map);

  // Map arguments from gpu.launch region to the arguments of the gpu.func
  // operation.
  Block &entryBlock = outlinedFuncBody.front();
  for (const auto &operand : enumerate(operands))
    map.map(operand.value(), entryBlock.getArgument(operand.index()));

  // Clone the region of the gpu.launch operation into the gpu.func operation.
  // TODO: If cloneInto can be modified such that if a mapping for
  // a block exists, that block will be used to clone operations into (at the
  // end of the block), instead of creating a new block, this would be much
  // cleaner.
  launchOpBody.cloneInto(&outlinedFuncBody, map);

  // Branch from entry of the gpu.func operation to the block that is cloned
  // from the entry block of the gpu.launch operation.
  Block &launchOpEntry = launchOpBody.front();
  Block *clonedLaunchOpEntry = map.lookup(&launchOpEntry);
  builder.setInsertionPointToEnd(&entryBlock);
  builder.create<cf::BranchOp>(loc, clonedLaunchOpEntry);

  outlinedFunc.walk([](gpu::TerminatorOp op) {
    OpBuilder replacer(op);
    replacer.create<gpu::ReturnOp>(op.getLoc());
    op.erase();
  });
  return outlinedFunc;
}

203 
204 gpu::GPUFuncOp mlir::outlineKernelFunc(gpu::LaunchOp launchOp,
205                                        StringRef kernelFnName,
206                                        llvm::SmallVectorImpl<Value> &operands) {
207   DenseSet<Value> inputOperandSet;
208   inputOperandSet.insert(operands.begin(), operands.end());
209   SetVector<Value> operandSet(operands.begin(), operands.end());
210   auto funcOp = outlineKernelFuncImpl(launchOp, kernelFnName, operandSet);
211   for (auto operand : operandSet) {
212     if (!inputOperandSet.count(operand))
213       operands.push_back(operand);
214   }
215   return funcOp;
216 }
217 
/// Replace a `gpu.launch` operation with a `gpu.launch_func` operation
/// launching `kernelFunc`. The kernel function contains the body of the
/// `gpu.launch` with constant region arguments inlined.
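///
/// Roughly, the replacement looks like (a hedged sketch; symbol and value
/// names are hypothetical):
///
///   gpu.launch_func @foo_kernel::@foo_kernel
///       blocks in (%gx, %gy, %gz) threads in (%bx, %by, %bz)
///       args(%arg0 : f32)
///
/// reusing the grid/block sizes and the optional dynamic shared memory size of
/// the original launch.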
static void convertToLaunchFuncOp(gpu::LaunchOp launchOp,
                                  gpu::GPUFuncOp kernelFunc,
                                  ValueRange operands) {
  OpBuilder builder(launchOp);
  // The launch op has an optional dynamic shared memory size. If it doesn't
  // exist, we use zero.
  builder.create<gpu::LaunchFuncOp>(
      launchOp.getLoc(), kernelFunc, launchOp.getGridSizeOperandValues(),
      launchOp.getBlockSizeOperandValues(), launchOp.dynamicSharedMemorySize(),
      operands);
  launchOp.erase();
}

namespace {
/// Pass that moves ops that are likely index computations into the gpu.launch
/// body.
class GpuLaunchSinkIndexComputationsPass
    : public GpuLaunchSinkIndexComputationsBase<
          GpuLaunchSinkIndexComputationsPass> {
public:
  void runOnOperation() override {
    Operation *op = getOperation();
    if (op->walk([](gpu::LaunchOp launch) {
            // Pull in operations that can be sunk.
            if (failed(sinkOperationsIntoLaunchOp(launch,
                                                  isLikelyAnIndexComputation)))
              return WalkResult::interrupt();

            return WalkResult::advance();
          }).wasInterrupted())
      signalPassFailure();
  }
};

/// Pass that moves the kernel of each LaunchOp into a separate nested module.
///
/// This pass moves the kernel code of each LaunchOp into a function created
/// inside a nested module. It then replaces the LaunchOp with a
/// gpu.launch_func referencing that function.
///
/// The gpu.modules are intended to be compiled to a cubin blob independently in
/// a separate pass. The outlined functions can then be annotated with the
/// symbol of the cubin accessor function.
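///
/// Schematically, the result looks like (a hedged sketch; names are
/// illustrative):
///
///   module attributes {gpu.container_module} {
///     func @foo(...) {
///       gpu.launch_func @foo_kernel::@foo_kernel ...
///     }
///     gpu.module @foo_kernel {
///       gpu.func @foo_kernel(...) kernel { ... }
///     }
///   }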
class GpuKernelOutliningPass
    : public GpuKernelOutliningBase<GpuKernelOutliningPass> {
public:
  GpuKernelOutliningPass(StringRef dlStr) {
    if (!dlStr.empty() && !dataLayoutStr.hasValue())
      dataLayoutStr = dlStr.str();
  }

  GpuKernelOutliningPass(const GpuKernelOutliningPass &other)
      : dataLayoutSpec(other.dataLayoutSpec) {
    dataLayoutStr = other.dataLayoutStr;
  }

  LogicalResult initialize(MLIRContext *context) override {
    // Initialize the data layout specification from the data layout string.
    if (!dataLayoutStr.empty()) {
      Attribute resultAttr = mlir::parseAttribute(dataLayoutStr, context);
      if (!resultAttr)
        return failure();

      dataLayoutSpec = resultAttr.dyn_cast<DataLayoutSpecInterface>();
      if (!dataLayoutSpec)
        return failure();
    }

    return success();
  }

  void runOnOperation() override {
    SymbolTable symbolTable(getOperation());
    bool modified = false;
    for (auto func : getOperation().getOps<FuncOp>()) {
      // Insert just after the function.
      Block::iterator insertPt(func->getNextNode());
      auto funcWalkResult = func.walk([&](gpu::LaunchOp op) {
        SetVector<Value> operands;
        std::string kernelFnName =
            Twine(op->getParentOfType<FuncOp>().getName(), "_kernel").str();

        gpu::GPUFuncOp outlinedFunc =
            outlineKernelFuncImpl(op, kernelFnName, operands);

        // Create a nested module and insert outlinedFunc. The module will
        // originally get the same name as the function, but may be renamed on
        // insertion into the parent module.
        auto kernelModule = createKernelModule(outlinedFunc, symbolTable);
        symbolTable.insert(kernelModule, insertPt);

        // Potentially changes signature, pulling in constants.
        convertToLaunchFuncOp(op, outlinedFunc, operands.getArrayRef());
        modified = true;
        return WalkResult::advance();
      });
      if (funcWalkResult.wasInterrupted())
        return signalPassFailure();
    }

    // If any new module was inserted in this module, annotate this module as
    // a container module.
    if (modified)
      getOperation()->setAttr(gpu::GPUDialect::getContainerModuleAttrName(),
                              UnitAttr::get(&getContext()));
  }

private:
  /// Returns a gpu.module containing kernelFunc and all callees (recursive).
  gpu::GPUModuleOp createKernelModule(gpu::GPUFuncOp kernelFunc,
                                      const SymbolTable &parentSymbolTable) {
    // TODO: This code cannot use an OpBuilder because it must be inserted into
    // a SymbolTable by the caller. SymbolTable needs to be refactored to
    // prevent manual building of Ops with symbols in code using SymbolTables
    // and then this needs to use the OpBuilder.
    auto *context = getOperation().getContext();
    OpBuilder builder(context);
    auto kernelModule = builder.create<gpu::GPUModuleOp>(kernelFunc.getLoc(),
                                                         kernelFunc.getName());

    // If a valid data layout spec was provided, attach it to the kernel module.
    // Otherwise, the default data layout will be used.
    if (dataLayoutSpec)
      kernelModule->setAttr(DLTIDialect::kDataLayoutAttrName, dataLayoutSpec);

    SymbolTable symbolTable(kernelModule);
    symbolTable.insert(kernelFunc);

    SmallVector<Operation *, 8> symbolDefWorklist = {kernelFunc};
    while (!symbolDefWorklist.empty()) {
      if (Optional<SymbolTable::UseRange> symbolUses =
              SymbolTable::getSymbolUses(symbolDefWorklist.pop_back_val())) {
        for (SymbolTable::SymbolUse symbolUse : *symbolUses) {
          StringRef symbolName =
              symbolUse.getSymbolRef().cast<FlatSymbolRefAttr>().getValue();
          if (symbolTable.lookup(symbolName))
            continue;

          Operation *symbolDefClone =
              parentSymbolTable.lookup(symbolName)->clone();
          symbolDefWorklist.push_back(symbolDefClone);
          symbolTable.insert(symbolDefClone);
        }
      }
    }

    return kernelModule;
  }

  Option<std::string> dataLayoutStr{
      *this, "data-layout-str",
      llvm::cl::desc("String containing the data layout specification to be "
                     "attached to the GPU kernel module")};
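  // An illustrative value for this option (assuming the DLTI attribute
  // syntax; the concrete entry is hypothetical):
  //   data-layout-str="#dlti.dl_spec<#dlti.dl_entry<index, 32 : i32>>"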

  DataLayoutSpecInterface dataLayoutSpec;
};

} // namespace

std::unique_ptr<Pass> mlir::createGpuLauchSinkIndexComputationsPass() {
  return std::make_unique<GpuLaunchSinkIndexComputationsPass>();
}

std::unique_ptr<OperationPass<ModuleOp>>
mlir::createGpuKernelOutliningPass(StringRef dataLayoutStr) {
  return std::make_unique<GpuKernelOutliningPass>(dataLayoutStr);
}