//===- KernelOutlining.cpp - Implementation of GPU kernel outlining ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the GPU dialect kernel outlining pass.
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/ControlFlow/IR/ControlFlowOps.h"
#include "mlir/Dialect/DLTI/DLTI.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/GPU/Passes.h"
#include "mlir/Dialect/GPU/Utils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/RegionUtils.h"

using namespace mlir;

template <typename OpTy>
static void createForAllDimensions(OpBuilder &builder, Location loc,
                                   SmallVectorImpl<Value> &values) {
  for (auto dim : {gpu::Dimension::x, gpu::Dimension::y, gpu::Dimension::z})
    values.push_back(builder.create<OpTy>(loc, builder.getIndexType(), dim));
}

/// Adds operations generating block/thread ids and grid/block dimensions at
/// the beginning of the `launchFuncOpBody` region. Also adds mappings from the
/// arguments of the entry block of `launchOpBody` to the corresponding results
/// of the added operations.
static void injectGpuIndexOperations(Location loc, Region &launchFuncOpBody,
                                     Region &launchOpBody,
                                     BlockAndValueMapping &map) {
  OpBuilder builder(loc->getContext());
  Block &firstBlock = launchOpBody.front();
  builder.setInsertionPointToStart(&launchFuncOpBody.front());
  SmallVector<Value, 12> indexOps;
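  // Materialize the 12 index values (block/thread ids and grid/block
  // dimensions, three dimensions each) that the leading entry-block arguments
  // of the launch body stand for.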
  createForAllDimensions<gpu::BlockIdOp>(builder, loc, indexOps);
  createForAllDimensions<gpu::ThreadIdOp>(builder, loc, indexOps);
  createForAllDimensions<gpu::GridDimOp>(builder, loc, indexOps);
  createForAllDimensions<gpu::BlockDimOp>(builder, loc, indexOps);
  // Map the leading 12 arguments of the launch body's entry block to the
  // corresponding thread/block index operations just created.
  for (const auto &indexOp : enumerate(indexOps))
    map.map(firstBlock.getArgument(indexOp.index()), indexOp.value());
}

/// Identifies operations that are beneficial to sink into kernels. Such
/// operations must not have side effects, as otherwise sinking (and hence
/// duplicating) them is not legal.
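/// Constants and cheap index arithmetic (`memref.dim`, `arith.select`,
/// `arith.cmpi`) fall into this category, as matched below.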
static bool isLikelyAnIndexComputation(Operation *op) {
  return matchPattern(op, m_Constant()) ||
         isa<memref::DimOp, arith::SelectOp, arith::CmpIOp>(op);
}

/// For a given operation `op`, computes whether it is beneficial to sink the
/// operation into the kernel. An operation can be sunk if doing so does not
/// introduce new kernel arguments. Whether a value is already available in the
/// kernel (and hence does not introduce new arguments) is checked by
/// querying `existingDependencies` and `availableValues`.
/// If an operand is not yet available, we recursively check whether it can be
/// made available by sinking its defining op.
/// Operations that are identified for sinking are added to `beneficiaryOps` in
/// the order they should appear in the kernel. Furthermore, `availableValues`
/// is updated with results that will be available after sinking the identified
/// ops.
static bool extractBeneficiaryOps(
    Operation *op, const SetVector<Value> &existingDependencies,
    SetVector<Operation *> &beneficiaryOps,
    llvm::SmallPtrSetImpl<Value> &availableValues,
    llvm::function_ref<bool(Operation *)> isSinkingBeneficiary) {
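  // Ops already identified for sinking are legal to sink and their results are
  // available; nothing more to do.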
  if (beneficiaryOps.count(op))
    return true;

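  // Only consider ops that the caller deems beneficial (and legal) to
  // duplicate into the kernel.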
  if (!isSinkingBeneficiary(op))
    return false;

  for (Value operand : op->getOperands()) {
    // The operand is already visible in the kernel; keep going.
    if (availableValues.count(operand))
      continue;
    // Otherwise, check whether it is an existing dependency or can be made
    // available by sinking its defining op.
    Operation *definingOp = operand.getDefiningOp();
    if ((!definingOp || !extractBeneficiaryOps(definingOp, existingDependencies,
                                               beneficiaryOps, availableValues,
                                               isSinkingBeneficiary)) &&
        !existingDependencies.count(operand))
      return false;
  }
  // We will sink the operation; mark its results as now available.
  beneficiaryOps.insert(op);
  for (Value result : op->getResults())
    availableValues.insert(result);
  return true;
}

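/// Sinks beneficial, side-effect free operations into the body of `launchOp`
/// so that their results need not be passed into the outlined kernel as
/// arguments. An illustrative sketch (IR abbreviated):
///
///   %c0 = arith.constant 0 : index
///   gpu.launch ... {
///     ... uses %c0 ...   // After sinking, a clone of the constant is created
///   }                    // inside the body and uses in the region point at it.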
LogicalResult mlir::sinkOperationsIntoLaunchOp(
    gpu::LaunchOp launchOp,
    llvm::function_ref<bool(Operation *)> isSinkingBeneficiary) {
  assert(isSinkingBeneficiary);
  Region &launchOpBody = launchOp.body();

  // Identify values defined outside of the launch operation that are used
  // inside its body.
  SetVector<Value> sinkCandidates;
  getUsedValuesDefinedAbove(launchOpBody, sinkCandidates);

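  // Collect the operations to sink, in an order in which defs precede uses.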
  SetVector<Operation *> toBeSunk;
  llvm::SmallPtrSet<Value, 4> availableValues;
  for (Value operand : sinkCandidates) {
    Operation *operandOp = operand.getDefiningOp();
    if (!operandOp)
      continue;
    extractBeneficiaryOps(operandOp, sinkCandidates, toBeSunk, availableValues,
                          isSinkingBeneficiary);
  }

  // Clone the operations in order, so that defs are cloned before their uses.
  BlockAndValueMapping map;
  OpBuilder builder(launchOpBody);
  for (Operation *op : toBeSunk) {
    Operation *clonedOp = builder.clone(*op, map);
    // Only replace uses within the launch op.
    for (auto pair : llvm::zip(op->getResults(), clonedOp->getResults()))
      replaceAllUsesInRegionWith(std::get<0>(pair), std::get<1>(pair),
                                 launchOp.body());
  }
  return success();
}

/// Outline the `gpu.launch` operation body into a kernel function. Replace
/// `gpu.terminator` operations with `gpu.return` in the generated function.
static gpu::GPUFuncOp outlineKernelFuncImpl(gpu::LaunchOp launchOp,
                                            StringRef kernelFnName,
                                            SetVector<Value> &operands) {
  Location loc = launchOp.getLoc();
  // Create a builder with no insertion point; the function is inserted into a
  // symbol table separately by the caller.
  OpBuilder builder(launchOp.getContext());
  Region &launchOpBody = launchOp.body();

  // Identify values defined outside of the launch operation that are used
  // inside its body.
  getUsedValuesDefinedAbove(launchOpBody, operands);

  // Create the gpu.func operation.
  SmallVector<Type, 4> kernelOperandTypes;
  kernelOperandTypes.reserve(operands.size());
  for (Value operand : operands) {
    kernelOperandTypes.push_back(operand.getType());
  }
  FunctionType type =
      FunctionType::get(launchOp.getContext(), kernelOperandTypes, {});
  auto outlinedFunc = builder.create<gpu::GPUFuncOp>(loc, kernelFnName, type);
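  // Mark the outlined function as a kernel so it can be the target of a
  // gpu.launch_func.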
  outlinedFunc->setAttr(gpu::GPUDialect::getKernelFuncAttrName(),
                        builder.getUnitAttr());
  BlockAndValueMapping map;

  // Map the arguments corresponding to the launch parameters like blockIdx,
  // threadIdx, etc.
  Region &outlinedFuncBody = outlinedFunc.body();
  injectGpuIndexOperations(loc, outlinedFuncBody, launchOpBody, map);

  // Map arguments from the gpu.launch region to the arguments of the gpu.func
  // operation.
  Block &entryBlock = outlinedFuncBody.front();
  for (const auto &operand : enumerate(operands))
    map.map(operand.value(), entryBlock.getArgument(operand.index()));

  // Clone the region of the gpu.launch operation into the gpu.func operation.
  // TODO: If cloneInto can be modified such that if a mapping for
  // a block exists, that block will be used to clone operations into (at the
  // end of the block), instead of creating a new block, this would be much
  // cleaner.
  launchOpBody.cloneInto(&outlinedFuncBody, map);

  // Branch from the entry block of the gpu.func operation to the block that is
  // cloned from the entry block of the gpu.launch operation.
  Block &launchOpEntry = launchOpBody.front();
  Block *clonedLaunchOpEntry = map.lookup(&launchOpEntry);
  builder.setInsertionPointToEnd(&entryBlock);
  builder.create<cf::BranchOp>(loc, clonedLaunchOpEntry);

  outlinedFunc.walk([](gpu::TerminatorOp op) {
    OpBuilder replacer(op);
    replacer.create<gpu::ReturnOp>(op.getLoc());
    op.erase();
  });
  return outlinedFunc;
}

gpu::GPUFuncOp mlir::outlineKernelFunc(gpu::LaunchOp launchOp,
                                       StringRef kernelFnName,
                                       llvm::SmallVectorImpl<Value> &operands) {
  DenseSet<Value> inputOperandSet;
  inputOperandSet.insert(operands.begin(), operands.end());
  SetVector<Value> operandSet(operands.begin(), operands.end());
  auto funcOp = outlineKernelFuncImpl(launchOp, kernelFnName, operandSet);
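  // Append operands that were discovered during outlining but not supplied by
  // the caller.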
  for (auto operand : operandSet) {
    if (!inputOperandSet.count(operand))
      operands.push_back(operand);
  }
  return funcOp;
}

/// Replace `gpu.launch` operations with a `gpu.launch_func` operation
/// launching `kernelFunc`. The kernel func contains the body of the
/// `gpu.launch` with constant region arguments inlined.
static void convertToLaunchFuncOp(gpu::LaunchOp launchOp,
                                  gpu::GPUFuncOp kernelFunc,
                                  ValueRange operands) {
  OpBuilder builder(launchOp);
  // The launch op has an optional dynamic shared memory size. If it doesn't
  // exist, we use zero.
  builder.create<gpu::LaunchFuncOp>(
      launchOp.getLoc(), kernelFunc, launchOp.getGridSizeOperandValues(),
      launchOp.getBlockSizeOperandValues(), launchOp.dynamicSharedMemorySize(),
      operands);
  launchOp.erase();
}

namespace {
/// Pass that sinks ops that are likely index computations into the gpu.launch
/// body.
class GpuLaunchSinkIndexComputationsPass
    : public GpuLaunchSinkIndexComputationsBase<
          GpuLaunchSinkIndexComputationsPass> {
public:
  void runOnOperation() override {
    Operation *op = getOperation();
    if (op->walk([](gpu::LaunchOp launch) {
            // Pull in ops that can legally be sunk.
            if (failed(sinkOperationsIntoLaunchOp(launch,
                                                  isLikelyAnIndexComputation)))
              return WalkResult::interrupt();

            return WalkResult::advance();
          }).wasInterrupted())
      signalPassFailure();
  }
};

/// Pass that moves the kernel of each LaunchOp into its separate nested module.
///
/// This pass moves the kernel code of each LaunchOp into a function created
/// inside a nested module. It also creates an external function of the same
/// name in the parent module.
///
/// The gpu.modules are intended to be compiled to a cubin blob independently in
/// a separate pass. The external functions can then be annotated with the
/// symbol of the cubin accessor function.
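///
/// An illustrative sketch of the result (the `_kernel` suffix follows the
/// naming convention used below; the exact IR depends on the input):
///
///   func.func @foo(...) {
///     gpu.launch_func @foo_kernel::@foo_kernel blocks in (...) threads in (...)
///   }
///   gpu.module @foo_kernel {
///     gpu.func @foo_kernel(...) kernel { ... }
///   }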
class GpuKernelOutliningPass
    : public GpuKernelOutliningBase<GpuKernelOutliningPass> {
public:
  GpuKernelOutliningPass(StringRef dlStr) {
    if (!dlStr.empty() && !dataLayoutStr.hasValue())
      dataLayoutStr = dlStr.str();
  }

  GpuKernelOutliningPass(const GpuKernelOutliningPass &other)
      : dataLayoutSpec(other.dataLayoutSpec) {
    dataLayoutStr = other.dataLayoutStr;
  }

  LogicalResult initialize(MLIRContext *context) override {
    // Initialize the data layout specification from the data layout string.
    if (!dataLayoutStr.empty()) {
      Attribute resultAttr = mlir::parseAttribute(dataLayoutStr, context);
      if (!resultAttr)
        return failure();

      dataLayoutSpec = resultAttr.dyn_cast<DataLayoutSpecInterface>();
      if (!dataLayoutSpec)
        return failure();
    }

    return success();
  }

  void runOnOperation() override {
    SymbolTable symbolTable(getOperation());
    bool modified = false;
    for (auto func : getOperation().getOps<FuncOp>()) {
      // Insert just after the function.
      Block::iterator insertPt(func->getNextNode());
      auto funcWalkResult = func.walk([&](gpu::LaunchOp op) {
        SetVector<Value> operands;
        std::string kernelFnName =
            Twine(op->getParentOfType<FuncOp>().getName(), "_kernel").str();

        gpu::GPUFuncOp outlinedFunc =
            outlineKernelFuncImpl(op, kernelFnName, operands);

        // Create a nested module and insert outlinedFunc. The module will
        // initially get the same name as the function, but may be renamed on
        // insertion into the parent module.
        auto kernelModule = createKernelModule(outlinedFunc, symbolTable);
        symbolTable.insert(kernelModule, insertPt);

        // Potentially changes the signature, pulling in constants.
        convertToLaunchFuncOp(op, outlinedFunc, operands.getArrayRef());
        modified = true;
        return WalkResult::advance();
      });
      if (funcWalkResult.wasInterrupted())
        return signalPassFailure();
    }

    // If any kernel module was inserted, annotate the top-level module as a
    // container module.
    if (modified)
      getOperation()->setAttr(gpu::GPUDialect::getContainerModuleAttrName(),
                              UnitAttr::get(&getContext()));
  }

private:
  /// Returns a gpu.module containing kernelFunc and all callees (recursive).
  gpu::GPUModuleOp createKernelModule(gpu::GPUFuncOp kernelFunc,
                                      const SymbolTable &parentSymbolTable) {
    // TODO: This code cannot use an OpBuilder because it must be inserted into
    // a SymbolTable by the caller. SymbolTable needs to be refactored to
    // prevent manual building of Ops with symbols in code using SymbolTables
    // and then this needs to use the OpBuilder.
    auto *context = getOperation().getContext();
    OpBuilder builder(context);
    auto kernelModule = builder.create<gpu::GPUModuleOp>(kernelFunc.getLoc(),
                                                         kernelFunc.getName());

    // If a valid data layout spec was provided, attach it to the kernel module.
    // Otherwise, the default data layout will be used.
    if (dataLayoutSpec)
      kernelModule->setAttr(DLTIDialect::kDataLayoutAttrName, dataLayoutSpec);

    SymbolTable symbolTable(kernelModule);
    symbolTable.insert(kernelFunc);

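    // Transitively clone any symbol the kernel references (e.g., callees) from
    // the parent module into the kernel module.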
    SmallVector<Operation *, 8> symbolDefWorklist = {kernelFunc};
    while (!symbolDefWorklist.empty()) {
      if (Optional<SymbolTable::UseRange> symbolUses =
              SymbolTable::getSymbolUses(symbolDefWorklist.pop_back_val())) {
        for (SymbolTable::SymbolUse symbolUse : *symbolUses) {
          StringRef symbolName =
              symbolUse.getSymbolRef().cast<FlatSymbolRefAttr>().getValue();
          if (symbolTable.lookup(symbolName))
            continue;

          Operation *symbolDefClone =
              parentSymbolTable.lookup(symbolName)->clone();
          symbolDefWorklist.push_back(symbolDefClone);
          symbolTable.insert(symbolDefClone);
        }
      }
    }

    return kernelModule;
  }

  Option<std::string> dataLayoutStr{
      *this, "data-layout-str",
      llvm::cl::desc("String containing the data layout specification to be "
                     "attached to the GPU kernel module")};

  DataLayoutSpecInterface dataLayoutSpec;
};

} // namespace

std::unique_ptr<Pass> mlir::createGpuLauchSinkIndexComputationsPass() {
  return std::make_unique<GpuLaunchSinkIndexComputationsPass>();
}

std::unique_ptr<OperationPass<ModuleOp>>
mlir::createGpuKernelOutliningPass(StringRef dataLayoutStr) {
  return std::make_unique<GpuKernelOutliningPass>(dataLayoutStr);
}