//===- KernelOutlining.cpp - Implementation of GPU kernel outlining -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the GPU dialect kernel outlining pass.
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/AsmParser/AsmParser.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/ControlFlow/IR/ControlFlowOps.h"
#include "mlir/Dialect/DLTI/DLTI.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/GPU/Transforms/Passes.h"
#include "mlir/Dialect/GPU/Transforms/Utils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/RegionUtils.h"

using namespace mlir;

template <typename OpTy>
static void createForAllDimensions(OpBuilder &builder, Location loc,
                                   SmallVectorImpl<Value> &values) {
  for (auto dim : {gpu::Dimension::x, gpu::Dimension::y, gpu::Dimension::z})
    values.push_back(builder.create<OpTy>(loc, builder.getIndexType(), dim));
}

/// Adds operations generating block/thread ids and grid/block dimensions at
/// the beginning of the `launchFuncOpBody` region, and maps each argument of
/// the entry block of `launchOpBody` to the corresponding result value of the
/// added operations.
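///
/// A minimal sketch of the injected prologue (SSA names are illustrative); the
/// creation order matches the 12 leading arguments of the launch region
/// (block ids, thread ids, grid dims, block dims):
///
///   %bid_x = gpu.block_id x
///   %bid_y = gpu.block_id y
///   %bid_z = gpu.block_id z
///   %tid_x = gpu.thread_id x
///   ...
///   %bdim_z = gpu.block_dim z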
static void injectGpuIndexOperations(Location loc, Region &launchFuncOpBody,
                                     Region &launchOpBody,
                                     BlockAndValueMapping &map) {
  OpBuilder builder(loc->getContext());
  Block &firstBlock = launchOpBody.front();
  builder.setInsertionPointToStart(&launchFuncOpBody.front());
  SmallVector<Value, 12> indexOps;
  createForAllDimensions<gpu::BlockIdOp>(builder, loc, indexOps);
  createForAllDimensions<gpu::ThreadIdOp>(builder, loc, indexOps);
  createForAllDimensions<gpu::GridDimOp>(builder, loc, indexOps);
  createForAllDimensions<gpu::BlockDimOp>(builder, loc, indexOps);
  // Map the leading 12 block arguments to the respective thread/block index
  // operations; the uses of these arguments are redirected to the index
  // operations when the launch body is cloned with this mapping.
  for (const auto &indexOp : enumerate(indexOps))
    map.map(firstBlock.getArgument(indexOp.index()), indexOp.value());
}

/// Identifies operations that are beneficial to sink into kernels. These
/// operations must not have side effects, as otherwise sinking (and hence
/// duplicating them) is not legal.
static bool isLikelyAnIndexComputation(Operation *op) {
  return matchPattern(op, m_Constant()) ||
         isa<memref::DimOp, arith::SelectOp, arith::CmpIOp>(op);
}

/// For a given operation `op`, computes whether it is beneficial to sink the
/// operation into the kernel. An operation can be sunk if doing so does not
/// introduce new kernel arguments. Whether a value is already available in the
/// kernel (and hence does not introduce new arguments) is checked by
/// querying `existingDependencies` and `availableValues`.
/// If an operand is not yet available, we recursively check whether it can be
/// made available by sinking its defining op.
/// Operations that are identified for sinking are added to `beneficiaryOps` in
/// the order they should appear in the kernel. Furthermore, `availableValues`
/// is updated with results that will be available after sinking the identified
/// ops.
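///
/// A minimal illustration (op and value names are made up): given the chain
///
///   %c0  = arith.constant 0 : index
///   %dim = memref.dim %arg, %c0 : memref<?xf32>
///
/// with `%arg` in `existingDependencies`, visiting the defining op of `%dim`
/// recurses into the constant, records both ops in `beneficiaryOps` (constant
/// first), and adds `%c0` and `%dim` to `availableValues`.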
static bool extractBeneficiaryOps(
    Operation *op, const SetVector<Value> &existingDependencies,
    SetVector<Operation *> &beneficiaryOps,
    llvm::SmallPtrSetImpl<Value> &availableValues,
    llvm::function_ref<bool(Operation *)> isSinkingBeneficiary) {
  if (beneficiaryOps.count(op))
    return true;

  if (!isSinkingBeneficiary(op))
    return false;

  for (Value operand : op->getOperands()) {
    // It is already visible in the kernel, keep going.
    if (availableValues.count(operand))
      continue;
    // Else check whether it can be made available via sinking or already is a
    // dependency.
    Operation *definingOp = operand.getDefiningOp();
    if ((!definingOp || !extractBeneficiaryOps(definingOp, existingDependencies,
                                               beneficiaryOps, availableValues,
                                               isSinkingBeneficiary)) &&
        !existingDependencies.count(operand))
      return false;
  }
  // We will sink the operation, mark its results as now available.
  beneficiaryOps.insert(op);
  for (Value result : op->getResults())
    availableValues.insert(result);
  return true;
}

LogicalResult mlir::sinkOperationsIntoLaunchOp(
    gpu::LaunchOp launchOp,
    llvm::function_ref<bool(Operation *)> isSinkingBeneficiary) {
  assert(isSinkingBeneficiary);
  Region &launchOpBody = launchOp.body();

  // Identify uses from values defined outside of the scope of the launch
  // operation.
  SetVector<Value> sinkCandidates;
  getUsedValuesDefinedAbove(launchOpBody, sinkCandidates);

  SetVector<Operation *> toBeSunk;
  llvm::SmallPtrSet<Value, 4> availableValues;
  for (Value operand : sinkCandidates) {
    Operation *operandOp = operand.getDefiningOp();
    if (!operandOp)
      continue;
    extractBeneficiaryOps(operandOp, sinkCandidates, toBeSunk, availableValues,
                          isSinkingBeneficiary);
  }

  // Insert operations so that the defs get cloned before uses.
  BlockAndValueMapping map;
  OpBuilder builder(launchOpBody);
  for (Operation *op : toBeSunk) {
    Operation *clonedOp = builder.clone(*op, map);
    // Only replace uses within the launch op.
    for (auto pair : llvm::zip(op->getResults(), clonedOp->getResults()))
      replaceAllUsesInRegionWith(std::get<0>(pair), std::get<1>(pair),
                                 launchOp.body());
  }
  return success();
}

/// Outline the `gpu.launch` operation body into a kernel function. Replace
/// `gpu.terminator` operations by `gpu.return` in the generated function.
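///
/// As a rough before/after sketch (names, types, and sizes are illustrative),
/// the body of
///
///   gpu.launch blocks(%bx, %by, %bz) in (...) threads(%tx, %ty, %tz) in (...) {
///     memref.store %v, %buf[%tx] : memref<16xf32>
///     gpu.terminator
///   }
///
/// becomes a kernel along the lines of
///
///   gpu.func @foo_kernel(%v: f32, %buf: memref<16xf32>) kernel {
///     %tx = gpu.thread_id x
///     ...
///     memref.store %v, %buf[%tx] : memref<16xf32>
///     gpu.return
///   }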
static gpu::GPUFuncOp outlineKernelFuncImpl(gpu::LaunchOp launchOp,
                                            StringRef kernelFnName,
                                            SetVector<Value> &operands) {
  Location loc = launchOp.getLoc();
  // Create a builder with no insertion point; insertion will happen separately
  // due to symbol table manipulation.
  OpBuilder builder(launchOp.getContext());
  Region &launchOpBody = launchOp.body();

  // Identify uses from values defined outside of the scope of the launch
  // operation.
  getUsedValuesDefinedAbove(launchOpBody, operands);

  // Create the gpu.func operation.
  SmallVector<Type, 4> kernelOperandTypes;
  kernelOperandTypes.reserve(operands.size());
  for (Value operand : operands) {
    kernelOperandTypes.push_back(operand.getType());
  }
  FunctionType type =
      FunctionType::get(launchOp.getContext(), kernelOperandTypes, {});
  auto outlinedFunc = builder.create<gpu::GPUFuncOp>(loc, kernelFnName, type);
  outlinedFunc->setAttr(gpu::GPUDialect::getKernelFuncAttrName(),
                        builder.getUnitAttr());
  BlockAndValueMapping map;

  // Map the arguments corresponding to the launch parameters like blockIdx,
  // threadIdx, etc.
  Region &outlinedFuncBody = outlinedFunc.body();
  injectGpuIndexOperations(loc, outlinedFuncBody, launchOpBody, map);

  // Map the values defined above the gpu.launch (the operands of the outlined
  // function) to the corresponding arguments of the gpu.func operation.
  Block &entryBlock = outlinedFuncBody.front();
  for (const auto &operand : enumerate(operands))
    map.map(operand.value(), entryBlock.getArgument(operand.index()));

  // Clone the region of the gpu.launch operation into the gpu.func operation.
  // TODO: If cloneInto can be modified such that if a mapping for
  // a block exists, that block will be used to clone operations into (at the
  // end of the block), instead of creating a new block, this would be much
  // cleaner.
  launchOpBody.cloneInto(&outlinedFuncBody, map);

  // Branch from entry of the gpu.func operation to the block that is cloned
  // from the entry block of the gpu.launch operation.
  Block &launchOpEntry = launchOpBody.front();
  Block *clonedLaunchOpEntry = map.lookup(&launchOpEntry);
  builder.setInsertionPointToEnd(&entryBlock);
  builder.create<cf::BranchOp>(loc, clonedLaunchOpEntry);

  outlinedFunc.walk([](gpu::TerminatorOp op) {
    OpBuilder replacer(op);
    replacer.create<gpu::ReturnOp>(op.getLoc());
    op.erase();
  });
  return outlinedFunc;
}

gpu::GPUFuncOp mlir::outlineKernelFunc(gpu::LaunchOp launchOp,
                                       StringRef kernelFnName,
                                       llvm::SmallVectorImpl<Value> &operands) {
  DenseSet<Value> inputOperandSet;
  inputOperandSet.insert(operands.begin(), operands.end());
  SetVector<Value> operandSet(operands.begin(), operands.end());
  auto funcOp = outlineKernelFuncImpl(launchOp, kernelFnName, operandSet);
  for (auto operand : operandSet) {
    if (!inputOperandSet.count(operand))
      operands.push_back(operand);
  }
  return funcOp;
}

/// Replace `gpu.launch` operations with a `gpu.launch_func` operation
/// launching `kernelFunc`. The kernel func contains the body of the
/// `gpu.launch` with constant region arguments inlined.
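///
/// A minimal sketch of the replacement (symbol and value names are
/// illustrative):
///
///   gpu.launch_func @foo_kernel::@foo_kernel
///       blocks in (%gx, %gy, %gz) threads in (%sx, %sy, %sz)
///       args(%v : f32, %buf : memref<16xf32>)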
static void convertToLaunchFuncOp(gpu::LaunchOp launchOp,
                                  gpu::GPUFuncOp kernelFunc,
                                  ValueRange operands) {
  OpBuilder builder(launchOp);
  // The launch op has an optional dynamic shared memory size. If it doesn't
  // exist, we use zero.
  Value asyncToken = launchOp.asyncToken();
  auto launchFunc = builder.create<gpu::LaunchFuncOp>(
      launchOp.getLoc(), kernelFunc, launchOp.getGridSizeOperandValues(),
      launchOp.getBlockSizeOperandValues(), launchOp.dynamicSharedMemorySize(),
      operands, asyncToken ? asyncToken.getType() : nullptr,
      launchOp.asyncDependencies());
  launchOp.replaceAllUsesWith(launchFunc);
  launchOp.erase();
}

namespace {
/// Pass that moves ops which are likely an index computation into gpu.launch
/// body.
class GpuLaunchSinkIndexComputationsPass
    : public GpuLaunchSinkIndexComputationsBase<
          GpuLaunchSinkIndexComputationsPass> {
public:
  void runOnOperation() override {
    Operation *op = getOperation();
    if (op->walk([](gpu::LaunchOp launch) {
            // Pull in instructions that can be sunk
            if (failed(sinkOperationsIntoLaunchOp(launch,
                                                  isLikelyAnIndexComputation)))
              return WalkResult::interrupt();

            return WalkResult::advance();
          }).wasInterrupted())
      signalPassFailure();
  }
};

/// Pass that moves the kernel of each LaunchOp into its separate nested module.
///
/// This pass moves the kernel code of each LaunchOp into a function created
/// inside a nested module. It also creates an external function of the same
/// name in the parent module.
///
/// The gpu.modules are intended to be compiled to a cubin blob independently in
/// a separate pass. The external functions can then be annotated with the
/// symbol of the cubin accessor function.
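///
/// The resulting structure is roughly as follows (names are illustrative):
///
///   module attributes {gpu.container_module} {
///     func.func @foo(...) {
///       gpu.launch_func @foo_kernel::@foo_kernel ...
///     }
///     gpu.module @foo_kernel {
///       gpu.func @foo_kernel(...) kernel { ... }
///     }
///   }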
class GpuKernelOutliningPass
    : public GpuKernelOutliningBase<GpuKernelOutliningPass> {
public:
  GpuKernelOutliningPass(StringRef dlStr) {
    if (!dlStr.empty() && !dataLayoutStr.hasValue())
      dataLayoutStr = dlStr.str();
  }

  GpuKernelOutliningPass(const GpuKernelOutliningPass &other)
      : GpuKernelOutliningBase(other), dataLayoutSpec(other.dataLayoutSpec) {
    dataLayoutStr = other.dataLayoutStr.getValue();
  }

  LogicalResult initialize(MLIRContext *context) override {
    // Initialize the data layout specification from the data layout string.
    if (!dataLayoutStr.empty()) {
      Attribute resultAttr = mlir::parseAttribute(dataLayoutStr, context);
      if (!resultAttr)
        return failure();

      dataLayoutSpec = resultAttr.dyn_cast<DataLayoutSpecInterface>();
      if (!dataLayoutSpec)
        return failure();
    }

    return success();
  }

  void runOnOperation() override {
    SymbolTable symbolTable(getOperation());
    bool modified = false;
    for (auto func : getOperation().getOps<func::FuncOp>()) {
      // Insert just after the function.
      Block::iterator insertPt(func->getNextNode());
      auto funcWalkResult = func.walk([&](gpu::LaunchOp op) {
        SetVector<Value> operands;
        std::string kernelFnName =
            Twine(op->getParentOfType<func::FuncOp>().getName(), "_kernel")
                .str();

        gpu::GPUFuncOp outlinedFunc =
            outlineKernelFuncImpl(op, kernelFnName, operands);

        // Create nested module and insert outlinedFunc. The module will
        // originally get the same name as the function, but may be renamed on
        // insertion into the parent module.
        auto kernelModule = createKernelModule(outlinedFunc, symbolTable);
        symbolTable.insert(kernelModule, insertPt);

        // Potentially changes signature, pulling in constants.
        convertToLaunchFuncOp(op, outlinedFunc, operands.getArrayRef());
        modified = true;
        return WalkResult::advance();
      });
      if (funcWalkResult.wasInterrupted())
        return signalPassFailure();
    }

    // If any new module was inserted in this module, annotate this module as
    // a container module.
    if (modified)
      getOperation()->setAttr(gpu::GPUDialect::getContainerModuleAttrName(),
                              UnitAttr::get(&getContext()));
  }

private:
  /// Returns a gpu.module containing kernelFunc and all callees (recursive).
  gpu::GPUModuleOp createKernelModule(gpu::GPUFuncOp kernelFunc,
                                      const SymbolTable &parentSymbolTable) {
    // TODO: This code cannot use an OpBuilder because it must be inserted into
    // a SymbolTable by the caller. SymbolTable needs to be refactored to
    // prevent manual building of Ops with symbols in code using SymbolTables
    // and then this needs to use the OpBuilder.
    auto *context = getOperation().getContext();
    OpBuilder builder(context);
    auto kernelModule = builder.create<gpu::GPUModuleOp>(kernelFunc.getLoc(),
                                                         kernelFunc.getName());

    // If a valid data layout spec was provided, attach it to the kernel module.
    // Otherwise, the default data layout will be used.
    if (dataLayoutSpec)
      kernelModule->setAttr(DLTIDialect::kDataLayoutAttrName, dataLayoutSpec);

    SymbolTable symbolTable(kernelModule);
    symbolTable.insert(kernelFunc);

    SmallVector<Operation *, 8> symbolDefWorklist = {kernelFunc};
    while (!symbolDefWorklist.empty()) {
      if (Optional<SymbolTable::UseRange> symbolUses =
              SymbolTable::getSymbolUses(symbolDefWorklist.pop_back_val())) {
        for (SymbolTable::SymbolUse symbolUse : *symbolUses) {
          StringRef symbolName =
              symbolUse.getSymbolRef().cast<FlatSymbolRefAttr>().getValue();
          if (symbolTable.lookup(symbolName))
            continue;

          Operation *symbolDefClone =
              parentSymbolTable.lookup(symbolName)->clone();
          symbolDefWorklist.push_back(symbolDefClone);
          symbolTable.insert(symbolDefClone);
        }
      }
    }

    return kernelModule;
  }

  Option<std::string> dataLayoutStr{
      *this, "data-layout-str",
      llvm::cl::desc("String containing the data layout specification to be "
                     "attached to the GPU kernel module")};

  DataLayoutSpecInterface dataLayoutSpec;
};

} // namespace

std::unique_ptr<Pass> mlir::createGpuLauchSinkIndexComputationsPass() {
  return std::make_unique<GpuLaunchSinkIndexComputationsPass>();
}

std::unique_ptr<OperationPass<ModuleOp>>
mlir::createGpuKernelOutliningPass(StringRef dataLayoutStr) {
  return std::make_unique<GpuKernelOutliningPass>(dataLayoutStr);
}