//===- KernelOutlining.cpp - Implementation of GPU kernel outlining ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the GPU dialect kernel outlining pass.
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/GPU/Passes.h"
#include "mlir/Dialect/GPU/Utils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/RegionUtils.h"

using namespace mlir;

template <typename OpTy>
static void createForAllDimensions(OpBuilder &builder, Location loc,
                                   SmallVectorImpl<Value> &values) {
  for (StringRef dim : {"x", "y", "z"}) {
    Value v = builder.create<OpTy>(loc, builder.getIndexType(),
                                   builder.getStringAttr(dim));
    values.push_back(v);
  }
}

/// Adds operations generating block/thread ids and grid/block dimensions at
/// the beginning of the `launchFuncOpBody` region. Adds mappings from the
/// arguments of the entry block of `launchOpBody` to the corresponding result
/// values of the added operations.
static void injectGpuIndexOperations(Location loc, Region &launchFuncOpBody,
                                     Region &launchOpBody,
                                     BlockAndValueMapping &map) {
  OpBuilder builder(loc->getContext());
  Block &firstBlock = launchOpBody.front();
  builder.setInsertionPointToStart(&launchFuncOpBody.front());
  SmallVector<Value, 12> indexOps;
  createForAllDimensions<gpu::BlockIdOp>(builder, loc, indexOps);
  createForAllDimensions<gpu::ThreadIdOp>(builder, loc, indexOps);
  createForAllDimensions<gpu::GridDimOp>(builder, loc, indexOps);
  createForAllDimensions<gpu::BlockDimOp>(builder, loc, indexOps);
  // Map the leading 12 entry block arguments of the launch region to the
  // respective index operations just created.
  for (auto indexOp : enumerate(indexOps))
    map.map(firstBlock.getArgument(indexOp.index()), indexOp.value());
}

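// For illustration, a hand-written sketch of the prologue this injects into
// the outlined kernel (the exact assembly syntax of the index operations may
// differ across MLIR versions; this is not generated output):
//
//   %bid_x = "gpu.block_id"()  {dimension = "x"} : () -> index
//   %bid_y = "gpu.block_id"()  {dimension = "y"} : () -> index
//   %bid_z = "gpu.block_id"()  {dimension = "z"} : () -> index
//   %tid_x = "gpu.thread_id"() {dimension = "x"} : () -> index
//   ...
//
// and similarly for gpu.grid_dim and gpu.block_dim, for 12 values in total
// that stand in for the 12 leading block arguments of the gpu.launch region.
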
/// Identifies operations that are beneficial to sink into kernels. These
/// operations must not have side effects, as otherwise sinking (and hence
/// duplicating them) is not legal.
static bool isSinkingBeneficiary(Operation *op) {
  return isa<arith::ConstantOp, ConstantOp, memref::DimOp, SelectOp,
             arith::CmpIOp>(op);
}

/// For a given operation `op`, computes whether it is beneficial to sink the
/// operation into the kernel. An operation can be sunk if doing so does not
/// introduce new kernel arguments. Whether a value is already available in the
/// kernel (and hence does not introduce new arguments) is checked by
/// querying `existingDependencies` and `availableValues`.
/// If an operand is not yet available, we recursively check whether it can be
/// made available by sinking its defining op.
/// Operations that are identified for sinking are added to `beneficiaryOps` in
/// the order they should appear in the kernel. Furthermore, `availableValues`
/// is updated with results that will be available after sinking the identified
/// ops.
static bool
extractBeneficiaryOps(Operation *op,
                      const SetVector<Value> &existingDependencies,
                      SetVector<Operation *> &beneficiaryOps,
                      llvm::SmallPtrSetImpl<Value> &availableValues) {
  if (beneficiaryOps.count(op))
    return true;

  if (!isSinkingBeneficiary(op))
    return false;

  for (Value operand : op->getOperands()) {
    // It is already visible in the kernel, keep going.
    if (availableValues.count(operand))
      continue;
    // Else check whether it can be made available via sinking or already is a
    // dependency.
    Operation *definingOp = operand.getDefiningOp();
    if ((!definingOp ||
         !extractBeneficiaryOps(definingOp, existingDependencies,
                                beneficiaryOps, availableValues)) &&
        !existingDependencies.count(operand))
      return false;
  }
  // We will sink the operation, mark its results as now available.
  beneficiaryOps.insert(op);
  for (Value result : op->getResults())
    availableValues.insert(result);
  return true;
}

LogicalResult mlir::sinkOperationsIntoLaunchOp(gpu::LaunchOp launchOp) {
  Region &launchOpBody = launchOp.body();

  // Identify uses from values defined outside of the scope of the launch
  // operation.
  SetVector<Value> sinkCandidates;
  getUsedValuesDefinedAbove(launchOpBody, sinkCandidates);

  SetVector<Operation *> toBeSunk;
  llvm::SmallPtrSet<Value, 4> availableValues;
  for (Value operand : sinkCandidates) {
    Operation *operandOp = operand.getDefiningOp();
    if (!operandOp)
      continue;
    extractBeneficiaryOps(operandOp, sinkCandidates, toBeSunk,
                          availableValues);
  }

  // Insert operations so that the defs get cloned before the uses.
  BlockAndValueMapping map;
  OpBuilder builder(launchOpBody);
  for (Operation *op : toBeSunk) {
    Operation *clonedOp = builder.clone(*op, map);
    // Only replace uses within the launch op.
    for (auto pair : llvm::zip(op->getResults(), clonedOp->getResults()))
      replaceAllUsesInRegionWith(std::get<0>(pair), std::get<1>(pair),
                                 launchOp.body());
  }
  return success();
}

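// For illustration, a hand-written sketch of the sinking effect (syntax
// approximate, not generated output). Given a constant defined above the
// launch:
//
//   %c0 = arith.constant 0 : index
//   gpu.launch blocks(...) in (...) threads(...) in (...) {
//     ... uses %c0 ...
//     gpu.terminator
//   }
//
// the constant is cloned into the launch region, so the body no longer uses a
// value defined above and outlining will not need an extra kernel argument:
//
//   gpu.launch blocks(...) in (...) threads(...) in (...) {
//     %c0 = arith.constant 0 : index
//     ... uses %c0 ...
//     gpu.terminator
//   }
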
/// Outline the `gpu.launch` operation body into a kernel function. Replace
/// `gpu.terminator` operations by `gpu.return` in the generated function.
static gpu::GPUFuncOp outlineKernelFuncImpl(gpu::LaunchOp launchOp,
                                            StringRef kernelFnName,
                                            SetVector<Value> &operands) {
  Location loc = launchOp.getLoc();
  // Create a builder with no insertion point, insertion will happen separately
  // due to symbol table manipulation.
  OpBuilder builder(launchOp.getContext());
  Region &launchOpBody = launchOp.body();

  // Identify uses from values defined outside of the scope of the launch
  // operation.
  getUsedValuesDefinedAbove(launchOpBody, operands);

  // Create the gpu.func operation.
  SmallVector<Type, 4> kernelOperandTypes;
  kernelOperandTypes.reserve(operands.size());
  for (Value operand : operands) {
    kernelOperandTypes.push_back(operand.getType());
  }
  FunctionType type =
      FunctionType::get(launchOp.getContext(), kernelOperandTypes, {});
  auto outlinedFunc = builder.create<gpu::GPUFuncOp>(loc, kernelFnName, type);
  outlinedFunc->setAttr(gpu::GPUDialect::getKernelFuncAttrName(),
                        builder.getUnitAttr());
  BlockAndValueMapping map;

  // Map the arguments corresponding to the launch parameters like blockIdx,
  // threadIdx, etc.
  Region &outlinedFuncBody = outlinedFunc.body();
  injectGpuIndexOperations(loc, outlinedFuncBody, launchOpBody, map);

  // Map arguments from the gpu.launch region to the arguments of the gpu.func
  // operation.
  Block &entryBlock = outlinedFuncBody.front();
  for (auto operand : enumerate(operands))
    map.map(operand.value(), entryBlock.getArgument(operand.index()));

  // Clone the region of the gpu.launch operation into the gpu.func operation.
  // TODO: If cloneInto can be modified such that if a mapping for a block
  // exists, that block will be used to clone operations into (at the end of
  // the block), instead of creating a new block, this would be much cleaner.
  launchOpBody.cloneInto(&outlinedFuncBody, map);

  // Branch from the entry of the gpu.func operation to the block that is
  // cloned from the entry block of the gpu.launch operation.
  Block &launchOpEntry = launchOpBody.front();
  Block *clonedLaunchOpEntry = map.lookup(&launchOpEntry);
  builder.setInsertionPointToEnd(&entryBlock);
  builder.create<BranchOp>(loc, clonedLaunchOpEntry);

  outlinedFunc.walk([](gpu::TerminatorOp op) {
    OpBuilder replacer(op);
    replacer.create<gpu::ReturnOp>(op.getLoc());
    op.erase();
  });
  return outlinedFunc;
}

gpu::GPUFuncOp mlir::outlineKernelFunc(gpu::LaunchOp launchOp,
                                       StringRef kernelFnName,
                                       llvm::SmallVectorImpl<Value> &operands) {
  DenseSet<Value> inputOperandSet;
  inputOperandSet.insert(operands.begin(), operands.end());
  SetVector<Value> operandSet(operands.begin(), operands.end());
  auto funcOp = outlineKernelFuncImpl(launchOp, kernelFnName, operandSet);
  for (auto operand : operandSet) {
    if (!inputOperandSet.count(operand))
      operands.push_back(operand);
  }
  return funcOp;
}

/// Replace a `gpu.launch` operation with a `gpu.launch_func` operation
/// launching `kernelFunc`. The kernel func contains the body of the
/// `gpu.launch` with constant region arguments inlined.
static void convertToLaunchFuncOp(gpu::LaunchOp launchOp,
                                  gpu::GPUFuncOp kernelFunc,
                                  ValueRange operands) {
  OpBuilder builder(launchOp);
  // The launch op has an optional dynamic shared memory size. If it doesn't
  // exist, we use zero.
  builder.create<gpu::LaunchFuncOp>(
      launchOp.getLoc(), kernelFunc, launchOp.getGridSizeOperandValues(),
      launchOp.getBlockSizeOperandValues(), launchOp.dynamicSharedMemorySize(),
      operands);
  launchOp.erase();
}

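// For illustration, a hand-written sketch of the overall rewrite (names and
// syntax approximate, not generated output). A gpu.launch inside a function
// @foo is replaced by a launch of the outlined kernel:
//
//   gpu.launch_func @foo_kernel::@foo_kernel
//       blocks in (%gx, %gy, %gz) threads in (%bx, %by, %bz) args(...)
//
// with the launch body moved into a gpu.func inside a nested gpu.module:
//
//   gpu.module @foo_kernel {
//     gpu.func @foo_kernel(...) kernel {
//       ...
//       gpu.return
//     }
//   }
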
namespace {
/// Pass that moves the kernel of each LaunchOp into a separate nested module.
///
/// This pass moves the kernel code of each LaunchOp into a function created
/// inside a nested module. It also creates an external function of the same
/// name in the parent module.
///
/// The gpu.modules are intended to be compiled to a cubin blob independently
/// in a separate pass. The external functions can then be annotated with the
/// symbol of the cubin accessor function.
class GpuKernelOutliningPass
    : public GpuKernelOutliningBase<GpuKernelOutliningPass> {
public:
  void runOnOperation() override {
    SymbolTable symbolTable(getOperation());
    bool modified = false;
    for (auto func : getOperation().getOps<FuncOp>()) {
      // Insert the kernel module just after the enclosing function.
      Block::iterator insertPt(func->getNextNode());
      auto funcWalkResult = func.walk([&](gpu::LaunchOp op) {
        SetVector<Value> operands;
        std::string kernelFnName =
            Twine(op->getParentOfType<FuncOp>().getName(), "_kernel").str();

        // Pull in operations that can be sunk into the kernel.
        if (failed(sinkOperationsIntoLaunchOp(op)))
          return WalkResult::interrupt();
        gpu::GPUFuncOp outlinedFunc =
            outlineKernelFuncImpl(op, kernelFnName, operands);

        // Create the nested module and insert outlinedFunc. The module will
        // originally get the same name as the function, but may be renamed on
        // insertion into the parent module.
        auto kernelModule = createKernelModule(outlinedFunc, symbolTable);
        symbolTable.insert(kernelModule, insertPt);

        // Potentially changes signature, pulling in constants.
        convertToLaunchFuncOp(op, outlinedFunc, operands.getArrayRef());
        modified = true;
        return WalkResult::advance();
      });
      if (funcWalkResult.wasInterrupted())
        return signalPassFailure();
    }

    // If any new module was inserted into this module, annotate this module as
    // a container module.
    if (modified)
      getOperation()->setAttr(gpu::GPUDialect::getContainerModuleAttrName(),
                              UnitAttr::get(&getContext()));
  }

private:
  /// Returns a gpu.module containing kernelFunc and all callees (recursive).
  gpu::GPUModuleOp createKernelModule(gpu::GPUFuncOp kernelFunc,
                                      const SymbolTable &parentSymbolTable) {
    // TODO: This code cannot use an OpBuilder because it must be inserted into
    // a SymbolTable by the caller. SymbolTable needs to be refactored to
    // prevent manual building of Ops with symbols in code using SymbolTables
    // and then this needs to use the OpBuilder.
    auto *context = getOperation().getContext();
    OpBuilder builder(context);
    auto kernelModule = builder.create<gpu::GPUModuleOp>(kernelFunc.getLoc(),
                                                         kernelFunc.getName());
    SymbolTable symbolTable(kernelModule);
    symbolTable.insert(kernelFunc);

    SmallVector<Operation *, 8> symbolDefWorklist = {kernelFunc};
    while (!symbolDefWorklist.empty()) {
      if (Optional<SymbolTable::UseRange> symbolUses =
              SymbolTable::getSymbolUses(symbolDefWorklist.pop_back_val())) {
        for (SymbolTable::SymbolUse symbolUse : *symbolUses) {
          StringRef symbolName =
              symbolUse.getSymbolRef().cast<FlatSymbolRefAttr>().getValue();
          if (symbolTable.lookup(symbolName))
            continue;

          Operation *symbolDefClone =
              parentSymbolTable.lookup(symbolName)->clone();
          symbolDefWorklist.push_back(symbolDefClone);
          symbolTable.insert(symbolDefClone);
        }
      }
    }

    return kernelModule;
  }
};

} // namespace

std::unique_ptr<OperationPass<ModuleOp>> mlir::createGpuKernelOutliningPass() {
  return std::make_unique<GpuKernelOutliningPass>();
}
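
// Typical invocation of this pass from the command line, assuming the standard
// pass registration (the flag name may vary across MLIR versions):
//
//   mlir-opt --gpu-kernel-outlining input.mlir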