//===- GPUDialect.cpp - MLIR Dialect for GPU Kernels implementation -------===//
//
// Part of the MLIR Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the GPU kernel-related dialect and its operations.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/StandardOps/Ops.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/Function.h"
#include "mlir/IR/FunctionImplementation.h"
#include "mlir/IR/Module.h"
#include "mlir/IR/OpImplementation.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/StandardTypes.h"

using namespace mlir;
using namespace mlir::gpu;

//===----------------------------------------------------------------------===//
// GPUDialect
//===----------------------------------------------------------------------===//

StringRef GPUDialect::getDialectName() { return "gpu"; }

bool GPUDialect::isKernel(Operation *op) {
  UnitAttr isKernelAttr = op->getAttrOfType<UnitAttr>(getKernelFuncAttrName());
  return static_cast<bool>(isKernelAttr);
}

GPUDialect::GPUDialect(MLIRContext *context)
    : Dialect(getDialectName(), context) {
  addOperations<
#define GET_OP_LIST
#include "mlir/Dialect/GPU/GPUOps.cpp.inc"
      >();
}

LogicalResult GPUDialect::verifyOperationAttribute(Operation *op,
                                                   NamedAttribute attr) {
  if (!attr.second.isa<UnitAttr>() ||
      !attr.first.is(getContainerModuleAttrName()))
    return success();

  auto module = dyn_cast<ModuleOp>(op);
  if (!module)
    return op->emitError("expected '")
           << getContainerModuleAttrName() << "' attribute to be attached to '"
           << ModuleOp::getOperationName() << '\'';

  auto walkResult = module.walk([&module](LaunchFuncOp launchOp) -> WalkResult {
    // Ignore launches that are nested more or less deeply than functions in
    // the module we are currently checking.
    if (!launchOp.getParentOp() ||
        launchOp.getParentOp()->getParentOp() != module)
      return success();

    // Ignore launch ops with missing attributes here. The errors will be
    // reported by the verifiers of those ops.
    if (!launchOp.getAttrOfType<StringAttr>(
            LaunchFuncOp::getKernelAttrName()) ||
        !launchOp.getAttrOfType<SymbolRefAttr>(
            LaunchFuncOp::getKernelModuleAttrName()))
      return success();

    // Check that `launch_func` refers to a well-formed GPU kernel module.
    StringRef kernelModuleName = launchOp.getKernelModuleName();
    auto kernelModule = module.lookupSymbol<ModuleOp>(kernelModuleName);
    if (!kernelModule)
      return launchOp.emitOpError()
             << "kernel module '" << kernelModuleName << "' is undefined";
    if (!kernelModule.getAttrOfType<UnitAttr>(
            GPUDialect::getKernelModuleAttrName()))
      return launchOp.emitOpError("module '")
             << kernelModuleName << "' is missing the '"
             << GPUDialect::getKernelModuleAttrName() << "' attribute";

    // Check that `launch_func` refers to a well-formed kernel function.
    StringRef kernelName = launchOp.kernel();
    Operation *kernelFunc = kernelModule.lookupSymbol(kernelName);
    auto kernelGPUFunction = dyn_cast_or_null<gpu::GPUFuncOp>(kernelFunc);
    auto kernelLLVMFunction = dyn_cast_or_null<LLVM::LLVMFuncOp>(kernelFunc);
    if (!kernelGPUFunction && !kernelLLVMFunction)
      return launchOp.emitOpError("kernel function '")
             << kernelName << "' is undefined";
    if (!kernelFunc->getAttrOfType<mlir::UnitAttr>(
            GPUDialect::getKernelFuncAttrName()))
      return launchOp.emitOpError("kernel function is missing the '")
             << GPUDialect::getKernelFuncAttrName() << "' attribute";

    unsigned actualNumArguments = launchOp.getNumKernelOperands();
    unsigned expectedNumArguments = kernelLLVMFunction
                                        ? kernelLLVMFunction.getNumArguments()
                                        : kernelGPUFunction.getNumArguments();
    if (expectedNumArguments != actualNumArguments)
      return launchOp.emitOpError("got ")
             << actualNumArguments << " kernel operands but expected "
             << expectedNumArguments;

    // Due to the ordering of the current impl of lowering and LLVMLowering,
    // type checks need to be temporarily disabled.
    // TODO(ntv,zinenko,herhut): reactivate checks once "changing gpu.launchFunc
    // to encode target module" has landed.
    // auto functionType = kernelFunc.getType();
    // for (unsigned i = 0; i < numKernelFuncArgs; ++i) {
    //   if (getKernelOperand(i)->getType() != functionType.getInput(i)) {
    //     return emitOpError("type of function argument ")
    //            << i << " does not match";
    //   }
    // }

    return success();
  });

  return walkResult.wasInterrupted() ? failure() : success();
}

template <typename T> static LogicalResult verifyIndexOp(T op) {
  auto dimension = op.dimension();
  if (dimension != "x" && dimension != "y" && dimension != "z")
    return op.emitError("dimension \"") << dimension << "\" is invalid";
  return success();
}

static LogicalResult verifyAllReduce(gpu::AllReduceOp allReduce) {
  if (allReduce.body().empty() != allReduce.op().hasValue())
    return allReduce.emitError(
        "expected either an op attribute or a non-empty body");
  if (!allReduce.body().empty()) {
    if (allReduce.body().front().getNumArguments() != 2)
      return allReduce.emitError("expected two region arguments");
    for (auto argument : allReduce.body().front().getArguments()) {
      if (argument->getType() != allReduce.getType())
        return allReduce.emitError("incorrect region argument type");
    }
    unsigned yieldCount = 0;
    for (Block &block : allReduce.body()) {
      if (auto yield = dyn_cast<gpu::YieldOp>(block.getTerminator())) {
        if (yield.getNumOperands() != 1)
          return allReduce.emitError("expected one gpu.yield operand");
        if (yield.getOperand(0)->getType() != allReduce.getType())
          return allReduce.emitError("incorrect gpu.yield type");
        ++yieldCount;
      }
    }
    if (yieldCount == 0)
      return allReduce.emitError("expected gpu.yield op in region");
  }
  return success();
}

static LogicalResult verifyShuffleOp(gpu::ShuffleOp shuffleOp) {
  auto type = shuffleOp.value()->getType();
  if (shuffleOp.result()->getType() != type) {
    return shuffleOp.emitOpError()
           << "requires the same type for value operand and result";
  }
  if (!type.isIntOrFloat() || type.getIntOrFloatBitWidth() != 32) {
    return shuffleOp.emitOpError()
           << "requires value operand type to be f32 or i32";
  }
  return success();
}
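
// Illustrative custom form handled by the printer and parser below (SSA names
// hypothetical):
//   %shfl, %pred = gpu.shuffle %val, %offset, %width xor : f32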
static void printShuffleOp(OpAsmPrinter &p, ShuffleOp op) {
  p << ShuffleOp::getOperationName() << ' ';
  p.printOperands(op.getOperands());
  p << ' ' << op.mode() << " : ";
  p.printType(op.value()->getType());
}

static ParseResult parseShuffleOp(OpAsmParser &parser, OperationState &state) {
  SmallVector<OpAsmParser::OperandType, 3> operandInfo;
  if (parser.parseOperandList(operandInfo, 3))
    return failure();

  StringRef mode;
  if (parser.parseKeyword(&mode))
    return failure();
  state.addAttribute("mode", parser.getBuilder().getStringAttr(mode));

  Type valueType;
  Type int32Type = parser.getBuilder().getIntegerType(32);
  Type int1Type = parser.getBuilder().getI1Type();
  if (parser.parseColonType(valueType) ||
      parser.resolveOperands(operandInfo, {valueType, int32Type, int32Type},
                             parser.getCurrentLocation(), state.operands) ||
      parser.addTypesToList({valueType, int1Type}, state.types))
    return failure();
  return success();
}

//===----------------------------------------------------------------------===//
// LaunchOp
//===----------------------------------------------------------------------===//

static SmallVector<Type, 4> getValueTypes(ValueRange values) {
  SmallVector<Type, 4> types;
  types.reserve(values.size());
  for (Value v : values)
    types.push_back(v->getType());
  return types;
}

void LaunchOp::build(Builder *builder, OperationState &result, Value gridSizeX,
                     Value gridSizeY, Value gridSizeZ, Value blockSizeX,
                     Value blockSizeY, Value blockSizeZ, ValueRange operands) {
  // Add grid and block sizes as op operands, followed by the data operands.
  result.addOperands(
      {gridSizeX, gridSizeY, gridSizeZ, blockSizeX, blockSizeY, blockSizeZ});
  result.addOperands(operands);

  // Create a kernel body region with kNumConfigRegionAttributes + N arguments,
  // where the first kNumConfigRegionAttributes arguments have `index` type and
  // the rest have the same types as the data operands.
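  // This layout matches the accessors below: block identifiers in args 0-2,
  // thread identifiers in args 3-5, grid sizes in args 6-8, block sizes in
  // args 9-11, followed by the data arguments.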
  Region *kernelRegion = result.addRegion();
  Block *body = new Block();
  body->addArguments(
      std::vector<Type>(kNumConfigRegionAttributes, builder->getIndexType()));
  body->addArguments(getValueTypes(operands));
  kernelRegion->push_back(body);
}

KernelDim3 LaunchOp::getBlockIds() {
  assert(!body().getBlocks().empty() && "LaunchOp body must not be empty.");
  auto args = body().getBlocks().front().getArguments();
  return KernelDim3{args[0], args[1], args[2]};
}

KernelDim3 LaunchOp::getThreadIds() {
  assert(!body().getBlocks().empty() && "LaunchOp body must not be empty.");
  auto args = body().getBlocks().front().getArguments();
  return KernelDim3{args[3], args[4], args[5]};
}

KernelDim3 LaunchOp::getGridSize() {
  assert(!body().getBlocks().empty() && "LaunchOp body must not be empty.");
  auto args = body().getBlocks().front().getArguments();
  return KernelDim3{args[6], args[7], args[8]};
}

KernelDim3 LaunchOp::getBlockSize() {
  assert(!body().getBlocks().empty() && "LaunchOp body must not be empty.");
  auto args = body().getBlocks().front().getArguments();
  return KernelDim3{args[9], args[10], args[11]};
}

LaunchOp::operand_range LaunchOp::getKernelOperandValues() {
  return llvm::drop_begin(getOperands(), kNumConfigOperands);
}

LaunchOp::operand_type_range LaunchOp::getKernelOperandTypes() {
  return llvm::drop_begin(getOperandTypes(), kNumConfigOperands);
}

KernelDim3 LaunchOp::getGridSizeOperandValues() {
  return KernelDim3{getOperand(0), getOperand(1), getOperand(2)};
}

KernelDim3 LaunchOp::getBlockSizeOperandValues() {
  return KernelDim3{getOperand(3), getOperand(4), getOperand(5)};
}

iterator_range<Block::args_iterator> LaunchOp::getKernelArguments() {
  auto args = body().getBlocks().front().getArguments();
  return llvm::drop_begin(args, LaunchOp::kNumConfigRegionAttributes);
}

LogicalResult verify(LaunchOp op) {
  // Kernel launch takes kNumConfigOperands leading operands for grid/block
  // sizes and transforms them into kNumConfigRegionAttributes region arguments
  // for block/thread identifiers and grid/block sizes.
  if (!op.body().empty()) {
    Block &entryBlock = op.body().front();
    if (entryBlock.getNumArguments() !=
        LaunchOp::kNumConfigOperands + op.getNumOperands())
      return op.emitOpError("unexpected number of region arguments");
  }

  // Block terminators without successors are expected to exit the kernel
  // region and must be `gpu.terminator`.
  for (Block &block : op.body()) {
    if (block.empty())
      continue;
    if (block.back().getNumSuccessors() != 0)
      continue;
    if (!isa<gpu::TerminatorOp>(&block.back())) {
      return block.back()
          .emitError("expected 'gpu.terminator' or a terminator with "
                     "successors")
          .attachNote(op.getLoc())
          << "in '" << LaunchOp::getOperationName() << "' body region";
    }
  }

  return success();
}

// Pretty-print the kernel grid/block size assignment as
//   (%iter-x, %iter-y, %iter-z) in
//   (%size-x = %ssa-use, %size-y = %ssa-use, %size-z = %ssa-use)
// where %size-* and %iter-* will correspond to the body region arguments.
static void printSizeAssignment(OpAsmPrinter &p, KernelDim3 size,
                                ValueRange operands, KernelDim3 ids) {
  p << '(' << *ids.x << ", " << *ids.y << ", " << *ids.z << ") in (";
  p << *size.x << " = " << *operands[0] << ", ";
  p << *size.y << " = " << *operands[1] << ", ";
  p << *size.z << " = " << *operands[2] << ')';
}

void printLaunchOp(OpAsmPrinter &p, LaunchOp op) {
  ValueRange operands = op.getOperands();

  // Print the launch configuration.
  p << LaunchOp::getOperationName() << ' ' << op.getBlocksKeyword();
  printSizeAssignment(p, op.getGridSize(), operands.take_front(3),
                      op.getBlockIds());
  p << ' ' << op.getThreadsKeyword();
  printSizeAssignment(p, op.getBlockSize(), operands.slice(3, 3),
                      op.getThreadIds());

  // From now on, the first kNumConfigOperands operands corresponding to grid
  // and block sizes are irrelevant, so we can drop them.
  operands = operands.drop_front(LaunchOp::kNumConfigOperands);

  // Print the data argument remapping.
  if (!op.body().empty() && !operands.empty()) {
    p << ' ' << op.getArgsKeyword() << '(';
    Block *entryBlock = &op.body().front();
    interleaveComma(llvm::seq<int>(0, operands.size()), p, [&](int i) {
      p << *entryBlock->getArgument(LaunchOp::kNumConfigRegionAttributes + i)
        << " = " << *operands[i];
    });
    p << ") ";
  }

  // Print the types of data arguments.
  if (!operands.empty())
    p << ": " << operands.getTypes();

  p.printRegion(op.body(), /*printEntryBlockArgs=*/false);
  p.printOptionalAttrDict(op.getAttrs());
}

// Parse the size assignment blocks for blocks and threads. These have the form
//   (%region_arg, %region_arg, %region_arg) in
//   (%region_arg = %operand, %region_arg = %operand, %region_arg = %operand)
// where %region_arg are percent-identifiers for the region arguments to be
// introduced further (SSA defs), and %operand are percent-identifiers for the
// SSA value uses.
static ParseResult
parseSizeAssignment(OpAsmParser &parser,
                    MutableArrayRef<OpAsmParser::OperandType> sizes,
                    MutableArrayRef<OpAsmParser::OperandType> regionSizes,
                    MutableArrayRef<OpAsmParser::OperandType> indices) {
  assert(indices.size() == 3 && "space for three indices expected");
  SmallVector<OpAsmParser::OperandType, 3> args;
  if (parser.parseRegionArgumentList(args, /*requiredOperandCount=*/3,
                                     OpAsmParser::Delimiter::Paren) ||
      parser.parseKeyword("in") || parser.parseLParen())
    return failure();
  std::move(args.begin(), args.end(), indices.begin());

  for (int i = 0; i < 3; ++i) {
    if (i != 0 && parser.parseComma())
      return failure();
    if (parser.parseRegionArgument(regionSizes[i]) || parser.parseEqual() ||
        parser.parseOperand(sizes[i]))
      return failure();
  }

  return parser.parseRParen();
}

// Parses a Launch operation.
// operation ::= `gpu.launch` `blocks` `(` ssa-id-list `)` `in` ssa-reassignment
//                            `threads` `(` ssa-id-list `)` `in` ssa-reassignment
//                            (`args` ssa-reassignment `:` type-list)?
//                            region attr-dict?
// ssa-reassignment ::= `(` ssa-id `=` ssa-use (`,` ssa-id `=` ssa-use)* `)`
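//
// For example (SSA names hypothetical):
//   gpu.launch blocks(%bx, %by, %bz) in (%gx = %0, %gy = %1, %gz = %2)
//              threads(%tx, %ty, %tz) in (%sx = %3, %sy = %4, %sz = %5)
//              args(%karg = %6) : f32 {
//     ...
//   }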
ParseResult parseLaunchOp(OpAsmParser &parser, OperationState &result) {
  // Sizes of the grid and block.
  SmallVector<OpAsmParser::OperandType, LaunchOp::kNumConfigOperands> sizes(
      LaunchOp::kNumConfigOperands);
  MutableArrayRef<OpAsmParser::OperandType> sizesRef(sizes);

  // Actual (data) operands passed to the kernel.
  SmallVector<OpAsmParser::OperandType, 4> dataOperands;

  // Region arguments to be created.
  SmallVector<OpAsmParser::OperandType, 16> regionArgs(
      LaunchOp::kNumConfigRegionAttributes);
  MutableArrayRef<OpAsmParser::OperandType> regionArgsRef(regionArgs);

  // Parse the size assignment segments: the first segment assigns grid sizes
  // and defines values for block identifiers; the second segment assigns block
  // sizes and defines values for thread identifiers. In the region argument
  // list, identifiers precede sizes, and block-related values precede
  // thread-related values.
  if (parser.parseKeyword(LaunchOp::getBlocksKeyword().data()) ||
      parseSizeAssignment(parser, sizesRef.take_front(3),
                          regionArgsRef.slice(6, 3),
                          regionArgsRef.slice(0, 3)) ||
      parser.parseKeyword(LaunchOp::getThreadsKeyword().data()) ||
      parseSizeAssignment(parser, sizesRef.drop_front(3),
                          regionArgsRef.slice(9, 3),
                          regionArgsRef.slice(3, 3)) ||
      parser.resolveOperands(sizes, parser.getBuilder().getIndexType(),
                             result.operands))
    return failure();

  // If the kernel argument renaming segment is present, parse it. When
  // present, the segment should have at least one element. If this segment is
  // present, so is the trailing type list. Parse it as well and use the parsed
  // types to resolve the operands passed to the kernel arguments.
  SmallVector<Type, 4> dataTypes;
  if (!parser.parseOptionalKeyword(LaunchOp::getArgsKeyword())) {
    llvm::SMLoc argsLoc = parser.getCurrentLocation();

    regionArgs.push_back({});
    dataOperands.push_back({});
    if (parser.parseLParen() || parser.parseRegionArgument(regionArgs.back()) ||
        parser.parseEqual() || parser.parseOperand(dataOperands.back()))
      return failure();

    while (!parser.parseOptionalComma()) {
      regionArgs.push_back({});
      dataOperands.push_back({});
      if (parser.parseRegionArgument(regionArgs.back()) ||
          parser.parseEqual() || parser.parseOperand(dataOperands.back()))
        return failure();
    }

    if (parser.parseRParen() || parser.parseColonTypeList(dataTypes) ||
        parser.resolveOperands(dataOperands, dataTypes, argsLoc,
                               result.operands))
      return failure();
  }

  // Introduce the body region and parse it. The region has
  // kNumConfigRegionAttributes leading arguments that correspond to
  // block/thread identifiers and grid/block sizes, all of the `index` type.
  // These are followed by the actual kernel arguments.
  Type index = parser.getBuilder().getIndexType();
  dataTypes.insert(dataTypes.begin(), LaunchOp::kNumConfigRegionAttributes,
                   index);
  Region *body = result.addRegion();
  return failure(parser.parseRegion(*body, regionArgs, dataTypes) ||
                 parser.parseOptionalAttrDict(result.attributes));
}

void LaunchOp::eraseKernelArgument(unsigned index) {
  Block &entryBlock = body().front();
  assert(index < entryBlock.getNumArguments() - kNumConfigRegionAttributes &&
         "kernel argument index overflow");
  entryBlock.eraseArgument(kNumConfigRegionAttributes + index);
  getOperation()->eraseOperand(kNumConfigOperands + index);
}

namespace {
// Clone any known constants passed as operands to the kernel into its body.
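// For instance, a `constant` passed to the launch through `args(...)` is
// recreated at the top of the launch body, uses of the corresponding region
// argument are redirected to the clone, and the now-dead kernel argument and
// operand are erased.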
class PropagateConstantBounds : public OpRewritePattern<LaunchOp> {
  using OpRewritePattern<LaunchOp>::OpRewritePattern;

  PatternMatchResult matchAndRewrite(LaunchOp launchOp,
                                     PatternRewriter &rewriter) const override {
    rewriter.startRootUpdate(launchOp);
    PatternRewriter::InsertionGuard guard(rewriter);
    rewriter.setInsertionPointToStart(&launchOp.body().front());

    // Traverse operands passed to the kernel and check if some of them are
    // known constants. If so, clone the constant operation inside the kernel
    // region and use it instead of passing the value from the parent region.
    // Perform the traversal in reverse order to simplify the index arithmetic
    // when dropping arguments.
    auto operands = launchOp.getKernelOperandValues();
    auto kernelArgs = launchOp.getKernelArguments();
    bool found = false;
    for (unsigned i = operands.size(); i > 0; --i) {
      unsigned index = i - 1;
      Value operand = operands[index];
      if (!isa_and_nonnull<ConstantOp>(operand->getDefiningOp()))
        continue;

      found = true;
      Value internalConstant =
          rewriter.clone(*operand->getDefiningOp())->getResult(0);
      Value kernelArg = *std::next(kernelArgs.begin(), index);
      kernelArg->replaceAllUsesWith(internalConstant);
      launchOp.eraseKernelArgument(index);
    }

    if (!found) {
      rewriter.cancelRootUpdate(launchOp);
      return matchFailure();
    }

    rewriter.finalizeRootUpdate(launchOp);
    return matchSuccess();
  }
};
} // end namespace

void LaunchOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
                                           MLIRContext *context) {
  results.insert<PropagateConstantBounds>(context);
}

//===----------------------------------------------------------------------===//
// LaunchFuncOp
//===----------------------------------------------------------------------===//
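
// Operands 0 through kNumConfigOperands - 1 hold the grid and block sizes;
// the remaining operands are forwarded to the kernel. The builders and
// accessors below rely on this layout.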
void LaunchFuncOp::build(Builder *builder, OperationState &result,
                         GPUFuncOp kernelFunc, Value gridSizeX, Value gridSizeY,
                         Value gridSizeZ, Value blockSizeX, Value blockSizeY,
                         Value blockSizeZ, ValueRange kernelOperands) {
  // Add grid and block sizes as op operands, followed by the data operands.
  result.addOperands(
      {gridSizeX, gridSizeY, gridSizeZ, blockSizeX, blockSizeY, blockSizeZ});
  result.addOperands(kernelOperands);
  result.addAttribute(getKernelAttrName(),
                      builder->getStringAttr(kernelFunc.getName()));
  auto kernelModule = kernelFunc.getParentOfType<ModuleOp>();
  if (Optional<StringRef> kernelModuleName = kernelModule.getName())
    result.addAttribute(getKernelModuleAttrName(),
                        builder->getSymbolRefAttr(*kernelModuleName));
}

void LaunchFuncOp::build(Builder *builder, OperationState &result,
                         GPUFuncOp kernelFunc, KernelDim3 gridSize,
                         KernelDim3 blockSize, ValueRange kernelOperands) {
  build(builder, result, kernelFunc, gridSize.x, gridSize.y, gridSize.z,
        blockSize.x, blockSize.y, blockSize.z, kernelOperands);
}

StringRef LaunchFuncOp::kernel() {
  return getAttrOfType<StringAttr>(getKernelAttrName()).getValue();
}

unsigned LaunchFuncOp::getNumKernelOperands() {
  return getNumOperands() - kNumConfigOperands;
}

StringRef LaunchFuncOp::getKernelModuleName() {
  return getAttrOfType<SymbolRefAttr>(getKernelModuleAttrName())
      .getRootReference();
}

Value LaunchFuncOp::getKernelOperand(unsigned i) {
  return getOperation()->getOperand(i + kNumConfigOperands);
}

KernelDim3 LaunchFuncOp::getGridSizeOperandValues() {
  return KernelDim3{getOperand(0), getOperand(1), getOperand(2)};
}

KernelDim3 LaunchFuncOp::getBlockSizeOperandValues() {
  return KernelDim3{getOperand(3), getOperand(4), getOperand(5)};
}

LogicalResult verify(LaunchFuncOp op) {
  auto module = op.getParentOfType<ModuleOp>();
  if (!module)
    return op.emitOpError("expected to belong to a module");

  if (!module.getAttrOfType<UnitAttr>(
          GPUDialect::getContainerModuleAttrName()))
    return op.emitOpError(
        "expected the closest surrounding module to have the '" +
        GPUDialect::getContainerModuleAttrName() + "' attribute");

  auto kernelAttr = op.getAttrOfType<StringAttr>(op.getKernelAttrName());
  if (!kernelAttr)
    return op.emitOpError("string attribute '" + op.getKernelAttrName() +
                          "' must be specified");

  auto kernelModuleAttr =
      op.getAttrOfType<SymbolRefAttr>(op.getKernelModuleAttrName());
  if (!kernelModuleAttr)
    return op.emitOpError("symbol reference attribute '" +
                          op.getKernelModuleAttrName() + "' must be specified");

  return success();
}

//===----------------------------------------------------------------------===//
// GPUFuncOp
//===----------------------------------------------------------------------===//
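
// Illustrative textual form handled by the parser and printer below. The SSA
// names are hypothetical, and the memref memory spaces (3 and 5) are assumed
// to be the dialect's workgroup and private address space numbers:
//
//   gpu.func @kernel(%arg0: f32) workgroup(%sum : memref<32xf32, 3>)
//       private(%tmp : memref<1xf32, 5>) kernel {
//     gpu.return
//   }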
/// Adds a workgroup attribution to "op" of the MemRef type with the given
/// shape and element type.
Value GPUFuncOp::addWorkgroupAttribution(ArrayRef<int64_t> shape,
                                         Type elementType) {
  unsigned pos = getNumFuncArguments() + getNumWorkgroupAttributions();
  Block &bodyBlock = body().front();
  Value attribution = bodyBlock.insertArgument(
      std::next(bodyBlock.args_begin(), pos),
      MemRefType::get(shape, elementType, /*affineMapComposition=*/{},
                      GPUDialect::getWorkgroupAddressSpace()));
  auto numWorkgroupBuffersAttr =
      getAttrOfType<IntegerAttr>(getNumWorkgroupAttributionsAttrName());
  setAttr(getNumWorkgroupAttributionsAttrName(),
          IntegerAttr::get(numWorkgroupBuffersAttr.getType(),
                           numWorkgroupBuffersAttr.getValue() + 1));
  return attribution;
}

void GPUFuncOp::build(Builder *builder, OperationState &result, StringRef name,
                      FunctionType type, ArrayRef<Type> workgroupAttributions,
                      ArrayRef<Type> privateAttributions,
                      ArrayRef<NamedAttribute> attrs) {
  result.addAttribute(SymbolTable::getSymbolAttrName(),
                      builder->getStringAttr(name));
  result.addAttribute(getTypeAttrName(), TypeAttr::get(type));
  result.addAttribute(getNumWorkgroupAttributionsAttrName(),
                      builder->getI64IntegerAttr(workgroupAttributions.size()));
  result.addAttributes(attrs);
  Region *body = result.addRegion();
  Block *entryBlock = new Block;
  entryBlock->addArguments(type.getInputs());
  entryBlock->addArguments(workgroupAttributions);
  entryBlock->addArguments(privateAttributions);

  body->getBlocks().push_back(entryBlock);
}

/// Parses a GPU function memory attribution.
///
///   memory-attribution ::= (`workgroup` `(` ssa-id-and-type-list `)`)?
///                          (`private` `(` ssa-id-and-type-list `)`)?
///
/// Note that this function parses only one of the two similar parts, with the
/// keyword provided as argument.
static ParseResult
parseAttributions(OpAsmParser &parser, StringRef keyword,
                  SmallVectorImpl<OpAsmParser::OperandType> &args,
                  SmallVectorImpl<Type> &argTypes) {
  // If we could not parse the keyword, just assume an empty list and succeed.
  if (failed(parser.parseOptionalKeyword(keyword)))
    return success();

  if (failed(parser.parseLParen()))
    return failure();

  // Early exit for an empty list.
  if (succeeded(parser.parseOptionalRParen()))
    return success();

  do {
    OpAsmParser::OperandType arg;
    Type type;

    if (parser.parseRegionArgument(arg) || parser.parseColonType(type))
      return failure();

    args.push_back(arg);
    argTypes.push_back(type);
  } while (succeeded(parser.parseOptionalComma()));

  return parser.parseRParen();
}

/// Parses a GPU function.
///
///   <operation> ::= `gpu.func` symbol-ref-id `(` argument-list `)`
///                   (`->` function-result-list)? memory-attribution `kernel`?
///                   function-attributes? region
static ParseResult parseGPUFuncOp(OpAsmParser &parser,
                                  OperationState &result) {
  SmallVector<OpAsmParser::OperandType, 8> entryArgs;
  SmallVector<SmallVector<NamedAttribute, 2>, 1> argAttrs;
  SmallVector<SmallVector<NamedAttribute, 2>, 1> resultAttrs;
  SmallVector<Type, 8> argTypes;
  SmallVector<Type, 4> resultTypes;
  bool isVariadic;

  // Parse the function name.
  StringAttr nameAttr;
  if (parser.parseSymbolName(nameAttr, ::mlir::SymbolTable::getSymbolAttrName(),
                             result.attributes))
    return failure();

  auto signatureLocation = parser.getCurrentLocation();
  if (failed(impl::parseFunctionSignature(
          parser, /*allowVariadic=*/false, entryArgs, argTypes, argAttrs,
          isVariadic, resultTypes, resultAttrs)))
    return failure();

  if (entryArgs.empty() && !argTypes.empty())
    return parser.emitError(signatureLocation)
           << "gpu.func requires named arguments";

  // Construct the function type. More types will be added to the region, but
  // not to the function type.
  Builder &builder = parser.getBuilder();
  auto type = builder.getFunctionType(argTypes, resultTypes);
  result.addAttribute(GPUFuncOp::getTypeAttrName(), TypeAttr::get(type));

  // Parse workgroup memory attributions.
  if (failed(parseAttributions(parser, GPUFuncOp::getWorkgroupKeyword(),
                               entryArgs, argTypes)))
    return failure();

  // Store the number of operands we just parsed as the number of workgroup
  // memory attributions.
  unsigned numWorkgroupAttrs = argTypes.size() - type.getNumInputs();
  result.addAttribute(GPUFuncOp::getNumWorkgroupAttributionsAttrName(),
                      builder.getI64IntegerAttr(numWorkgroupAttrs));

  // Parse private memory attributions.
  if (failed(parseAttributions(parser, GPUFuncOp::getPrivateKeyword(),
                               entryArgs, argTypes)))
    return failure();

  // Parse the kernel attribute if present.
  if (succeeded(parser.parseOptionalKeyword(GPUFuncOp::getKernelKeyword())))
    result.addAttribute(GPUDialect::getKernelFuncAttrName(),
                        builder.getUnitAttr());

  // Parse attributes.
  if (failed(parser.parseOptionalAttrDictWithKeyword(result.attributes)))
    return failure();
  mlir::impl::addArgAndResultAttrs(builder, result, argAttrs, resultAttrs);

  // Parse the region, using the argument names (including those of
  // attributions) parsed above for its entry block.
  auto *body = result.addRegion();
  return parser.parseRegion(*body, entryArgs, argTypes);
}

static void printAttributions(OpAsmPrinter &p, StringRef keyword,
                              ArrayRef<BlockArgument> values) {
  if (values.empty())
    return;

  p << ' ' << keyword << '(';
  interleaveComma(values, p,
                  [&p](BlockArgument v) { p << *v << " : " << v->getType(); });
  p << ')';
}

/// Prints a GPU Func op.
void printGPUFuncOp(OpAsmPrinter &p, GPUFuncOp op) {
  p << GPUFuncOp::getOperationName() << ' ';
  p.printSymbolName(op.getName());

  FunctionType type = op.getType();
  impl::printFunctionSignature(p, op.getOperation(), type.getInputs(),
                               /*isVariadic=*/false, type.getResults());

  printAttributions(p, op.getWorkgroupKeyword(),
                    op.getWorkgroupAttributions());
  printAttributions(p, op.getPrivateKeyword(), op.getPrivateAttributions());
  if (op.isKernel())
    p << ' ' << op.getKernelKeyword();

  impl::printFunctionAttributes(p, op.getOperation(), type.getNumInputs(),
                                type.getNumResults(),
                                {op.getNumWorkgroupAttributionsAttrName(),
                                 GPUDialect::getKernelFuncAttrName()});
  p.printRegion(op.getBody(), /*printEntryBlockArgs=*/false);
}

void GPUFuncOp::setType(FunctionType newType) {
  auto oldType = getType();
  assert(newType.getNumResults() == oldType.getNumResults() &&
         "unimplemented: changes to the number of results");

  SmallVector<char, 16> nameBuf;
  for (int i = newType.getNumInputs(), e = oldType.getNumInputs(); i < e; i++)
    removeAttr(getArgAttrName(i, nameBuf));

  setAttr(getTypeAttrName(), TypeAttr::get(newType));
}

/// Hook for FunctionLike verifier.
LogicalResult GPUFuncOp::verifyType() {
  Type type = getTypeAttr().getValue();
  if (!type.isa<FunctionType>())
    return emitOpError("requires '" + getTypeAttrName() +
                       "' attribute of function type");
  return success();
}

static LogicalResult verifyAttributions(Operation *op,
                                        ArrayRef<BlockArgument> attributions,
                                        unsigned memorySpace) {
  for (Value v : attributions) {
    auto type = v->getType().dyn_cast<MemRefType>();
    if (!type)
      return op->emitOpError() << "expected memref type in attribution";

    if (type.getMemorySpace() != memorySpace) {
      return op->emitOpError()
             << "expected memory space " << memorySpace << " in attribution";
    }
  }
  return success();
}

/// Verifies the body of the function.
LogicalResult GPUFuncOp::verifyBody() {
  unsigned numFuncArguments = getNumArguments();
  unsigned numWorkgroupAttributions = getNumWorkgroupAttributions();
  unsigned numBlockArguments = front().getNumArguments();
  if (numBlockArguments < numFuncArguments + numWorkgroupAttributions)
    return emitOpError() << "expected at least "
                         << numFuncArguments + numWorkgroupAttributions
                         << " arguments to body region";

  ArrayRef<Type> funcArgTypes = getType().getInputs();
  for (unsigned i = 0; i < numFuncArguments; ++i) {
    Type blockArgType = front().getArgument(i)->getType();
    if (funcArgTypes[i] != blockArgType)
      return emitOpError() << "expected body region argument #" << i
                           << " to be of type " << funcArgTypes[i] << ", got "
                           << blockArgType;
  }

  if (failed(verifyAttributions(getOperation(), getWorkgroupAttributions(),
                                GPUDialect::getWorkgroupAddressSpace())) ||
      failed(verifyAttributions(getOperation(), getPrivateAttributions(),
                                GPUDialect::getPrivateAddressSpace())))
    return failure();

  return success();
}

// Namespace avoids ambiguous ReturnOpOperandAdaptor.
namespace mlir {
namespace gpu {
#define GET_OP_CLASSES
#include "mlir/Dialect/GPU/GPUOps.cpp.inc"
} // namespace gpu
} // namespace mlir