//===- ConvertLaunchFuncToGpuRuntimeCalls.cpp - MLIR GPU lowering passes --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a pass to convert the gpu.launch_func op into a
// sequence of GPU runtime calls. As most GPU runtimes do not have a stable
// published ABI, this pass uses a slim runtime layer that builds on top of
// the public API from GPU runtime headers.
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/GPUCommon/GPUCommonPass.h"

#include "../PassDetail.h"
#include "mlir/Conversion/AsyncToLLVM/AsyncToLLVM.h"
#include "mlir/Conversion/LLVMCommon/ConversionTarget.h"
#include "mlir/Conversion/LLVMCommon/Pattern.h"
#include "mlir/Conversion/MemRefToLLVM/MemRefToLLVM.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"
#include "mlir/Dialect/Async/IR/Async.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/GPU/Passes.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"

#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/FormatVariadic.h"

using namespace mlir;

static constexpr const char *kGpuBinaryStorageSuffix = "_gpubin_cst";

namespace {

class GpuToLLVMConversionPass
    : public GpuToLLVMConversionPassBase<GpuToLLVMConversionPass> {
public:
  GpuToLLVMConversionPass() = default;

  GpuToLLVMConversionPass(const GpuToLLVMConversionPass &other)
      : GpuToLLVMConversionPassBase(other) {}

  // Run the dialect converter on the module.
  void runOnOperation() override;

private:
  Option<std::string> gpuBinaryAnnotation{
      *this, "gpu-binary-annotation",
      llvm::cl::desc("Annotation attribute string for GPU binary"),
      llvm::cl::init(gpu::getDefaultGpuBinaryAnnotation())};
};

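// From the command line, this pass would typically be exercised through
// mlir-opt. A hedged sketch (the pass name and option below are assumed to
// match this pass's registration in Passes.td):
//
//   mlir-opt --gpu-to-llvm='gpu-binary-annotation=nvvm.cubin' input.mlir
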
struct FunctionCallBuilder {
  FunctionCallBuilder(StringRef functionName, Type returnType,
                      ArrayRef<Type> argumentTypes)
      : functionName(functionName),
        functionType(LLVM::LLVMFunctionType::get(returnType, argumentTypes)) {}
  LLVM::CallOp create(Location loc, OpBuilder &builder,
                      ArrayRef<Value> arguments) const;

  StringRef functionName;
  LLVM::LLVMFunctionType functionType;
};

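// As a usage sketch: a pattern declares a builder such as
//
//   FunctionCallBuilder streamCreateCallBuilder = {
//       "mgpuStreamCreate", llvmPointerType, {}};
//
// and later emits the call (lazily declaring the callee on first use) with
//
//   Value stream =
//       streamCreateCallBuilder.create(loc, rewriter, {}).getResult(0);
//
// Both snippets mirror code that appears verbatim below in this file.
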
template <typename OpTy>
class ConvertOpToGpuRuntimeCallPattern : public ConvertOpToLLVMPattern<OpTy> {
public:
  explicit ConvertOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToLLVMPattern<OpTy>(typeConverter) {}

protected:
  MLIRContext *context = &this->getTypeConverter()->getContext();

  Type llvmVoidType = LLVM::LLVMVoidType::get(context);
  Type llvmPointerType =
      LLVM::LLVMPointerType::get(IntegerType::get(context, 8));
  Type llvmPointerPointerType = LLVM::LLVMPointerType::get(llvmPointerType);
  Type llvmInt8Type = IntegerType::get(context, 8);
  Type llvmInt32Type = IntegerType::get(context, 32);
  Type llvmInt64Type = IntegerType::get(context, 64);
  Type llvmIntPtrType = IntegerType::get(
      context, this->getTypeConverter()->getPointerBitwidth(0));

  FunctionCallBuilder moduleLoadCallBuilder = {
      "mgpuModuleLoad",
      llvmPointerType /* void *module */,
      {llvmPointerType /* void *cubin */}};
  FunctionCallBuilder moduleUnloadCallBuilder = {
      "mgpuModuleUnload", llvmVoidType, {llvmPointerType /* void *module */}};
  FunctionCallBuilder moduleGetFunctionCallBuilder = {
      "mgpuModuleGetFunction",
      llvmPointerType /* void *function */,
      {
          llvmPointerType, /* void *module */
          llvmPointerType  /* char *name */
      }};
  FunctionCallBuilder launchKernelCallBuilder = {
      "mgpuLaunchKernel",
      llvmVoidType,
      {
          llvmPointerType,        /* void* f */
          llvmIntPtrType,         /* intptr_t gridXDim */
          llvmIntPtrType,         /* intptr_t gridYDim */
          llvmIntPtrType,         /* intptr_t gridZDim */
          llvmIntPtrType,         /* intptr_t blockXDim */
          llvmIntPtrType,         /* intptr_t blockYDim */
          llvmIntPtrType,         /* intptr_t blockZDim */
          llvmInt32Type,          /* unsigned int sharedMemBytes */
          llvmPointerType,        /* void *hstream */
          llvmPointerPointerType, /* void **kernelParams */
          llvmPointerPointerType  /* void **extra */
      }};
  FunctionCallBuilder streamCreateCallBuilder = {
      "mgpuStreamCreate", llvmPointerType /* void *stream */, {}};
  FunctionCallBuilder streamDestroyCallBuilder = {
      "mgpuStreamDestroy", llvmVoidType, {llvmPointerType /* void *stream */}};
  FunctionCallBuilder streamSynchronizeCallBuilder = {
      "mgpuStreamSynchronize",
      llvmVoidType,
      {llvmPointerType /* void *stream */}};
  FunctionCallBuilder streamWaitEventCallBuilder = {
      "mgpuStreamWaitEvent",
      llvmVoidType,
      {llvmPointerType /* void *stream */, llvmPointerType /* void *event */}};
  FunctionCallBuilder eventCreateCallBuilder = {
      "mgpuEventCreate", llvmPointerType /* void *event */, {}};
  FunctionCallBuilder eventDestroyCallBuilder = {
      "mgpuEventDestroy", llvmVoidType, {llvmPointerType /* void *event */}};
  FunctionCallBuilder eventSynchronizeCallBuilder = {
      "mgpuEventSynchronize",
      llvmVoidType,
      {llvmPointerType /* void *event */}};
  FunctionCallBuilder eventRecordCallBuilder = {
      "mgpuEventRecord",
      llvmVoidType,
      {llvmPointerType /* void *event */, llvmPointerType /* void *stream */}};
  FunctionCallBuilder hostRegisterCallBuilder = {
      "mgpuMemHostRegisterMemRef",
      llvmVoidType,
      {llvmIntPtrType /* intptr_t rank */,
       llvmPointerType /* void *memrefDesc */,
       llvmIntPtrType /* intptr_t elementSizeBytes */}};
  FunctionCallBuilder allocCallBuilder = {
      "mgpuMemAlloc",
      llvmPointerType /* void * */,
      {llvmIntPtrType /* intptr_t sizeBytes */,
       llvmPointerType /* void *stream */}};
  FunctionCallBuilder deallocCallBuilder = {
      "mgpuMemFree",
      llvmVoidType,
      {llvmPointerType /* void *ptr */, llvmPointerType /* void *stream */}};
  FunctionCallBuilder memcpyCallBuilder = {
      "mgpuMemcpy",
      llvmVoidType,
      {llvmPointerType /* void *dst */, llvmPointerType /* void *src */,
       llvmIntPtrType /* intptr_t sizeBytes */,
       llvmPointerType /* void *stream */}};
};

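// The mgpu* callees above are not defined in this file; they are expected to
// come from a small runtime wrapper library linked into the final binary.
// As a hedged sketch (the exact signatures live in the CUDA / ROCm runtime
// wrapper sources, not here), the launch wrapper is assumed to look like:
//
//   extern "C" void mgpuLaunchKernel(void *function, intptr_t gridX,
//                                    intptr_t gridY, intptr_t gridZ,
//                                    intptr_t blockX, intptr_t blockY,
//                                    intptr_t blockZ, int32_t sharedMemBytes,
//                                    void *stream, void **params,
//                                    void **extra);
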
/// A rewrite pattern to convert gpu.host_register operations into a GPU
/// runtime call. Currently it supports CUDA and ROCm (HIP).
class ConvertHostRegisterOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::HostRegisterOp> {
public:
  ConvertHostRegisterOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::HostRegisterOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(gpu::HostRegisterOp hostRegisterOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override;
};

/// A rewrite pattern to convert gpu.alloc operations into a GPU runtime
/// call. Currently it supports CUDA and ROCm (HIP).
class ConvertAllocOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::AllocOp> {
public:
  ConvertAllocOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::AllocOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(gpu::AllocOp allocOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override;
};

/// A rewrite pattern to convert gpu.dealloc operations into a GPU runtime
/// call. Currently it supports CUDA and ROCm (HIP).
class ConvertDeallocOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::DeallocOp> {
public:
  ConvertDeallocOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::DeallocOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(gpu::DeallocOp deallocOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override;
};

class ConvertAsyncYieldToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<async::YieldOp> {
public:
  ConvertAsyncYieldToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<async::YieldOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(async::YieldOp yieldOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override;
};

/// A rewrite pattern to convert gpu.wait operations into a GPU runtime
/// call. Currently it supports CUDA and ROCm (HIP).
class ConvertWaitOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::WaitOp> {
public:
  ConvertWaitOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::WaitOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(gpu::WaitOp waitOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override;
};

/// A rewrite pattern to convert gpu.wait async operations into a GPU runtime
/// call. Currently it supports CUDA and ROCm (HIP).
class ConvertWaitAsyncOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::WaitOp> {
public:
  ConvertWaitAsyncOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::WaitOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(gpu::WaitOp waitOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override;
};

/// A rewrite pattern to convert gpu.launch_func operations into a sequence of
/// GPU runtime calls. Currently it supports CUDA and ROCm (HIP).
///
/// In essence, a gpu.launch_func operation gets compiled into the following
/// sequence of runtime calls:
///
/// * moduleLoad        -- loads the module given the cubin / hsaco data
/// * moduleGetFunction -- gets a handle to the actual kernel function
/// * streamCreate      -- creates a new compute stream on the GPU
/// * launchKernel      -- launches the kernel on a stream
/// * streamSynchronize -- waits for operations on the stream to finish
///
/// Intermediate data structures are allocated on the stack.
class ConvertLaunchFuncOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::LaunchFuncOp> {
public:
  ConvertLaunchFuncOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter,
                                             StringRef gpuBinaryAnnotation)
      : ConvertOpToGpuRuntimeCallPattern<gpu::LaunchFuncOp>(typeConverter),
        gpuBinaryAnnotation(gpuBinaryAnnotation) {}

private:
  Value generateParamsArray(gpu::LaunchFuncOp launchOp,
                            ArrayRef<Value> operands, OpBuilder &builder) const;
  Value generateKernelNameConstant(StringRef moduleName, StringRef name,
                                   Location loc, OpBuilder &builder) const;

  LogicalResult
  matchAndRewrite(gpu::LaunchFuncOp launchOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override;

  llvm::SmallString<32> gpuBinaryAnnotation;
};

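// For reference, a hedged sketch of the input IR this pattern matches (the
// textual format is assumed from the GPU dialect at the time of writing;
// types abbreviated):
//
//   gpu.launch_func @kernel_module::@kernel
//       blocks in (%gx, %gy, %gz) threads in (%bx, %by, %bz)
//       args(%arg0 : f32, %arg1 : memref<?xf32>)
//
// or, in its async form, prefixed with `async [%dep]` and producing a
// !gpu.async.token result.
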
class EraseGpuModuleOpPattern : public OpRewritePattern<gpu::GPUModuleOp> {
  using OpRewritePattern<gpu::GPUModuleOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(gpu::GPUModuleOp op,
                                PatternRewriter &rewriter) const override {
    // GPU kernel modules are no longer necessary since we have a global
    // constant with the CUBIN, or HSACO data.
    rewriter.eraseOp(op);
    return success();
  }
};

/// A rewrite pattern to convert gpu.memcpy operations into a GPU runtime
/// call. Currently it supports CUDA and ROCm (HIP).
class ConvertMemcpyOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::MemcpyOp> {
public:
  ConvertMemcpyOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::MemcpyOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(gpu::MemcpyOp memcpyOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override;
};
} // namespace

void GpuToLLVMConversionPass::runOnOperation() {
  LLVMTypeConverter converter(&getContext());
  RewritePatternSet patterns(&getContext());
  LLVMConversionTarget target(getContext());

  target.addIllegalDialect<gpu::GPUDialect>();
  target.addIllegalOp<UnrealizedConversionCastOp>();

  populateVectorToLLVMConversionPatterns(converter, patterns);
  populateMemRefToLLVMConversionPatterns(converter, patterns);
  populateStdToLLVMConversionPatterns(converter, patterns);
  populateAsyncStructuralTypeConversionsAndLegality(converter, patterns,
                                                    target);

  converter.addConversion(
      [context = &converter.getContext()](gpu::AsyncTokenType type) -> Type {
        return LLVM::LLVMPointerType::get(IntegerType::get(context, 8));
      });
  patterns.add<ConvertAllocOpToGpuRuntimeCallPattern,
               ConvertDeallocOpToGpuRuntimeCallPattern,
               ConvertHostRegisterOpToGpuRuntimeCallPattern,
               ConvertMemcpyOpToGpuRuntimeCallPattern,
               ConvertWaitAsyncOpToGpuRuntimeCallPattern,
               ConvertWaitOpToGpuRuntimeCallPattern,
               ConvertAsyncYieldToGpuRuntimeCallPattern>(converter);
  patterns.add<ConvertLaunchFuncOpToGpuRuntimeCallPattern>(converter,
                                                           gpuBinaryAnnotation);
  patterns.add<EraseGpuModuleOpPattern>(&converter.getContext());

  if (failed(
          applyPartialConversion(getOperation(), target, std::move(patterns))))
    signalPassFailure();
}

LLVM::CallOp FunctionCallBuilder::create(Location loc, OpBuilder &builder,
                                         ArrayRef<Value> arguments) const {
  auto module = builder.getBlock()->getParent()->getParentOfType<ModuleOp>();
  auto function = [&] {
    if (auto function = module.lookupSymbol<LLVM::LLVMFuncOp>(functionName))
      return function;
    return OpBuilder::atBlockEnd(module.getBody())
        .create<LLVM::LLVMFuncOp>(loc, functionName, functionType);
  }();
  return builder.create<LLVM::CallOp>(
      loc, const_cast<LLVM::LLVMFunctionType &>(functionType).getReturnType(),
      builder.getSymbolRefAttr(function), arguments);
}

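// For example (a sketch of the IR this helper produces, assuming the pointer
// types shown), the first create() for "mgpuStreamCreate" both declares the
// callee at the end of the module and emits the call:
//
//   llvm.func @mgpuStreamCreate() -> !llvm.ptr<i8>
//   ...
//   %stream = llvm.call @mgpuStreamCreate() : () -> !llvm.ptr<i8>
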
// Returns success if all operands are of LLVM type.
static LogicalResult areAllLLVMTypes(Operation *op, ValueRange operands,
                                     ConversionPatternRewriter &rewriter) {
  if (!llvm::all_of(operands, [](Value value) {
        return LLVM::isCompatibleType(value.getType());
      }))
    return rewriter.notifyMatchFailure(
        op, "Cannot convert if operands aren't of LLVM type.");
  return success();
}

static LogicalResult
isAsyncWithOneDependency(ConversionPatternRewriter &rewriter,
                         gpu::AsyncOpInterface op) {
  if (op.getAsyncDependencies().size() != 1)
    return rewriter.notifyMatchFailure(
        op, "Can only convert with exactly one async dependency.");

  if (!op.getAsyncToken())
    return rewriter.notifyMatchFailure(op, "Can convert only async version.");

  return success();
}

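// In IR terms (a hedged sketch of the GPU dialect syntax), the form that
// passes this check is the async one with a single dependency, e.g.
//
//   %token = gpu.dealloc async [%dep] %memref : memref<?xf32>
//
// while the synchronous `gpu.dealloc %memref : memref<?xf32>` is rejected.
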
LogicalResult ConvertHostRegisterOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::HostRegisterOp hostRegisterOp, ArrayRef<Value> operands,
    ConversionPatternRewriter &rewriter) const {
  auto *op = hostRegisterOp.getOperation();
  if (failed(areAllLLVMTypes(op, operands, rewriter)))
    return failure();

  Location loc = op->getLoc();

  auto memRefType = hostRegisterOp.value().getType();
  auto elementType = memRefType.cast<UnrankedMemRefType>().getElementType();
  auto elementSize = getSizeInBytes(loc, elementType, rewriter);

  auto arguments = getTypeConverter()->promoteOperands(loc, op->getOperands(),
                                                       operands, rewriter);
  arguments.push_back(elementSize);
  hostRegisterCallBuilder.create(loc, rewriter, arguments);

  rewriter.eraseOp(op);
  return success();
}

LogicalResult ConvertAllocOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::AllocOp allocOp, ArrayRef<Value> operands,
    ConversionPatternRewriter &rewriter) const {
  MemRefType memRefType = allocOp.getType();

  if (failed(areAllLLVMTypes(allocOp, operands, rewriter)) ||
      !isConvertibleAndHasIdentityMaps(memRefType) ||
      failed(isAsyncWithOneDependency(rewriter, allocOp)))
    return failure();

  auto loc = allocOp.getLoc();
  auto adaptor = gpu::AllocOpAdaptor(operands, allocOp->getAttrDictionary());

  // Get shape of the memref as values: static sizes are constant
  // values and dynamic sizes are passed to 'alloc' as operands.
  SmallVector<Value, 4> shape;
  SmallVector<Value, 4> strides;
  Value sizeBytes;
  getMemRefDescriptorSizes(loc, memRefType, adaptor.dynamicSizes(), rewriter,
                           shape, strides, sizeBytes);

  // Allocate the underlying buffer and store a pointer to it in the MemRef
  // descriptor.
  Type elementPtrType = this->getElementPtrType(memRefType);
  auto stream = adaptor.asyncDependencies().front();
  Value allocatedPtr =
      allocCallBuilder.create(loc, rewriter, {sizeBytes, stream}).getResult(0);
  allocatedPtr =
      rewriter.create<LLVM::BitcastOp>(loc, elementPtrType, allocatedPtr);

  // No alignment.
  Value alignedPtr = allocatedPtr;

  // Create the MemRef descriptor.
  auto memRefDescriptor = this->createMemRefDescriptor(
      loc, memRefType, allocatedPtr, alignedPtr, shape, strides, rewriter);

  rewriter.replaceOp(allocOp, {memRefDescriptor, stream});

  return success();
}

LogicalResult ConvertDeallocOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::DeallocOp deallocOp, ArrayRef<Value> operands,
    ConversionPatternRewriter &rewriter) const {
  if (failed(areAllLLVMTypes(deallocOp, operands, rewriter)) ||
      failed(isAsyncWithOneDependency(rewriter, deallocOp)))
    return failure();

  Location loc = deallocOp.getLoc();

  auto adaptor =
      gpu::DeallocOpAdaptor(operands, deallocOp->getAttrDictionary());
  Value pointer =
      MemRefDescriptor(adaptor.memref()).allocatedPtr(rewriter, loc);
  auto casted = rewriter.create<LLVM::BitcastOp>(loc, llvmPointerType, pointer);
  Value stream = adaptor.asyncDependencies().front();
  deallocCallBuilder.create(loc, rewriter, {casted, stream});

  rewriter.replaceOp(deallocOp, {stream});
  return success();
}

static bool isGpuAsyncTokenType(Value value) {
  return value.getType().isa<gpu::AsyncTokenType>();
}

// Converts !gpu.async.token operands of `async.yield` to runtime calls. The
// !gpu.async.token operands are lowered to streams within the async.execute
// region, but are passed as events between regions. For each !gpu.async.token
// operand, we create an event and record it on the stream.
LogicalResult ConvertAsyncYieldToGpuRuntimeCallPattern::matchAndRewrite(
    async::YieldOp yieldOp, ArrayRef<Value> operands,
    ConversionPatternRewriter &rewriter) const {
  if (llvm::none_of(yieldOp.operands(), isGpuAsyncTokenType))
    return rewriter.notifyMatchFailure(yieldOp, "no gpu async token operand");

  Location loc = yieldOp.getLoc();
  SmallVector<Value, 4> newOperands(operands.begin(), operands.end());
  llvm::SmallDenseSet<Value> streams;
  for (auto &operand : yieldOp->getOpOperands()) {
    if (!isGpuAsyncTokenType(operand.get()))
      continue;
    auto idx = operand.getOperandNumber();
    auto stream = operands[idx];
    auto event = eventCreateCallBuilder.create(loc, rewriter, {}).getResult(0);
    eventRecordCallBuilder.create(loc, rewriter, {event, stream});
    newOperands[idx] = event;
    streams.insert(stream);
  }
  for (auto stream : streams)
    streamDestroyCallBuilder.create(loc, rewriter, {stream});

  rewriter.updateRootInPlace(yieldOp,
                             [&] { yieldOp->setOperands(newOperands); });
  return success();
}

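// A hedged sketch of the effect on IR: inside an async.execute region,
//
//   async.yield %stream : !gpu.async.token
//
// becomes, after conversion (tokens lowered to !llvm.ptr<i8>),
//
//   %event = llvm.call @mgpuEventCreate() : () -> !llvm.ptr<i8>
//   llvm.call @mgpuEventRecord(%event, %stream)
//       : (!llvm.ptr<i8>, !llvm.ptr<i8>) -> ()
//   llvm.call @mgpuStreamDestroy(%stream) : (!llvm.ptr<i8>) -> ()
//   async.yield %event : !llvm.ptr<i8>
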
// Returns whether `value` is the result of an LLVM::CallOp to `functionName`.
static bool isDefinedByCallTo(Value value, StringRef functionName) {
  assert(value.getType().isa<LLVM::LLVMPointerType>());
  if (auto defOp = value.getDefiningOp<LLVM::CallOp>())
    return defOp.callee()->equals(functionName);
  return false;
}

// Converts `gpu.wait` to runtime calls. The converted op synchronizes the
// host with the stream/event operands. The operands are destroyed, i.e. it is
// assumed that they are not used afterwards or elsewhere. Otherwise we will
// get a runtime error. Eventually, we should guarantee this property.
LogicalResult ConvertWaitOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::WaitOp waitOp, ArrayRef<Value> operands,
    ConversionPatternRewriter &rewriter) const {
  if (waitOp.asyncToken())
    return rewriter.notifyMatchFailure(waitOp, "Cannot convert async op.");

  Location loc = waitOp.getLoc();

  for (auto operand : operands) {
    if (isDefinedByCallTo(operand, streamCreateCallBuilder.functionName)) {
      // The converted operand's definition created a stream.
      streamSynchronizeCallBuilder.create(loc, rewriter, {operand});
      streamDestroyCallBuilder.create(loc, rewriter, {operand});
    } else {
      // Otherwise the converted operand is an event. This assumes that we use
      // events in control flow code as well.
      eventSynchronizeCallBuilder.create(loc, rewriter, {operand});
      eventDestroyCallBuilder.create(loc, rewriter, {operand});
    }
  }

  rewriter.eraseOp(waitOp);
  return success();
}

// Converts `gpu.wait async` to runtime calls. The converted op creates a new
// stream that is synchronized with the stream/event operands. The operands
// are destroyed, i.e. it is assumed that they are not used afterwards or
// elsewhere. Otherwise we will get a runtime error. Eventually, we should
// guarantee this property.
LogicalResult ConvertWaitAsyncOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::WaitOp waitOp, ArrayRef<Value> operands,
    ConversionPatternRewriter &rewriter) const {
  if (!waitOp.asyncToken())
    return rewriter.notifyMatchFailure(waitOp, "Can only convert async op.");

  Location loc = waitOp.getLoc();

  auto insertionPoint = rewriter.saveInsertionPoint();
  SmallVector<Value, 1> events;
  for (auto pair : llvm::zip(waitOp.asyncDependencies(), operands)) {
    auto operand = std::get<1>(pair);
    if (isDefinedByCallTo(operand, streamCreateCallBuilder.functionName)) {
      // The converted operand's definition created a stream. Insert an event
      // into the stream just after the last use of the original token operand.
      auto *defOp = std::get<0>(pair).getDefiningOp();
      rewriter.setInsertionPointAfter(defOp);
      auto event =
          eventCreateCallBuilder.create(loc, rewriter, {}).getResult(0);
      eventRecordCallBuilder.create(loc, rewriter, {event, operand});
      events.push_back(event);
    } else {
      // Otherwise the converted operand is an event. This assumes that we use
      // events in control flow code as well.
      events.push_back(operand);
    }
  }
  rewriter.restoreInsertionPoint(insertionPoint);
  auto stream = streamCreateCallBuilder.create(loc, rewriter, {}).getResult(0);
  for (auto event : events)
    streamWaitEventCallBuilder.create(loc, rewriter, {stream, event});
  for (auto event : events)
    eventDestroyCallBuilder.create(loc, rewriter, {event});
  rewriter.replaceOp(waitOp, {stream});

  return success();
}

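// A hedged sketch of this rewrite:
//
//   %t1 = gpu.wait async [%t0]
//
// becomes (with %s0 the stream that lowered %t0)
//
//   %event = llvm.call @mgpuEventCreate() : () -> !llvm.ptr<i8>
//   llvm.call @mgpuEventRecord(%event, %s0) ...   // near %s0's definition
//   %s1 = llvm.call @mgpuStreamCreate() : () -> !llvm.ptr<i8>
//   llvm.call @mgpuStreamWaitEvent(%s1, %event) ...
//   llvm.call @mgpuEventDestroy(%event) ...
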
// Creates a struct containing all kernel parameters on the stack and returns
// an array of type-erased pointers to the fields of the struct. The array can
// then be passed to the CUDA / ROCm (HIP) kernel launch calls.
// The generated code is essentially as follows:
//
// %struct = alloca(sizeof(struct { Parameters... }))
// %array = alloca(NumParameters * sizeof(void *))
// for (i : [0, NumParameters))
//   %fieldPtr = llvm.getelementptr %struct[0, i]
//   llvm.store parameters[i], %fieldPtr
//   %elementPtr = llvm.getelementptr %array[i]
//   llvm.store %fieldPtr, %elementPtr
// return %array
Value ConvertLaunchFuncOpToGpuRuntimeCallPattern::generateParamsArray(
    gpu::LaunchFuncOp launchOp, ArrayRef<Value> operands,
    OpBuilder &builder) const {
  auto loc = launchOp.getLoc();
  auto numKernelOperands = launchOp.getNumKernelOperands();
  auto arguments = getTypeConverter()->promoteOperands(
      loc, launchOp.getOperands().take_back(numKernelOperands),
      operands.take_back(numKernelOperands), builder);
  auto numArguments = arguments.size();
  SmallVector<Type, 4> argumentTypes;
  argumentTypes.reserve(numArguments);
  for (auto argument : arguments)
    argumentTypes.push_back(argument.getType());
  auto structType = LLVM::LLVMStructType::getNewIdentified(context, StringRef(),
                                                           argumentTypes);
  auto one = builder.create<LLVM::ConstantOp>(loc, llvmInt32Type,
                                              builder.getI32IntegerAttr(1));
  auto structPtr = builder.create<LLVM::AllocaOp>(
      loc, LLVM::LLVMPointerType::get(structType), one, /*alignment=*/0);
  auto arraySize = builder.create<LLVM::ConstantOp>(
      loc, llvmInt32Type, builder.getI32IntegerAttr(numArguments));
  auto arrayPtr = builder.create<LLVM::AllocaOp>(loc, llvmPointerPointerType,
                                                 arraySize, /*alignment=*/0);
  auto zero = builder.create<LLVM::ConstantOp>(loc, llvmInt32Type,
                                               builder.getI32IntegerAttr(0));
  for (auto en : llvm::enumerate(arguments)) {
    auto index = builder.create<LLVM::ConstantOp>(
        loc, llvmInt32Type, builder.getI32IntegerAttr(en.index()));
    auto fieldPtr = builder.create<LLVM::GEPOp>(
        loc, LLVM::LLVMPointerType::get(argumentTypes[en.index()]), structPtr,
        ArrayRef<Value>{zero, index.getResult()});
    builder.create<LLVM::StoreOp>(loc, en.value(), fieldPtr);
    auto elementPtr = builder.create<LLVM::GEPOp>(loc, llvmPointerPointerType,
                                                  arrayPtr, index.getResult());
    auto casted =
        builder.create<LLVM::BitcastOp>(loc, llvmPointerType, fieldPtr);
    builder.create<LLVM::StoreOp>(loc, casted, elementPtr);
  }
  return arrayPtr;
}

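// As a worked example (hedged; exact types depend on the type converter):
// for a kernel taking (f32, !llvm.ptr<f32>), this builds a one-element
// alloca of struct<(f32, ptr<f32>)> plus a two-element void** array whose
// entries point at the struct's fields; that array is what the launch call
// below consumes as kernelParams.
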
// Generates an LLVM IR dialect global that contains the name of the given
// kernel function as a C string, and returns a pointer to its beginning.
// The code is essentially:
//
// llvm.global constant @kernel_name("function_name\00")
// func(...) {
//   %0 = llvm.addressof @kernel_name
//   %1 = llvm.constant (0 : index)
//   %2 = llvm.getelementptr %0[%1, %1] : !llvm<"i8*">
// }
Value ConvertLaunchFuncOpToGpuRuntimeCallPattern::generateKernelNameConstant(
    StringRef moduleName, StringRef name, Location loc,
    OpBuilder &builder) const {
  // Make sure the trailing zero is included in the constant.
  std::vector<char> kernelName(name.begin(), name.end());
  kernelName.push_back('\0');

  std::string globalName =
      std::string(llvm::formatv("{0}_{1}_kernel_name", moduleName, name));
  return LLVM::createGlobalString(
      loc, builder, globalName, StringRef(kernelName.data(), kernelName.size()),
      LLVM::Linkage::Internal);
}

// Emits LLVM IR to launch a kernel function. Expects the module that contains
// the compiled kernel function as a cubin in the 'nvvm.cubin' attribute, or a
// hsaco in the 'rocdl.hsaco' attribute of the kernel function in the IR.
//
// %0 = call %binarygetter
// %1 = call %moduleLoad(%0)
// %2 = <see generateKernelNameConstant>
// %3 = call %moduleGetFunction(%1, %2)
// %4 = call %streamCreate()
// %5 = <see generateParamsArray>
// call %launchKernel(%3, <launchOp operands 0..5>, 0, %4, %5, nullptr)
// call %streamSynchronize(%4)
// call %streamDestroy(%4)
// call %moduleUnload(%1)
//
// If the op is async, the stream corresponds to the (single) async dependency
// as well as the async token the op produces.
LogicalResult ConvertLaunchFuncOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::LaunchFuncOp launchOp, ArrayRef<Value> operands,
    ConversionPatternRewriter &rewriter) const {
  if (failed(areAllLLVMTypes(launchOp, operands, rewriter)))
    return failure();

  if (launchOp.asyncDependencies().size() > 1)
    return rewriter.notifyMatchFailure(
        launchOp, "Cannot convert with more than one async dependency.");

  // Fail when the synchronous version of the op has async dependencies. The
  // lowering destroys the stream, and we do not want to check that there is no
  // use of the stream after this op.
  if (!launchOp.asyncToken() && !launchOp.asyncDependencies().empty())
    return rewriter.notifyMatchFailure(
        launchOp, "Cannot convert non-async op with async dependencies.");

  Location loc = launchOp.getLoc();

  // Create an LLVM global with the CUBIN / HSACO data extracted from the
  // kernel annotation and obtain a pointer to the first byte in it.
  auto kernelModule = SymbolTable::lookupNearestSymbolFrom<gpu::GPUModuleOp>(
      launchOp, launchOp.getKernelModuleName());
  assert(kernelModule && "expected a kernel module");

  auto binaryAttr =
      kernelModule->getAttrOfType<StringAttr>(gpuBinaryAnnotation);
  if (!binaryAttr) {
    kernelModule.emitOpError()
        << "missing " << gpuBinaryAnnotation << " attribute";
    return failure();
  }

  SmallString<128> nameBuffer(kernelModule.getName());
  nameBuffer.append(kGpuBinaryStorageSuffix);
  Value data =
      LLVM::createGlobalString(loc, rewriter, nameBuffer.str(),
                               binaryAttr.getValue(), LLVM::Linkage::Internal);

  auto module = moduleLoadCallBuilder.create(loc, rewriter, data);
  // Get the function from the module. The name corresponds to the name of
  // the kernel function.
  auto kernelName = generateKernelNameConstant(
      launchOp.getKernelModuleName(), launchOp.getKernelName(), loc, rewriter);
  auto function = moduleGetFunctionCallBuilder.create(
      loc, rewriter, {module.getResult(0), kernelName});
  auto zero = rewriter.create<LLVM::ConstantOp>(loc, llvmInt32Type,
                                                rewriter.getI32IntegerAttr(0));
  auto adaptor =
      gpu::LaunchFuncOpAdaptor(operands, launchOp->getAttrDictionary());
  Value stream =
      adaptor.asyncDependencies().empty()
          ? streamCreateCallBuilder.create(loc, rewriter, {}).getResult(0)
          : adaptor.asyncDependencies().front();
  // Create array of pointers to kernel arguments.
  auto kernelParams = generateParamsArray(launchOp, operands, rewriter);
  auto nullpointer = rewriter.create<LLVM::NullOp>(loc, llvmPointerPointerType);
  launchKernelCallBuilder.create(loc, rewriter,
                                 {function.getResult(0), launchOp.gridSizeX(),
                                  launchOp.gridSizeY(), launchOp.gridSizeZ(),
                                  launchOp.blockSizeX(), launchOp.blockSizeY(),
                                  launchOp.blockSizeZ(),
                                  /*sharedMemBytes=*/zero, stream, kernelParams,
                                  /*extra=*/nullpointer});

  if (launchOp.asyncToken()) {
    // Async launch: make dependent ops use the same stream.
    rewriter.replaceOp(launchOp, {stream});
  } else {
    // Synchronize with host and destroy stream. This must be the stream
    // created above (with no other uses) because we check that the
    // synchronous version does not have any async dependencies.
    streamSynchronizeCallBuilder.create(loc, rewriter, stream);
    streamDestroyCallBuilder.create(loc, rewriter, stream);
    rewriter.eraseOp(launchOp);
  }
  moduleUnloadCallBuilder.create(loc, rewriter, module.getResult(0));

  return success();
}

LogicalResult ConvertMemcpyOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::MemcpyOp memcpyOp, ArrayRef<Value> operands,
    ConversionPatternRewriter &rewriter) const {
  auto memRefType = memcpyOp.src().getType().cast<MemRefType>();

  if (failed(areAllLLVMTypes(memcpyOp, operands, rewriter)) ||
      !isConvertibleAndHasIdentityMaps(memRefType) ||
      failed(isAsyncWithOneDependency(rewriter, memcpyOp)))
    return failure();

  auto loc = memcpyOp.getLoc();
  auto adaptor = gpu::MemcpyOpAdaptor(operands, memcpyOp->getAttrDictionary());

  MemRefDescriptor srcDesc(adaptor.src());

  Value numElements =
      memRefType.hasStaticShape()
          ? createIndexConstant(rewriter, loc, memRefType.getNumElements())
          // For identity layouts (verified above), the number of elements is
          // stride[0] * size[0].
          : rewriter.create<LLVM::MulOp>(loc, srcDesc.stride(rewriter, loc, 0),
                                         srcDesc.size(rewriter, loc, 0));

  // Compute the size in bytes with the usual 'GEP from a null pointer, then
  // ptrtoint' idiom, which avoids a target-specific sizeof computation.
  Type elementPtrType = getElementPtrType(memRefType);
  Value nullPtr = rewriter.create<LLVM::NullOp>(loc, elementPtrType);
  Value gepPtr = rewriter.create<LLVM::GEPOp>(
      loc, elementPtrType, ArrayRef<Value>{nullPtr, numElements});
  auto sizeBytes =
      rewriter.create<LLVM::PtrToIntOp>(loc, getIndexType(), gepPtr);

  auto src = rewriter.create<LLVM::BitcastOp>(
      loc, llvmPointerType, srcDesc.alignedPtr(rewriter, loc));
  auto dst = rewriter.create<LLVM::BitcastOp>(
      loc, llvmPointerType,
      MemRefDescriptor(adaptor.dst()).alignedPtr(rewriter, loc));

  auto stream = adaptor.asyncDependencies().front();
  memcpyCallBuilder.create(loc, rewriter, {dst, src, sizeBytes, stream});

  rewriter.replaceOp(memcpyOp, {stream});

  return success();
}

std::unique_ptr<mlir::OperationPass<mlir::ModuleOp>>
mlir::createGpuToLLVMConversionPass() {
  return std::make_unique<GpuToLLVMConversionPass>();
}
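
// A minimal usage sketch for embedders (hypothetical host code, not part of
// this file): the factory above plugs directly into a pass manager.
//
//   PassManager pm(&context);
//   pm.addPass(createGpuToLLVMConversionPass());
//   if (failed(pm.run(module)))
//     /* handle error */;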