//===- ConvertLaunchFuncToGpuRuntimeCalls.cpp - MLIR GPU lowering passes --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a pass to convert the gpu.launch_func op into a
// sequence of GPU runtime calls. As most GPU runtimes do not have a stable
// published ABI, this pass uses a slim runtime layer that builds on top of the
// public API from GPU runtime headers.
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/GPUCommon/GPUCommonPass.h"

#include "../PassDetail.h"
#include "mlir/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.h"
#include "mlir/Conversion/AsyncToLLVM/AsyncToLLVM.h"
#include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h"
#include "mlir/Conversion/LLVMCommon/ConversionTarget.h"
#include "mlir/Conversion/LLVMCommon/Pattern.h"
#include "mlir/Conversion/MemRefToLLVM/MemRefToLLVM.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"
#include "mlir/Dialect/Async/IR/Async.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/GPU/Passes.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"

#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/FormatVariadic.h"

using namespace mlir;

static constexpr const char *kGpuBinaryStorageSuffix = "_gpubin_cst";

namespace {

class GpuToLLVMConversionPass
    : public GpuToLLVMConversionPassBase<GpuToLLVMConversionPass> {
public:
  GpuToLLVMConversionPass() = default;

  GpuToLLVMConversionPass(const GpuToLLVMConversionPass &other)
      : GpuToLLVMConversionPassBase(other) {}

  // Run the dialect converter on the module.
  void runOnOperation() override;

private:
  Option<std::string> gpuBinaryAnnotation{
      *this, "gpu-binary-annotation",
      llvm::cl::desc("Annotation attribute string for GPU binary"),
      llvm::cl::init(gpu::getDefaultGpuBinaryAnnotation())};
};

struct FunctionCallBuilder {
  FunctionCallBuilder(StringRef functionName, Type returnType,
                      ArrayRef<Type> argumentTypes)
      : functionName(functionName),
        functionType(LLVM::LLVMFunctionType::get(returnType, argumentTypes)) {}
  LLVM::CallOp create(Location loc, OpBuilder &builder,
                      ArrayRef<Value> arguments) const;

  StringRef functionName;
  LLVM::LLVMFunctionType functionType;
};

template <typename OpTy>
class ConvertOpToGpuRuntimeCallPattern : public ConvertOpToLLVMPattern<OpTy> {
public:
  explicit ConvertOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToLLVMPattern<OpTy>(typeConverter) {}

protected:
  Value getNumElements(ConversionPatternRewriter &rewriter, Location loc,
                       MemRefType type, MemRefDescriptor desc) const {
    return type.hasStaticShape()
               ? ConvertToLLVMPattern::createIndexConstant(
                     rewriter, loc, type.getNumElements())
               // For identity maps (verified by caller), the number of
               // elements is stride[0] * size[0].
               : rewriter.create<LLVM::MulOp>(loc,
                                              desc.stride(rewriter, loc, 0),
                                              desc.size(rewriter, loc, 0));
  }

  MLIRContext *context = &this->getTypeConverter()->getContext();

  Type llvmVoidType = LLVM::LLVMVoidType::get(context);
  Type llvmPointerType =
      LLVM::LLVMPointerType::get(IntegerType::get(context, 8));
  Type llvmPointerPointerType = LLVM::LLVMPointerType::get(llvmPointerType);
  Type llvmInt8Type = IntegerType::get(context, 8);
  Type llvmInt32Type = IntegerType::get(context, 32);
  Type llvmInt64Type = IntegerType::get(context, 64);
  Type llvmIntPtrType = IntegerType::get(
      context, this->getTypeConverter()->getPointerBitwidth(0));

  FunctionCallBuilder moduleLoadCallBuilder = {
      "mgpuModuleLoad",
      llvmPointerType /* void *module */,
      {llvmPointerType /* void *cubin */}};
  FunctionCallBuilder moduleUnloadCallBuilder = {
      "mgpuModuleUnload", llvmVoidType, {llvmPointerType /* void *module */}};
  FunctionCallBuilder moduleGetFunctionCallBuilder = {
      "mgpuModuleGetFunction",
      llvmPointerType /* void *function */,
      {
          llvmPointerType, /* void *module */
          llvmPointerType  /* char *name */
      }};
  FunctionCallBuilder launchKernelCallBuilder = {
      "mgpuLaunchKernel",
      llvmVoidType,
      {
          llvmPointerType,        /* void *f */
          llvmIntPtrType,         /* intptr_t gridXDim */
          llvmIntPtrType,         /* intptr_t gridYDim */
          llvmIntPtrType,         /* intptr_t gridZDim */
          llvmIntPtrType,         /* intptr_t blockXDim */
          llvmIntPtrType,         /* intptr_t blockYDim */
          llvmIntPtrType,         /* intptr_t blockZDim */
          llvmInt32Type,          /* unsigned int sharedMemBytes */
          llvmPointerType,        /* void *hstream */
          llvmPointerPointerType, /* void **kernelParams */
          llvmPointerPointerType  /* void **extra */
      }};
  FunctionCallBuilder streamCreateCallBuilder = {
      "mgpuStreamCreate", llvmPointerType /* void *stream */, {}};
  FunctionCallBuilder streamDestroyCallBuilder = {
      "mgpuStreamDestroy", llvmVoidType, {llvmPointerType /* void *stream */}};
  FunctionCallBuilder streamSynchronizeCallBuilder = {
      "mgpuStreamSynchronize",
      llvmVoidType,
      {llvmPointerType /* void *stream */}};
  FunctionCallBuilder streamWaitEventCallBuilder = {
      "mgpuStreamWaitEvent",
      llvmVoidType,
      {llvmPointerType /* void *stream */, llvmPointerType /* void *event */}};
  FunctionCallBuilder eventCreateCallBuilder = {
      "mgpuEventCreate", llvmPointerType /* void *event */, {}};
  FunctionCallBuilder eventDestroyCallBuilder = {
      "mgpuEventDestroy", llvmVoidType, {llvmPointerType /* void *event */}};
  FunctionCallBuilder eventSynchronizeCallBuilder = {
      "mgpuEventSynchronize",
      llvmVoidType,
      {llvmPointerType /* void *event */}};
  FunctionCallBuilder eventRecordCallBuilder = {
      "mgpuEventRecord",
      llvmVoidType,
      {llvmPointerType /* void *event */, llvmPointerType /* void *stream */}};
  FunctionCallBuilder hostRegisterCallBuilder = {
      "mgpuMemHostRegisterMemRef",
      llvmVoidType,
      {llvmIntPtrType /* intptr_t rank */,
       llvmPointerType /* void *memrefDesc */,
       llvmIntPtrType /* intptr_t elementSizeBytes */}};
  FunctionCallBuilder allocCallBuilder = {
      "mgpuMemAlloc",
      llvmPointerType /* void * */,
      {llvmIntPtrType /* intptr_t sizeBytes */,
       llvmPointerType /* void *stream */}};
  FunctionCallBuilder deallocCallBuilder = {
      "mgpuMemFree",
      llvmVoidType,
      {llvmPointerType /* void *ptr */, llvmPointerType /* void *stream */}};
  FunctionCallBuilder memcpyCallBuilder = {
      "mgpuMemcpy",
      llvmVoidType,
      {llvmPointerType /* void *dst */, llvmPointerType /* void *src */,
       llvmIntPtrType /* intptr_t sizeBytes */,
       llvmPointerType /* void *stream */}};
  FunctionCallBuilder memsetCallBuilder = {
      "mgpuMemset32",
      llvmVoidType,
      {llvmPointerType /* void *dst */, llvmInt32Type /* unsigned int value */,
       llvmIntPtrType /* intptr_t sizeBytes */,
       llvmPointerType /* void *stream */}};
};

/// A rewrite pattern to convert gpu.host_register operations into a GPU runtime
/// call. Currently it supports CUDA and ROCm (HIP).
class ConvertHostRegisterOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::HostRegisterOp> {
public:
  ConvertHostRegisterOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::HostRegisterOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(gpu::HostRegisterOp hostRegisterOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override;
};

/// A rewrite pattern to convert gpu.alloc operations into a GPU runtime
/// call. Currently it supports CUDA and ROCm (HIP).
class ConvertAllocOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::AllocOp> {
public:
  ConvertAllocOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::AllocOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(gpu::AllocOp allocOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override;
};

/// A rewrite pattern to convert gpu.dealloc operations into a GPU runtime
/// call. Currently it supports CUDA and ROCm (HIP).
class ConvertDeallocOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::DeallocOp> {
public:
  ConvertDeallocOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::DeallocOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(gpu::DeallocOp deallocOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override;
};

class ConvertAsyncYieldToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<async::YieldOp> {
public:
  ConvertAsyncYieldToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<async::YieldOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(async::YieldOp yieldOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override;
};

/// A rewrite pattern to convert gpu.wait operations into a GPU runtime
/// call. Currently it supports CUDA and ROCm (HIP).
class ConvertWaitOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::WaitOp> {
public:
  ConvertWaitOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::WaitOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(gpu::WaitOp waitOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override;
};

/// A rewrite pattern to convert gpu.wait async operations into a GPU runtime
/// call.
/// Currently it supports CUDA and ROCm (HIP).
class ConvertWaitAsyncOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::WaitOp> {
public:
  ConvertWaitAsyncOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::WaitOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(gpu::WaitOp waitOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override;
};

/// A rewrite pattern to convert gpu.launch_func operations into a sequence of
/// GPU runtime calls. Currently it supports CUDA and ROCm (HIP).
///
/// In essence, a gpu.launch_func operation gets compiled into the following
/// sequence of runtime calls:
///
/// * moduleLoad -- loads the module given the cubin / hsaco data
/// * moduleGetFunction -- gets a handle to the actual kernel function
/// * streamCreate -- creates a new compute stream on the GPU
/// * launchKernel -- launches the kernel on a stream
/// * streamSynchronize -- waits for operations on the stream to finish
///
/// Intermediate data structures are allocated on the stack.
class ConvertLaunchFuncOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::LaunchFuncOp> {
public:
  ConvertLaunchFuncOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter,
                                             StringRef gpuBinaryAnnotation)
      : ConvertOpToGpuRuntimeCallPattern<gpu::LaunchFuncOp>(typeConverter),
        gpuBinaryAnnotation(gpuBinaryAnnotation) {}

private:
  Value generateParamsArray(gpu::LaunchFuncOp launchOp, OpAdaptor adaptor,
                            OpBuilder &builder) const;
  Value generateKernelNameConstant(StringRef moduleName, StringRef name,
                                   Location loc, OpBuilder &builder) const;

  LogicalResult
  matchAndRewrite(gpu::LaunchFuncOp launchOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override;

  llvm::SmallString<32> gpuBinaryAnnotation;
};

class EraseGpuModuleOpPattern : public OpRewritePattern<gpu::GPUModuleOp> {
  using OpRewritePattern<gpu::GPUModuleOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(gpu::GPUModuleOp op,
                                PatternRewriter &rewriter) const override {
    // GPU kernel modules are no longer necessary since we have a global
    // constant with the CUBIN or HSACO data.
    rewriter.eraseOp(op);
    return success();
  }
};

/// A rewrite pattern to convert gpu.memcpy operations into a GPU runtime
/// call. Currently it supports CUDA and ROCm (HIP).
class ConvertMemcpyOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::MemcpyOp> {
public:
  ConvertMemcpyOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::MemcpyOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(gpu::MemcpyOp memcpyOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override;
};

/// A rewrite pattern to convert gpu.memset operations into a GPU runtime
/// call. Currently it supports CUDA and ROCm (HIP).
class ConvertMemsetOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::MemsetOp> {
public:
  ConvertMemsetOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::MemsetOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(gpu::MemsetOp memsetOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override;
};
} // namespace

void GpuToLLVMConversionPass::runOnOperation() {
  LLVMTypeConverter converter(&getContext());
  RewritePatternSet patterns(&getContext());
  LLVMConversionTarget target(getContext());

  target.addIllegalDialect<gpu::GPUDialect>();

  mlir::arith::populateArithmeticToLLVMConversionPatterns(converter, patterns);
  mlir::cf::populateControlFlowToLLVMConversionPatterns(converter, patterns);
  populateVectorToLLVMConversionPatterns(converter, patterns);
  populateMemRefToLLVMConversionPatterns(converter, patterns);
  populateStdToLLVMConversionPatterns(converter, patterns);
  populateAsyncStructuralTypeConversionsAndLegality(converter, patterns,
                                                    target);
  populateGpuToLLVMConversionPatterns(converter, patterns,
                                      gpuBinaryAnnotation);

  if (failed(
          applyPartialConversion(getOperation(), target, std::move(patterns))))
    signalPassFailure();
}

LLVM::CallOp FunctionCallBuilder::create(Location loc, OpBuilder &builder,
                                         ArrayRef<Value> arguments) const {
  auto module = builder.getBlock()->getParent()->getParentOfType<ModuleOp>();
  auto function = [&] {
    if (auto function = module.lookupSymbol<LLVM::LLVMFuncOp>(functionName))
      return function;
    return OpBuilder::atBlockEnd(module.getBody())
        .create<LLVM::LLVMFuncOp>(loc, functionName, functionType);
  }();
  return builder.create<LLVM::CallOp>(loc, function, arguments);
}
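
// Illustrative sketch (not part of the upstream file): a FunctionCallBuilder
// declared in ConvertOpToGpuRuntimeCallPattern is typically used from within
// a pattern as
//
//   Value stream =
//       streamCreateCallBuilder.create(loc, rewriter, {}).getResult(0);
//
// which lazily inserts the `mgpuStreamCreate` declaration at the end of the
// module (if it is not already present) and then emits an llvm.call to it.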

// Returns whether all operands are of LLVM type.
static LogicalResult areAllLLVMTypes(Operation *op, ValueRange operands,
                                     ConversionPatternRewriter &rewriter) {
  if (!llvm::all_of(operands, [](Value value) {
        return LLVM::isCompatibleType(value.getType());
      }))
    return rewriter.notifyMatchFailure(
        op, "Cannot convert if operands aren't of LLVM type.");
  return success();
}

static LogicalResult
isAsyncWithOneDependency(ConversionPatternRewriter &rewriter,
                         gpu::AsyncOpInterface op) {
  if (op.getAsyncDependencies().size() != 1)
    return rewriter.notifyMatchFailure(
        op, "Can only convert with exactly one async dependency.");

  if (!op.getAsyncToken())
    return rewriter.notifyMatchFailure(op, "Can convert only async version.");

  return success();
}

LogicalResult ConvertHostRegisterOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::HostRegisterOp hostRegisterOp, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  auto *op = hostRegisterOp.getOperation();
  if (failed(areAllLLVMTypes(op, adaptor.getOperands(), rewriter)))
    return failure();

  Location loc = op->getLoc();

  auto memRefType = hostRegisterOp.value().getType();
  auto elementType = memRefType.cast<UnrankedMemRefType>().getElementType();
  auto elementSize = getSizeInBytes(loc, elementType, rewriter);

  auto arguments = getTypeConverter()->promoteOperands(
      loc, op->getOperands(), adaptor.getOperands(), rewriter);
  arguments.push_back(elementSize);
  hostRegisterCallBuilder.create(loc, rewriter, arguments);

  rewriter.eraseOp(op);
  return success();
}

LogicalResult ConvertAllocOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::AllocOp allocOp, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  MemRefType memRefType = allocOp.getType();

  if (failed(areAllLLVMTypes(allocOp, adaptor.getOperands(), rewriter)) ||
      !isConvertibleAndHasIdentityMaps(memRefType) ||
      failed(isAsyncWithOneDependency(rewriter, allocOp)))
    return failure();

  auto loc = allocOp.getLoc();

  // Get shape of the memref as values: static sizes are constant
  // values and dynamic sizes are passed to 'alloc' as operands.
  SmallVector<Value, 4> shape;
  SmallVector<Value, 4> strides;
  Value sizeBytes;
  getMemRefDescriptorSizes(loc, memRefType, adaptor.dynamicSizes(), rewriter,
                           shape, strides, sizeBytes);

  // Allocate the underlying buffer and store a pointer to it in the MemRef
  // descriptor.
  Type elementPtrType = this->getElementPtrType(memRefType);
  auto stream = adaptor.asyncDependencies().front();
  Value allocatedPtr =
      allocCallBuilder.create(loc, rewriter, {sizeBytes, stream}).getResult(0);
  allocatedPtr =
      rewriter.create<LLVM::BitcastOp>(loc, elementPtrType, allocatedPtr);

  // No alignment.
  Value alignedPtr = allocatedPtr;

  // Create the MemRef descriptor.
  auto memRefDescriptor = this->createMemRefDescriptor(
      loc, memRefType, allocatedPtr, alignedPtr, shape, strides, rewriter);

  rewriter.replaceOp(allocOp, {memRefDescriptor, stream});

  return success();
}

LogicalResult ConvertDeallocOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::DeallocOp deallocOp, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  if (failed(areAllLLVMTypes(deallocOp, adaptor.getOperands(), rewriter)) ||
      failed(isAsyncWithOneDependency(rewriter, deallocOp)))
    return failure();

  Location loc = deallocOp.getLoc();

  Value pointer =
      MemRefDescriptor(adaptor.memref()).allocatedPtr(rewriter, loc);
  auto casted = rewriter.create<LLVM::BitcastOp>(loc, llvmPointerType, pointer);
  Value stream = adaptor.asyncDependencies().front();
  deallocCallBuilder.create(loc, rewriter, {casted, stream});

  rewriter.replaceOp(deallocOp, {stream});
  return success();
}

static bool isGpuAsyncTokenType(Value value) {
  return value.getType().isa<gpu::AsyncTokenType>();
}

// Converts !gpu.async.token operands of `async.yield` to runtime calls. The
// !gpu.async.token values are lowered to streams within the async.execute
// region, but are passed as events between regions. For each !gpu.async.token
// operand, we create an event and record it on the stream.
LogicalResult ConvertAsyncYieldToGpuRuntimeCallPattern::matchAndRewrite(
    async::YieldOp yieldOp, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  if (llvm::none_of(yieldOp.operands(), isGpuAsyncTokenType))
    return rewriter.notifyMatchFailure(yieldOp, "no gpu async token operand");

  Location loc = yieldOp.getLoc();
  SmallVector<Value, 4> newOperands(adaptor.getOperands());
  llvm::SmallDenseSet<Value> streams;
  for (auto &operand : yieldOp->getOpOperands()) {
    if (!isGpuAsyncTokenType(operand.get()))
      continue;
    auto idx = operand.getOperandNumber();
    auto stream = adaptor.getOperands()[idx];
    auto event = eventCreateCallBuilder.create(loc, rewriter, {}).getResult(0);
    eventRecordCallBuilder.create(loc, rewriter, {event, stream});
    newOperands[idx] = event;
    streams.insert(stream);
  }
  for (auto stream : streams)
    streamDestroyCallBuilder.create(loc, rewriter, {stream});

  rewriter.updateRootInPlace(yieldOp,
                             [&] { yieldOp->setOperands(newOperands); });
  return success();
}

// Returns whether `value` is the result of an LLVM::CallOp to `functionName`.
static bool isDefinedByCallTo(Value value, StringRef functionName) {
  assert(value.getType().isa<LLVM::LLVMPointerType>());
  if (auto defOp = value.getDefiningOp<LLVM::CallOp>())
    return defOp.getCallee()->equals(functionName);
  return false;
}
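
// Illustrative sketch (not part of the upstream file) of how the wait
// patterns below and ConvertAsyncYieldToGpuRuntimeCallPattern above fit
// together within one async.execute region:
//
//   %t0 = gpu.wait async                   // becomes mgpuStreamCreate
//   %t1 = gpu.launch_func async [%t0] ...  // launched on that stream
//   async.yield %t1 : !gpu.async.token     // becomes mgpuEventCreate +
//                                          // mgpuEventRecord, followed by
//                                          // mgpuStreamDestroy
//
// That is, a token is carried as a stream inside a region and as an event
// across region boundaries.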

// Converts `gpu.wait` to runtime calls. The converted op synchronizes the host
// with the stream/event operands. The operands are destroyed. That is, it
// assumes that they are not used afterwards or elsewhere. Otherwise we will
// get a runtime error. Eventually, we should guarantee this property.
LogicalResult ConvertWaitOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::WaitOp waitOp, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  if (waitOp.asyncToken())
    return rewriter.notifyMatchFailure(waitOp, "Cannot convert async op.");

  Location loc = waitOp.getLoc();

  for (auto operand : adaptor.getOperands()) {
    if (isDefinedByCallTo(operand, streamCreateCallBuilder.functionName)) {
      // The converted operand's definition created a stream.
      streamSynchronizeCallBuilder.create(loc, rewriter, {operand});
      streamDestroyCallBuilder.create(loc, rewriter, {operand});
    } else {
      // Otherwise the converted operand is an event. This assumes that we use
      // events in control flow code as well.
      eventSynchronizeCallBuilder.create(loc, rewriter, {operand});
      eventDestroyCallBuilder.create(loc, rewriter, {operand});
    }
  }

  rewriter.eraseOp(waitOp);
  return success();
}

// Converts `gpu.wait async` to runtime calls. The converted op creates a new
// stream that is synchronized with the stream/event operands. The operands
// are destroyed. That is, it assumes that they are not used afterwards or
// elsewhere. Otherwise we will get a runtime error. Eventually, we should
// guarantee this property.
LogicalResult ConvertWaitAsyncOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::WaitOp waitOp, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  if (!waitOp.asyncToken())
    return rewriter.notifyMatchFailure(waitOp, "Can only convert async op.");

  Location loc = waitOp.getLoc();

  auto insertionPoint = rewriter.saveInsertionPoint();
  SmallVector<Value, 1> events;
  for (auto pair :
       llvm::zip(waitOp.asyncDependencies(), adaptor.getOperands())) {
    auto operand = std::get<1>(pair);
    if (isDefinedByCallTo(operand, streamCreateCallBuilder.functionName)) {
      // The converted operand's definition created a stream. Insert an event
      // into the stream just after the last use of the original token operand.
      auto *defOp = std::get<0>(pair).getDefiningOp();
      rewriter.setInsertionPointAfter(defOp);
      auto event =
          eventCreateCallBuilder.create(loc, rewriter, {}).getResult(0);
      eventRecordCallBuilder.create(loc, rewriter, {event, operand});
      events.push_back(event);
    } else {
      // Otherwise the converted operand is an event. This assumes that we use
      // events in control flow code as well.
      events.push_back(operand);
    }
  }
  rewriter.restoreInsertionPoint(insertionPoint);
  auto stream = streamCreateCallBuilder.create(loc, rewriter, {}).getResult(0);
  for (auto event : events)
    streamWaitEventCallBuilder.create(loc, rewriter, {stream, event});
  for (auto event : events)
    eventDestroyCallBuilder.create(loc, rewriter, {event});
  rewriter.replaceOp(waitOp, {stream});

  return success();
}

// Creates a struct containing all kernel parameters on the stack and returns
// an array of type-erased pointers to the fields of the struct. The array can
// then be passed to the CUDA / ROCm (HIP) kernel launch calls.
// The generated code is essentially as follows:
//
// %struct = alloca(sizeof(struct { Parameters... }))
// %array = alloca(NumParameters * sizeof(void *))
// for (i : [0, NumParameters))
//   %fieldPtr = llvm.getelementptr %struct[0, i]
//   llvm.store parameters[i], %fieldPtr
//   %elementPtr = llvm.getelementptr %array[i]
//   llvm.store %fieldPtr, %elementPtr
// return %array
Value ConvertLaunchFuncOpToGpuRuntimeCallPattern::generateParamsArray(
    gpu::LaunchFuncOp launchOp, OpAdaptor adaptor, OpBuilder &builder) const {
  auto loc = launchOp.getLoc();
  auto numKernelOperands = launchOp.getNumKernelOperands();
  auto arguments = getTypeConverter()->promoteOperands(
      loc, launchOp.getOperands().take_back(numKernelOperands),
      adaptor.getOperands().take_back(numKernelOperands), builder);
  auto numArguments = arguments.size();
  SmallVector<Type, 4> argumentTypes;
  argumentTypes.reserve(numArguments);
  for (auto argument : arguments)
    argumentTypes.push_back(argument.getType());
  auto structType = LLVM::LLVMStructType::getNewIdentified(context, StringRef(),
                                                           argumentTypes);
  auto one = builder.create<LLVM::ConstantOp>(loc, llvmInt32Type,
                                              builder.getI32IntegerAttr(1));
  auto structPtr = builder.create<LLVM::AllocaOp>(
      loc, LLVM::LLVMPointerType::get(structType), one, /*alignment=*/0);
  auto arraySize = builder.create<LLVM::ConstantOp>(
      loc, llvmInt32Type, builder.getI32IntegerAttr(numArguments));
  auto arrayPtr = builder.create<LLVM::AllocaOp>(loc, llvmPointerPointerType,
                                                 arraySize, /*alignment=*/0);
  auto zero = builder.create<LLVM::ConstantOp>(loc, llvmInt32Type,
                                               builder.getI32IntegerAttr(0));
  for (const auto &en : llvm::enumerate(arguments)) {
    auto index = builder.create<LLVM::ConstantOp>(
        loc, llvmInt32Type, builder.getI32IntegerAttr(en.index()));
    auto fieldPtr = builder.create<LLVM::GEPOp>(
        loc, LLVM::LLVMPointerType::get(argumentTypes[en.index()]), structPtr,
        ArrayRef<Value>{zero, index.getResult()});
    builder.create<LLVM::StoreOp>(loc, en.value(), fieldPtr);
    auto elementPtr = builder.create<LLVM::GEPOp>(loc, llvmPointerPointerType,
                                                  arrayPtr, index.getResult());
    auto casted =
        builder.create<LLVM::BitcastOp>(loc, llvmPointerType, fieldPtr);
    builder.create<LLVM::StoreOp>(loc, casted, elementPtr);
  }
  return arrayPtr;
}

// Generates an LLVM IR dialect global that contains the name of the given
// kernel function as a C string, and returns a pointer to its beginning.
// The code is essentially:
//
// llvm.global constant @kernel_name("function_name\00")
// func(...) {
//   %0 = llvm.addressof @kernel_name
//   %1 = llvm.constant (0 : index)
//   %2 = llvm.getelementptr %0[%1, %1] : !llvm<"i8*">
// }
Value ConvertLaunchFuncOpToGpuRuntimeCallPattern::generateKernelNameConstant(
    StringRef moduleName, StringRef name, Location loc,
    OpBuilder &builder) const {
  // Make sure the trailing zero is included in the constant.
  std::vector<char> kernelName(name.begin(), name.end());
  kernelName.push_back('\0');

  std::string globalName =
      std::string(llvm::formatv("{0}_{1}_kernel_name", moduleName, name));
  return LLVM::createGlobalString(
      loc, builder, globalName, StringRef(kernelName.data(), kernelName.size()),
      LLVM::Linkage::Internal);
}

// Emits LLVM IR to launch a kernel function.
// Expects the module that contains the compiled kernel function as a cubin in
// the 'nvvm.cubin' attribute, or a hsaco in the 'rocdl.hsaco' attribute of the
// kernel function in the IR.
//
// %0 = call %binarygetter
// %1 = call %moduleLoad(%0)
// %2 = <see generateKernelNameConstant>
// %3 = call %moduleGetFunction(%1, %2)
// %4 = call %streamCreate()
// %5 = <see generateParamsArray>
// call %launchKernel(%3, <launchOp operands 0..5>, 0, %4, %5, nullptr)
// call %streamSynchronize(%4)
// call %streamDestroy(%4)
// call %moduleUnload(%1)
//
// If the op is async, the stream corresponds to the (single) async dependency
// as well as the async token the op produces.
LogicalResult ConvertLaunchFuncOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::LaunchFuncOp launchOp, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  if (failed(areAllLLVMTypes(launchOp, adaptor.getOperands(), rewriter)))
    return failure();

  if (launchOp.asyncDependencies().size() > 1)
    return rewriter.notifyMatchFailure(
        launchOp, "Cannot convert with more than one async dependency.");

  // Fail when the synchronous version of the op has async dependencies. The
  // lowering destroys the stream, and we do not want to check that there is no
  // use of the stream after this op.
  if (!launchOp.asyncToken() && !launchOp.asyncDependencies().empty())
    return rewriter.notifyMatchFailure(
        launchOp, "Cannot convert non-async op with async dependencies.");

  Location loc = launchOp.getLoc();

  // Create an LLVM global with CUBIN extracted from the kernel annotation and
  // obtain a pointer to the first byte in it.
  auto kernelModule = SymbolTable::lookupNearestSymbolFrom<gpu::GPUModuleOp>(
      launchOp, launchOp.getKernelModuleName());
  assert(kernelModule && "expected a kernel module");

  auto binaryAttr =
      kernelModule->getAttrOfType<StringAttr>(gpuBinaryAnnotation);
  if (!binaryAttr) {
    kernelModule.emitOpError()
        << "missing " << gpuBinaryAnnotation << " attribute";
    return failure();
  }

  SmallString<128> nameBuffer(kernelModule.getName());
  nameBuffer.append(kGpuBinaryStorageSuffix);
  Value data =
      LLVM::createGlobalString(loc, rewriter, nameBuffer.str(),
                               binaryAttr.getValue(), LLVM::Linkage::Internal);

  auto module = moduleLoadCallBuilder.create(loc, rewriter, data);
  // Get the function from the module. The name corresponds to the name of
  // the kernel function.
  auto kernelName = generateKernelNameConstant(
      launchOp.getKernelModuleName().getValue(),
      launchOp.getKernelName().getValue(), loc, rewriter);
  auto function = moduleGetFunctionCallBuilder.create(
      loc, rewriter, {module.getResult(0), kernelName});
  auto zero = rewriter.create<LLVM::ConstantOp>(loc, llvmInt32Type,
                                                rewriter.getI32IntegerAttr(0));
  Value stream =
      adaptor.asyncDependencies().empty()
          ? streamCreateCallBuilder.create(loc, rewriter, {}).getResult(0)
          : adaptor.asyncDependencies().front();
  // Create array of pointers to kernel arguments.
  auto kernelParams = generateParamsArray(launchOp, adaptor, rewriter);
  auto nullpointer = rewriter.create<LLVM::NullOp>(loc, llvmPointerPointerType);
  Value dynamicSharedMemorySize = launchOp.dynamicSharedMemorySize()
                                      ? launchOp.dynamicSharedMemorySize()
                                      : zero;
  launchKernelCallBuilder.create(
      loc, rewriter,
      {function.getResult(0), adaptor.gridSizeX(), adaptor.gridSizeY(),
       adaptor.gridSizeZ(), adaptor.blockSizeX(), adaptor.blockSizeY(),
       adaptor.blockSizeZ(), dynamicSharedMemorySize, stream, kernelParams,
       /*extra=*/nullpointer});

  if (launchOp.asyncToken()) {
    // Async launch: make dependent ops use the same stream.
    rewriter.replaceOp(launchOp, {stream});
  } else {
    // Synchronize with host and destroy stream. This must be the stream
    // created above (with no other uses) because we check that the synchronous
    // version does not have any async dependencies.
    streamSynchronizeCallBuilder.create(loc, rewriter, stream);
    streamDestroyCallBuilder.create(loc, rewriter, stream);
    rewriter.eraseOp(launchOp);
  }
  moduleUnloadCallBuilder.create(loc, rewriter, module.getResult(0));

  return success();
}

LogicalResult ConvertMemcpyOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::MemcpyOp memcpyOp, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  auto memRefType = memcpyOp.src().getType().cast<MemRefType>();

  if (failed(areAllLLVMTypes(memcpyOp, adaptor.getOperands(), rewriter)) ||
      !isConvertibleAndHasIdentityMaps(memRefType) ||
      failed(isAsyncWithOneDependency(rewriter, memcpyOp)))
    return failure();

  auto loc = memcpyOp.getLoc();

  MemRefDescriptor srcDesc(adaptor.src());
  Value numElements = getNumElements(rewriter, loc, memRefType, srcDesc);

  Type elementPtrType = getElementPtrType(memRefType);
  Value nullPtr = rewriter.create<LLVM::NullOp>(loc, elementPtrType);
  Value gepPtr = rewriter.create<LLVM::GEPOp>(loc, elementPtrType, nullPtr,
                                              ArrayRef<Value>{numElements});
  auto sizeBytes =
      rewriter.create<LLVM::PtrToIntOp>(loc, getIndexType(), gepPtr);

  auto src = rewriter.create<LLVM::BitcastOp>(
      loc, llvmPointerType, srcDesc.alignedPtr(rewriter, loc));
  auto dst = rewriter.create<LLVM::BitcastOp>(
      loc, llvmPointerType,
      MemRefDescriptor(adaptor.dst()).alignedPtr(rewriter, loc));

  auto stream = adaptor.asyncDependencies().front();
  memcpyCallBuilder.create(loc, rewriter, {dst, src, sizeBytes, stream});

  rewriter.replaceOp(memcpyOp, {stream});

  return success();
}

LogicalResult ConvertMemsetOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::MemsetOp memsetOp, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  auto memRefType = memsetOp.dst().getType().cast<MemRefType>();

  if (failed(areAllLLVMTypes(memsetOp, adaptor.getOperands(), rewriter)) ||
      !isConvertibleAndHasIdentityMaps(memRefType) ||
      failed(isAsyncWithOneDependency(rewriter, memsetOp)))
    return failure();

  auto loc = memsetOp.getLoc();

  Type valueType = adaptor.value().getType();
  if (!valueType.isIntOrFloat() || valueType.getIntOrFloatBitWidth() != 32) {
    return rewriter.notifyMatchFailure(memsetOp,
                                       "value must be a 32 bit scalar");
  }

  MemRefDescriptor dstDesc(adaptor.dst());
  Value numElements = getNumElements(rewriter, loc, memRefType, dstDesc);

  auto value =
      rewriter.create<LLVM::BitcastOp>(loc, llvmInt32Type, adaptor.value());
  auto dst = rewriter.create<LLVM::BitcastOp>(
      loc, llvmPointerType, dstDesc.alignedPtr(rewriter, loc));

  auto stream = adaptor.asyncDependencies().front();
  memsetCallBuilder.create(loc, rewriter, {dst, value, numElements, stream});

  rewriter.replaceOp(memsetOp, {stream});
  return success();
}

std::unique_ptr<mlir::OperationPass<mlir::ModuleOp>>
mlir::createGpuToLLVMConversionPass() {
  return std::make_unique<GpuToLLVMConversionPass>();
}

void mlir::populateGpuToLLVMConversionPatterns(
    LLVMTypeConverter &converter, RewritePatternSet &patterns,
    StringRef gpuBinaryAnnotation) {
  converter.addConversion(
      [context = &converter.getContext()](gpu::AsyncTokenType type) -> Type {
        return LLVM::LLVMPointerType::get(IntegerType::get(context, 8));
      });
  patterns.add<ConvertAllocOpToGpuRuntimeCallPattern,
               ConvertDeallocOpToGpuRuntimeCallPattern,
               ConvertHostRegisterOpToGpuRuntimeCallPattern,
               ConvertMemcpyOpToGpuRuntimeCallPattern,
               ConvertMemsetOpToGpuRuntimeCallPattern,
               ConvertWaitAsyncOpToGpuRuntimeCallPattern,
               ConvertWaitOpToGpuRuntimeCallPattern,
               ConvertAsyncYieldToGpuRuntimeCallPattern>(converter);
  patterns.add<ConvertLaunchFuncOpToGpuRuntimeCallPattern>(converter,
                                                           gpuBinaryAnnotation);
  patterns.add<EraseGpuModuleOpPattern>(&converter.getContext());
}
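
// Illustrative usage (not part of the upstream file; the pass mnemonic and the
// annotation value are assumptions to check against the pass definition and
// the chosen serialization pipeline):
//
//   mlir-opt input.mlir --gpu-to-llvm='gpu-binary-annotation=nvvm.cubin'
//
// or, when building a pass pipeline in C++:
//
//   pm.addPass(createGpuToLLVMConversionPass());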