//===- ConvertLaunchFuncToGpuRuntimeCalls.cpp - MLIR GPU lowering passes --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a pass to convert gpu.launch_func op into a sequence of
// GPU runtime calls. As most GPU runtimes do not have a stable published
// ABI, this pass uses a slim runtime layer that builds on top of the public
// API from GPU runtime headers.
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/GPUCommon/GPUCommonPass.h"

#include "../PassDetail.h"
#include "mlir/Conversion/AsyncToLLVM/AsyncToLLVM.h"
#include "mlir/Conversion/LLVMCommon/ConversionTarget.h"
#include "mlir/Conversion/LLVMCommon/Pattern.h"
#include "mlir/Conversion/MemRefToLLVM/MemRefToLLVM.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"
#include "mlir/Dialect/Async/IR/Async.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/GPU/Passes.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"

#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/FormatVariadic.h"

using namespace mlir;

static constexpr const char *kGpuBinaryStorageSuffix = "_gpubin_cst";

namespace {

class GpuToLLVMConversionPass
    : public GpuToLLVMConversionPassBase<GpuToLLVMConversionPass> {
public:
  GpuToLLVMConversionPass() = default;

  GpuToLLVMConversionPass(const GpuToLLVMConversionPass &other)
      : GpuToLLVMConversionPassBase(other) {}

  // Run the dialect converter on the module.
  void runOnOperation() override;

private:
  Option<std::string> gpuBinaryAnnotation{
      *this, "gpu-binary-annotation",
      llvm::cl::desc("Annotation attribute string for GPU binary"),
      llvm::cl::init(gpu::getDefaultGpuBinaryAnnotation())};
};

struct FunctionCallBuilder {
  FunctionCallBuilder(StringRef functionName, Type returnType,
                      ArrayRef<Type> argumentTypes)
      : functionName(functionName),
        functionType(LLVM::LLVMFunctionType::get(returnType, argumentTypes)) {}
  LLVM::CallOp create(Location loc, OpBuilder &builder,
                      ArrayRef<Value> arguments) const;

  StringRef functionName;
  LLVM::LLVMFunctionType functionType;
};

template <typename OpTy>
class ConvertOpToGpuRuntimeCallPattern : public ConvertOpToLLVMPattern<OpTy> {
public:
  explicit ConvertOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToLLVMPattern<OpTy>(typeConverter) {}

protected:
  MLIRContext *context = &this->getTypeConverter()->getContext();

  Type llvmVoidType = LLVM::LLVMVoidType::get(context);
  Type llvmPointerType =
      LLVM::LLVMPointerType::get(IntegerType::get(context, 8));
  Type llvmPointerPointerType = LLVM::LLVMPointerType::get(llvmPointerType);
  Type llvmInt8Type = IntegerType::get(context, 8);
  Type llvmInt32Type = IntegerType::get(context, 32);
  Type llvmInt64Type = IntegerType::get(context, 64);
  Type llvmIntPtrType = IntegerType::get(
      context, this->getTypeConverter()->getPointerBitwidth(0));

  FunctionCallBuilder moduleLoadCallBuilder = {
      "mgpuModuleLoad",
      llvmPointerType /* void *module */,
      {llvmPointerType /* void *cubin */}};
  FunctionCallBuilder moduleUnloadCallBuilder = {
      "mgpuModuleUnload", llvmVoidType, {llvmPointerType /* void *module */}};
  FunctionCallBuilder moduleGetFunctionCallBuilder = {
      "mgpuModuleGetFunction",
      llvmPointerType /* void *function */,
      {
          llvmPointerType, /* void *module */
          llvmPointerType  /* char *name */
      }};
  FunctionCallBuilder launchKernelCallBuilder = {
      "mgpuLaunchKernel",
      llvmVoidType,
      {
          llvmPointerType,        /* void* f */
          llvmIntPtrType,         /* intptr_t gridXDim */
          llvmIntPtrType,         /* intptr_t gridYDim */
          llvmIntPtrType,         /* intptr_t gridZDim */
          llvmIntPtrType,         /* intptr_t blockXDim */
          llvmIntPtrType,         /* intptr_t blockYDim */
          llvmIntPtrType,         /* intptr_t blockZDim */
          llvmInt32Type,          /* unsigned int sharedMemBytes */
          llvmPointerType,        /* void *hstream */
          llvmPointerPointerType, /* void **kernelParams */
          llvmPointerPointerType  /* void **extra */
      }};
  FunctionCallBuilder streamCreateCallBuilder = {
      "mgpuStreamCreate", llvmPointerType /* void *stream */, {}};
  FunctionCallBuilder streamDestroyCallBuilder = {
      "mgpuStreamDestroy", llvmVoidType, {llvmPointerType /* void *stream */}};
  FunctionCallBuilder streamSynchronizeCallBuilder = {
      "mgpuStreamSynchronize",
      llvmVoidType,
      {llvmPointerType /* void *stream */}};
  FunctionCallBuilder streamWaitEventCallBuilder = {
      "mgpuStreamWaitEvent",
      llvmVoidType,
      {llvmPointerType /* void *stream */, llvmPointerType /* void *event */}};
  FunctionCallBuilder eventCreateCallBuilder = {
      "mgpuEventCreate", llvmPointerType /* void *event */, {}};
  FunctionCallBuilder eventDestroyCallBuilder = {
      "mgpuEventDestroy", llvmVoidType, {llvmPointerType /* void *event */}};
  FunctionCallBuilder eventSynchronizeCallBuilder = {
      "mgpuEventSynchronize",
      llvmVoidType,
      {llvmPointerType /* void *event */}};
  FunctionCallBuilder eventRecordCallBuilder = {
      "mgpuEventRecord",
      llvmVoidType,
      {llvmPointerType /* void *event */, llvmPointerType /* void *stream */}};
  FunctionCallBuilder hostRegisterCallBuilder = {
      "mgpuMemHostRegisterMemRef",
      llvmVoidType,
      {llvmIntPtrType /* intptr_t rank */,
       llvmPointerType /* void *memrefDesc */,
       llvmIntPtrType /* intptr_t elementSizeBytes */}};
  FunctionCallBuilder allocCallBuilder = {
      "mgpuMemAlloc",
      llvmPointerType /* void * */,
      {llvmIntPtrType /* intptr_t sizeBytes */,
       llvmPointerType /* void *stream */}};
  FunctionCallBuilder deallocCallBuilder = {
      "mgpuMemFree",
      llvmVoidType,
      {llvmPointerType /* void *ptr */, llvmPointerType /* void *stream */}};
  FunctionCallBuilder memcpyCallBuilder = {
      "mgpuMemcpy",
      llvmVoidType,
      {llvmPointerType /* void *dst */, llvmPointerType /* void *src */,
       llvmIntPtrType /* intptr_t sizeBytes */,
       llvmPointerType /* void *stream */}};
};

/// A rewrite pattern to convert gpu.host_register operations into a GPU
/// runtime call. Currently it supports CUDA and ROCm (HIP).
class ConvertHostRegisterOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::HostRegisterOp> {
public:
  ConvertHostRegisterOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::HostRegisterOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(gpu::HostRegisterOp hostRegisterOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override;
};

/// A rewrite pattern to convert gpu.alloc operations into a GPU runtime
/// call. Currently it supports CUDA and ROCm (HIP).
class ConvertAllocOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::AllocOp> {
public:
  ConvertAllocOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::AllocOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(gpu::AllocOp allocOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override;
};

/// A rewrite pattern to convert gpu.dealloc operations into a GPU runtime
/// call. Currently it supports CUDA and ROCm (HIP).
class ConvertDeallocOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::DeallocOp> {
public:
  ConvertDeallocOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::DeallocOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(gpu::DeallocOp deallocOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override;
};

/// A rewrite pattern to convert !gpu.async.token operands of async.yield
/// operations into GPU runtime calls.
class ConvertAsyncYieldToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<async::YieldOp> {
public:
  ConvertAsyncYieldToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<async::YieldOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(async::YieldOp yieldOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override;
};
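
// For illustration (a hedged sketch, not verbatim pass output): inside an
// async.execute region, a !gpu.async.token yielded via
//
//   async.yield %token : !gpu.async.token
//
// lowers to a stream; the pattern above replaces the yielded stream with an
// event recorded on it, so other regions synchronize on the event instead of
// sharing the stream.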
/// A rewrite pattern to convert gpu.wait operations into a GPU runtime
/// call. Currently it supports CUDA and ROCm (HIP).
class ConvertWaitOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::WaitOp> {
public:
  ConvertWaitOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::WaitOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(gpu::WaitOp waitOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override;
};

/// A rewrite pattern to convert gpu.wait async operations into a GPU runtime
/// call. Currently it supports CUDA and ROCm (HIP).
class ConvertWaitAsyncOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::WaitOp> {
public:
  ConvertWaitAsyncOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::WaitOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(gpu::WaitOp waitOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override;
};

/// A rewrite pattern to convert gpu.launch_func operations into a sequence of
/// GPU runtime calls. Currently it supports CUDA and ROCm (HIP).
///
/// In essence, a gpu.launch_func operation gets compiled into the following
/// sequence of runtime calls:
///
/// * moduleLoad        -- loads the module given the cubin / hsaco data
/// * moduleGetFunction -- gets a handle to the actual kernel function
/// * streamCreate      -- creates a new compute stream on the GPU
/// * launchKernel      -- launches the kernel on a stream
/// * streamSynchronize -- waits for operations on the stream to finish
///
/// Intermediate data structures are allocated on the stack.
class ConvertLaunchFuncOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::LaunchFuncOp> {
public:
  ConvertLaunchFuncOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter,
                                             StringRef gpuBinaryAnnotation)
      : ConvertOpToGpuRuntimeCallPattern<gpu::LaunchFuncOp>(typeConverter),
        gpuBinaryAnnotation(gpuBinaryAnnotation) {}

private:
  Value generateParamsArray(gpu::LaunchFuncOp launchOp,
                            ArrayRef<Value> operands, OpBuilder &builder) const;
  Value generateKernelNameConstant(StringRef moduleName, StringRef name,
                                   Location loc, OpBuilder &builder) const;

  LogicalResult
  matchAndRewrite(gpu::LaunchFuncOp launchOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override;

  llvm::SmallString<32> gpuBinaryAnnotation;
};

class EraseGpuModuleOpPattern : public OpRewritePattern<gpu::GPUModuleOp> {
  using OpRewritePattern<gpu::GPUModuleOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(gpu::GPUModuleOp op,
                                PatternRewriter &rewriter) const override {
    // GPU kernel modules are no longer necessary since we have a global
    // constant with the CUBIN or HSACO data.
    rewriter.eraseOp(op);
    return success();
  }
};
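
// For reference (a hedged sketch; exact assembly may differ across MLIR
// versions), the launch pattern above rewrites ops of the form
//
//   gpu.launch_func @kernel_module::@kernel
//       blocks in (%gx, %gy, %gz) threads in (%bx, %by, %bz)
//       args(%arg0 : f32, %arg1 : memref<?xf32>)
//
// where @kernel_module carries the compiled binary in the attribute named by
// `gpuBinaryAnnotation` (e.g. nvvm.cubin or rocdl.hsaco).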
/// A rewrite pattern to convert gpu.memcpy operations into a GPU runtime
/// call. Currently it supports CUDA and ROCm (HIP).
class ConvertMemcpyOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::MemcpyOp> {
public:
  ConvertMemcpyOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::MemcpyOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(gpu::MemcpyOp memcpyOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override;
};
} // namespace

void GpuToLLVMConversionPass::runOnOperation() {
  LLVMTypeConverter converter(&getContext());
  RewritePatternSet patterns(&getContext());
  LLVMConversionTarget target(getContext());

  target.addIllegalDialect<gpu::GPUDialect>();
  target.addIllegalOp<UnrealizedConversionCastOp>();

  populateVectorToLLVMConversionPatterns(converter, patterns);
  populateMemRefToLLVMConversionPatterns(converter, patterns);
  populateStdToLLVMConversionPatterns(converter, patterns);
  populateAsyncStructuralTypeConversionsAndLegality(converter, patterns,
                                                    target);

  // Lower !gpu.async.token to a plain pointer (the runtime stream handle).
  converter.addConversion(
      [context = &converter.getContext()](gpu::AsyncTokenType type) -> Type {
        return LLVM::LLVMPointerType::get(IntegerType::get(context, 8));
      });
  patterns.add<ConvertAllocOpToGpuRuntimeCallPattern,
               ConvertDeallocOpToGpuRuntimeCallPattern,
               ConvertHostRegisterOpToGpuRuntimeCallPattern,
               ConvertMemcpyOpToGpuRuntimeCallPattern,
               ConvertWaitAsyncOpToGpuRuntimeCallPattern,
               ConvertWaitOpToGpuRuntimeCallPattern,
               ConvertAsyncYieldToGpuRuntimeCallPattern>(converter);
  patterns.add<ConvertLaunchFuncOpToGpuRuntimeCallPattern>(converter,
                                                           gpuBinaryAnnotation);
  patterns.add<EraseGpuModuleOpPattern>(&converter.getContext());

  if (failed(
          applyPartialConversion(getOperation(), target, std::move(patterns))))
    signalPassFailure();
}

LLVM::CallOp FunctionCallBuilder::create(Location loc, OpBuilder &builder,
                                         ArrayRef<Value> arguments) const {
  auto module = builder.getBlock()->getParent()->getParentOfType<ModuleOp>();
  // Lazily declare the runtime function at the end of the module if it has
  // not been declared yet.
  auto function = [&] {
    if (auto function = module.lookupSymbol<LLVM::LLVMFuncOp>(functionName))
      return function;
    return OpBuilder::atBlockEnd(module.getBody())
        .create<LLVM::LLVMFuncOp>(loc, functionName, functionType);
  }();
  return builder.create<LLVM::CallOp>(loc, function, arguments);
}
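
// Example usage (a descriptive sketch of the helper above): the call
//
//   streamCreateCallBuilder.create(loc, rewriter, {});
//
// declares `llvm.func @mgpuStreamCreate() -> !llvm.ptr<i8>` at the end of the
// module if no such symbol exists yet, and emits
// `llvm.call @mgpuStreamCreate() : () -> !llvm.ptr<i8>` at the builder's
// current insertion point.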
// Returns whether all operands are of LLVM type.
static LogicalResult areAllLLVMTypes(Operation *op, ValueRange operands,
                                     ConversionPatternRewriter &rewriter) {
  if (!llvm::all_of(operands, [](Value value) {
        return LLVM::isCompatibleType(value.getType());
      }))
    return rewriter.notifyMatchFailure(
        op, "Cannot convert if operands aren't of LLVM type.");
  return success();
}

// Returns success if `op` is async with exactly one dependency; the lowering
// patterns below only support this form.
static LogicalResult
isAsyncWithOneDependency(ConversionPatternRewriter &rewriter,
                         gpu::AsyncOpInterface op) {
  if (op.getAsyncDependencies().size() != 1)
    return rewriter.notifyMatchFailure(
        op, "Can only convert with exactly one async dependency.");

  if (!op.getAsyncToken())
    return rewriter.notifyMatchFailure(op, "Can convert only async version.");

  return success();
}

LogicalResult ConvertHostRegisterOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::HostRegisterOp hostRegisterOp, ArrayRef<Value> operands,
    ConversionPatternRewriter &rewriter) const {
  auto *op = hostRegisterOp.getOperation();
  if (failed(areAllLLVMTypes(op, operands, rewriter)))
    return failure();

  Location loc = op->getLoc();

  auto memRefType = hostRegisterOp.value().getType();
  auto elementType = memRefType.cast<UnrankedMemRefType>().getElementType();
  auto elementSize = getSizeInBytes(loc, elementType, rewriter);

  auto arguments = getTypeConverter()->promoteOperands(loc, op->getOperands(),
                                                       operands, rewriter);
  arguments.push_back(elementSize);
  hostRegisterCallBuilder.create(loc, rewriter, arguments);

  rewriter.eraseOp(op);
  return success();
}
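
// For illustration (a hedged sketch, not verbatim pass output), the pattern
// below rewrites
//
//   %memref, %t1 = gpu.alloc async [%t0] (%size) : memref<?xf32>
//
// into an mgpuMemAlloc call on the stream that %t0 lowers to, followed by the
// construction of a MemRef descriptor around the returned device pointer.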
LogicalResult ConvertAllocOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::AllocOp allocOp, ArrayRef<Value> operands,
    ConversionPatternRewriter &rewriter) const {
  MemRefType memRefType = allocOp.getType();

  if (failed(areAllLLVMTypes(allocOp, operands, rewriter)) ||
      !isConvertibleAndHasIdentityMaps(memRefType) ||
      failed(isAsyncWithOneDependency(rewriter, allocOp)))
    return failure();

  auto loc = allocOp.getLoc();
  auto adaptor = gpu::AllocOpAdaptor(operands, allocOp->getAttrDictionary());

  // Get shape of the memref as values: static sizes are constant
  // values and dynamic sizes are passed to 'alloc' as operands.
  SmallVector<Value, 4> shape;
  SmallVector<Value, 4> strides;
  Value sizeBytes;
  getMemRefDescriptorSizes(loc, memRefType, adaptor.dynamicSizes(), rewriter,
                           shape, strides, sizeBytes);

  // Allocate the underlying buffer and store a pointer to it in the MemRef
  // descriptor.
  Type elementPtrType = this->getElementPtrType(memRefType);
  auto stream = adaptor.asyncDependencies().front();
  Value allocatedPtr =
      allocCallBuilder.create(loc, rewriter, {sizeBytes, stream}).getResult(0);
  allocatedPtr =
      rewriter.create<LLVM::BitcastOp>(loc, elementPtrType, allocatedPtr);

  // No alignment.
  Value alignedPtr = allocatedPtr;

  // Create the MemRef descriptor.
  auto memRefDescriptor = this->createMemRefDescriptor(
      loc, memRefType, allocatedPtr, alignedPtr, shape, strides, rewriter);

  rewriter.replaceOp(allocOp, {memRefDescriptor, stream});

  return success();
}

LogicalResult ConvertDeallocOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::DeallocOp deallocOp, ArrayRef<Value> operands,
    ConversionPatternRewriter &rewriter) const {
  if (failed(areAllLLVMTypes(deallocOp, operands, rewriter)) ||
      failed(isAsyncWithOneDependency(rewriter, deallocOp)))
    return failure();

  Location loc = deallocOp.getLoc();

  auto adaptor =
      gpu::DeallocOpAdaptor(operands, deallocOp->getAttrDictionary());
  Value pointer =
      MemRefDescriptor(adaptor.memref()).allocatedPtr(rewriter, loc);
  auto casted = rewriter.create<LLVM::BitcastOp>(loc, llvmPointerType, pointer);
  Value stream = adaptor.asyncDependencies().front();
  deallocCallBuilder.create(loc, rewriter, {casted, stream});

  rewriter.replaceOp(deallocOp, {stream});
  return success();
}

static bool isGpuAsyncTokenType(Value value) {
  return value.getType().isa<gpu::AsyncTokenType>();
}

// Converts !gpu.async.token operands of `async.yield` to runtime calls. The
// !gpu.async.token operands are lowered to streams within the async.execute
// region, but are passed as events between regions. For each !gpu.async.token
// operand, we create an event and record it on the stream.
LogicalResult ConvertAsyncYieldToGpuRuntimeCallPattern::matchAndRewrite(
    async::YieldOp yieldOp, ArrayRef<Value> operands,
    ConversionPatternRewriter &rewriter) const {
  if (llvm::none_of(yieldOp.operands(), isGpuAsyncTokenType))
    return rewriter.notifyMatchFailure(yieldOp, "no gpu async token operand");

  Location loc = yieldOp.getLoc();
  SmallVector<Value, 4> newOperands(operands.begin(), operands.end());
  llvm::SmallDenseSet<Value> streams;
  for (auto &operand : yieldOp->getOpOperands()) {
    if (!isGpuAsyncTokenType(operand.get()))
      continue;
    auto idx = operand.getOperandNumber();
    auto stream = operands[idx];
    auto event = eventCreateCallBuilder.create(loc, rewriter, {}).getResult(0);
    eventRecordCallBuilder.create(loc, rewriter, {event, stream});
    newOperands[idx] = event;
    streams.insert(stream);
  }
  // Destroy each stream exactly once, after all its events are recorded.
  for (auto stream : streams)
    streamDestroyCallBuilder.create(loc, rewriter, {stream});

  rewriter.updateRootInPlace(yieldOp,
                             [&] { yieldOp->setOperands(newOperands); });
  return success();
}

// Returns whether `value` is the result of an LLVM::CallOp to `functionName`.
static bool isDefinedByCallTo(Value value, StringRef functionName) {
  assert(value.getType().isa<LLVM::LLVMPointerType>());
  if (auto defOp = value.getDefiningOp<LLVM::CallOp>())
    return defOp.callee()->equals(functionName);
  return false;
}
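
// Note (descriptive only): the gpu.wait lowerings below distinguish streams
// from events purely syntactically, by checking which runtime function
// defines the converted value. A value produced by a call to mgpuStreamCreate
// is treated as a stream; anything else is assumed to be an event.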
// Converts `gpu.wait` to runtime calls. The converted op synchronizes the host
// with the stream/event operands. The operands are destroyed, i.e. the
// lowering assumes they are not used afterwards or elsewhere; otherwise we
// will get a runtime error. Eventually, we should guarantee this property.
LogicalResult ConvertWaitOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::WaitOp waitOp, ArrayRef<Value> operands,
    ConversionPatternRewriter &rewriter) const {
  if (waitOp.asyncToken())
    return rewriter.notifyMatchFailure(waitOp, "Cannot convert async op.");

  Location loc = waitOp.getLoc();

  for (auto operand : operands) {
    if (isDefinedByCallTo(operand, streamCreateCallBuilder.functionName)) {
      // The converted operand's definition created a stream.
      streamSynchronizeCallBuilder.create(loc, rewriter, {operand});
      streamDestroyCallBuilder.create(loc, rewriter, {operand});
    } else {
      // Otherwise the converted operand is an event. This assumes that we use
      // events in control flow code as well.
      eventSynchronizeCallBuilder.create(loc, rewriter, {operand});
      eventDestroyCallBuilder.create(loc, rewriter, {operand});
    }
  }

  rewriter.eraseOp(waitOp);
  return success();
}

// Converts `gpu.wait async` to runtime calls. The converted op creates a new
// stream that is synchronized with the stream/event operands. The operands
// are destroyed, i.e. the lowering assumes they are not used afterwards or
// elsewhere; otherwise we will get a runtime error. Eventually, we should
// guarantee this property.
LogicalResult ConvertWaitAsyncOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::WaitOp waitOp, ArrayRef<Value> operands,
    ConversionPatternRewriter &rewriter) const {
  if (!waitOp.asyncToken())
    return rewriter.notifyMatchFailure(waitOp, "Can only convert async op.");

  Location loc = waitOp.getLoc();

  auto insertionPoint = rewriter.saveInsertionPoint();
  SmallVector<Value, 1> events;
  for (auto pair : llvm::zip(waitOp.asyncDependencies(), operands)) {
    auto operand = std::get<1>(pair);
    if (isDefinedByCallTo(operand, streamCreateCallBuilder.functionName)) {
      // The converted operand's definition created a stream. Insert an event
      // into the stream just after the last use of the original token operand.
      auto *defOp = std::get<0>(pair).getDefiningOp();
      rewriter.setInsertionPointAfter(defOp);
      auto event =
          eventCreateCallBuilder.create(loc, rewriter, {}).getResult(0);
      eventRecordCallBuilder.create(loc, rewriter, {event, operand});
      events.push_back(event);
    } else {
      // Otherwise the converted operand is an event. This assumes that we use
      // events in control flow code as well.
      events.push_back(operand);
    }
  }
  rewriter.restoreInsertionPoint(insertionPoint);
  auto stream = streamCreateCallBuilder.create(loc, rewriter, {}).getResult(0);
  for (auto event : events)
    streamWaitEventCallBuilder.create(loc, rewriter, {stream, event});
  for (auto event : events)
    eventDestroyCallBuilder.create(loc, rewriter, {event});
  rewriter.replaceOp(waitOp, {stream});

  return success();
}
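
// For illustration of the conversion above (a hedged sketch, not verbatim
// pass output):
//
//   %t2 = gpu.wait async [%t0, %t1]
//
// lowers to an mgpuStreamCreate call producing a fresh stream that waits, via
// mgpuStreamWaitEvent, on one event per dependency before any further work is
// enqueued on it.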
// Creates a struct containing all kernel parameters on the stack and returns
// an array of type-erased pointers to the fields of the struct. The array can
// then be passed to the CUDA / ROCm (HIP) kernel launch calls.
// The generated code is essentially as follows:
//
// %struct = alloca(sizeof(struct { Parameters... }))
// %array = alloca(NumParameters * sizeof(void *))
// for (i : [0, NumParameters))
//   %fieldPtr = llvm.getelementptr %struct[0, i]
//   llvm.store parameters[i], %fieldPtr
//   %elementPtr = llvm.getelementptr %array[i]
//   llvm.store %fieldPtr, %elementPtr
// return %array
Value ConvertLaunchFuncOpToGpuRuntimeCallPattern::generateParamsArray(
    gpu::LaunchFuncOp launchOp, ArrayRef<Value> operands,
    OpBuilder &builder) const {
  auto loc = launchOp.getLoc();
  auto numKernelOperands = launchOp.getNumKernelOperands();
  auto arguments = getTypeConverter()->promoteOperands(
      loc, launchOp.getOperands().take_back(numKernelOperands),
      operands.take_back(numKernelOperands), builder);
  auto numArguments = arguments.size();
  SmallVector<Type, 4> argumentTypes;
  argumentTypes.reserve(numArguments);
  for (auto argument : arguments)
    argumentTypes.push_back(argument.getType());
  auto structType = LLVM::LLVMStructType::getNewIdentified(context, StringRef(),
                                                           argumentTypes);
  auto one = builder.create<LLVM::ConstantOp>(loc, llvmInt32Type,
                                              builder.getI32IntegerAttr(1));
  auto structPtr = builder.create<LLVM::AllocaOp>(
      loc, LLVM::LLVMPointerType::get(structType), one, /*alignment=*/0);
  auto arraySize = builder.create<LLVM::ConstantOp>(
      loc, llvmInt32Type, builder.getI32IntegerAttr(numArguments));
  auto arrayPtr = builder.create<LLVM::AllocaOp>(loc, llvmPointerPointerType,
                                                 arraySize, /*alignment=*/0);
  auto zero = builder.create<LLVM::ConstantOp>(loc, llvmInt32Type,
                                               builder.getI32IntegerAttr(0));
  for (auto en : llvm::enumerate(arguments)) {
    auto index = builder.create<LLVM::ConstantOp>(
        loc, llvmInt32Type, builder.getI32IntegerAttr(en.index()));
    auto fieldPtr = builder.create<LLVM::GEPOp>(
        loc, LLVM::LLVMPointerType::get(argumentTypes[en.index()]), structPtr,
        ArrayRef<Value>{zero, index.getResult()});
    builder.create<LLVM::StoreOp>(loc, en.value(), fieldPtr);
    auto elementPtr = builder.create<LLVM::GEPOp>(loc, llvmPointerPointerType,
                                                  arrayPtr, index.getResult());
    auto casted =
        builder.create<LLVM::BitcastOp>(loc, llvmPointerType, fieldPtr);
    builder.create<LLVM::StoreOp>(loc, casted, elementPtr);
  }
  return arrayPtr;
}

// Generates an LLVM IR dialect global that contains the name of the given
// kernel function as a C string, and returns a pointer to its beginning.
// The code is essentially:
//
// llvm.global constant @kernel_name("function_name\00")
// func(...) {
//   %0 = llvm.addressof @kernel_name
//   %1 = llvm.constant (0 : index)
//   %2 = llvm.getelementptr %0[%1, %1] : !llvm<"i8*">
// }
Value ConvertLaunchFuncOpToGpuRuntimeCallPattern::generateKernelNameConstant(
    StringRef moduleName, StringRef name, Location loc,
    OpBuilder &builder) const {
  // Make sure the trailing zero is included in the constant.
  std::vector<char> kernelName(name.begin(), name.end());
  kernelName.push_back('\0');

  std::string globalName =
      std::string(llvm::formatv("{0}_{1}_kernel_name", moduleName, name));
  return LLVM::createGlobalString(
      loc, builder, globalName, StringRef(kernelName.data(), kernelName.size()),
      LLVM::Linkage::Internal);
}
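
// Example (descriptive only): for kernel module @foo and kernel @bar, the
// helper above creates a global named "foo_bar_kernel_name" holding the bytes
// "bar\0" and returns an i8* to its first character, which is what
// mgpuModuleGetFunction expects as the name argument.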
// Emits LLVM IR to launch a kernel function. Expects the module that contains
// the compiled kernel function as a cubin in the 'nvvm.cubin' attribute, or a
// hsaco in the 'rocdl.hsaco' attribute of the kernel function in the IR.
//
// %0 = call %binarygetter
// %1 = call %moduleLoad(%0)
// %2 = <see generateKernelNameConstant>
// %3 = call %moduleGetFunction(%1, %2)
// %4 = call %streamCreate()
// %5 = <see generateParamsArray>
// call %launchKernel(%3, <launchOp operands 0..5>, 0, %4, %5, nullptr)
// call %streamSynchronize(%4)
// call %streamDestroy(%4)
// call %moduleUnload(%1)
//
// If the op is async, the stream corresponds to the (single) async dependency
// as well as the async token the op produces.
LogicalResult ConvertLaunchFuncOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::LaunchFuncOp launchOp, ArrayRef<Value> operands,
    ConversionPatternRewriter &rewriter) const {
  if (failed(areAllLLVMTypes(launchOp, operands, rewriter)))
    return failure();

  if (launchOp.asyncDependencies().size() > 1)
    return rewriter.notifyMatchFailure(
        launchOp, "Cannot convert with more than one async dependency.");

  // Fail when the synchronous version of the op has async dependencies. The
  // lowering destroys the stream, and we do not want to check that there is no
  // use of the stream after this op.
  if (!launchOp.asyncToken() && !launchOp.asyncDependencies().empty())
    return rewriter.notifyMatchFailure(
        launchOp, "Cannot convert non-async op with async dependencies.");

  Location loc = launchOp.getLoc();

  // Create an LLVM global with CUBIN extracted from the kernel annotation and
  // obtain a pointer to the first byte in it.
  auto kernelModule = SymbolTable::lookupNearestSymbolFrom<gpu::GPUModuleOp>(
      launchOp, launchOp.getKernelModuleName());
  assert(kernelModule && "expected a kernel module");

  auto binaryAttr =
      kernelModule->getAttrOfType<StringAttr>(gpuBinaryAnnotation);
  if (!binaryAttr) {
    kernelModule.emitOpError()
        << "missing " << gpuBinaryAnnotation << " attribute";
    return failure();
  }

  SmallString<128> nameBuffer(kernelModule.getName());
  nameBuffer.append(kGpuBinaryStorageSuffix);
  Value data =
      LLVM::createGlobalString(loc, rewriter, nameBuffer.str(),
                               binaryAttr.getValue(), LLVM::Linkage::Internal);

  auto module = moduleLoadCallBuilder.create(loc, rewriter, data);
  // Get the function from the module. The name corresponds to the name of
  // the kernel function.
  auto kernelName = generateKernelNameConstant(
      launchOp.getKernelModuleName(), launchOp.getKernelName(), loc, rewriter);
  auto function = moduleGetFunctionCallBuilder.create(
      loc, rewriter, {module.getResult(0), kernelName});
  auto zero = rewriter.create<LLVM::ConstantOp>(loc, llvmInt32Type,
                                                rewriter.getI32IntegerAttr(0));
  auto adaptor =
      gpu::LaunchFuncOpAdaptor(operands, launchOp->getAttrDictionary());
  Value stream =
      adaptor.asyncDependencies().empty()
          ? streamCreateCallBuilder.create(loc, rewriter, {}).getResult(0)
          : adaptor.asyncDependencies().front();
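  // Note (descriptive only): with a single async dependency, that dependency
  // already lowers to a stream (see the wait patterns above), so the kernel
  // is enqueued on the existing stream rather than a freshly created one.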
  // Create array of pointers to kernel arguments.
  auto kernelParams = generateParamsArray(launchOp, operands, rewriter);
  auto nullpointer = rewriter.create<LLVM::NullOp>(loc, llvmPointerPointerType);
  launchKernelCallBuilder.create(loc, rewriter,
                                 {function.getResult(0), launchOp.gridSizeX(),
                                  launchOp.gridSizeY(), launchOp.gridSizeZ(),
                                  launchOp.blockSizeX(), launchOp.blockSizeY(),
                                  launchOp.blockSizeZ(),
                                  /*sharedMemBytes=*/zero, stream, kernelParams,
                                  /*extra=*/nullpointer});

  if (launchOp.asyncToken()) {
    // Async launch: make dependent ops use the same stream.
    rewriter.replaceOp(launchOp, {stream});
  } else {
    // Synchronize with host and destroy stream. This must be the stream
    // created above (with no other uses) because we check that the
    // synchronous version does not have any async dependencies.
    streamSynchronizeCallBuilder.create(loc, rewriter, stream);
    streamDestroyCallBuilder.create(loc, rewriter, stream);
    rewriter.eraseOp(launchOp);
  }
  moduleUnloadCallBuilder.create(loc, rewriter, module.getResult(0));

  return success();
}

LogicalResult ConvertMemcpyOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::MemcpyOp memcpyOp, ArrayRef<Value> operands,
    ConversionPatternRewriter &rewriter) const {
  auto memRefType = memcpyOp.src().getType().cast<MemRefType>();

  if (failed(areAllLLVMTypes(memcpyOp, operands, rewriter)) ||
      !isConvertibleAndHasIdentityMaps(memRefType) ||
      failed(isAsyncWithOneDependency(rewriter, memcpyOp)))
    return failure();

  auto loc = memcpyOp.getLoc();
  auto adaptor = gpu::MemcpyOpAdaptor(operands, memcpyOp->getAttrDictionary());

  MemRefDescriptor srcDesc(adaptor.src());

  Value numElements =
      memRefType.hasStaticShape()
          ? createIndexConstant(rewriter, loc, memRefType.getNumElements())
          // For identity layouts (verified above), the number of elements is
          // stride[0] * size[0].
          : rewriter.create<LLVM::MulOp>(loc, srcDesc.stride(rewriter, loc, 0),
                                         srcDesc.size(rewriter, loc, 0));

  // Compute the size in bytes via a null-pointer GEP: the address of element
  // `numElements` relative to a null base equals the buffer size.
  Type elementPtrType = getElementPtrType(memRefType);
  Value nullPtr = rewriter.create<LLVM::NullOp>(loc, elementPtrType);
  Value gepPtr = rewriter.create<LLVM::GEPOp>(
      loc, elementPtrType, ArrayRef<Value>{nullPtr, numElements});
  auto sizeBytes =
      rewriter.create<LLVM::PtrToIntOp>(loc, getIndexType(), gepPtr);

  auto src = rewriter.create<LLVM::BitcastOp>(
      loc, llvmPointerType, srcDesc.alignedPtr(rewriter, loc));
  auto dst = rewriter.create<LLVM::BitcastOp>(
      loc, llvmPointerType,
      MemRefDescriptor(adaptor.dst()).alignedPtr(rewriter, loc));

  auto stream = adaptor.asyncDependencies().front();
  memcpyCallBuilder.create(loc, rewriter, {dst, src, sizeBytes, stream});

  rewriter.replaceOp(memcpyOp, {stream});

  return success();
}

std::unique_ptr<mlir::OperationPass<mlir::ModuleOp>>
mlir::createGpuToLLVMConversionPass() {
  return std::make_unique<GpuToLLVMConversionPass>();
}