//===- ConvertLaunchFuncToGpuRuntimeCalls.cpp - MLIR GPU lowering passes --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a pass to convert gpu.launch_func op into a sequence of
// GPU runtime calls. As most GPU runtimes do not have a stable published ABI,
// this pass uses a slim runtime layer that builds on top of the public API
// from GPU runtime headers.
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/GPUCommon/GPUCommonPass.h"

#include "../PassDetail.h"
#include "mlir/Conversion/AsyncToLLVM/AsyncToLLVM.h"
#include "mlir/Conversion/LLVMCommon/ConversionTarget.h"
#include "mlir/Conversion/LLVMCommon/Pattern.h"
#include "mlir/Conversion/MemRefToLLVM/MemRefToLLVM.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"
#include "mlir/Dialect/Async/IR/Async.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/GPU/Passes.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"

#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/FormatVariadic.h"

using namespace mlir;

static constexpr const char *kGpuBinaryStorageSuffix = "_gpubin_cst";
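// For illustration, a launch like the following (names, types, and exact
// assembly syntax are hypothetical):
//
//   gpu.launch_func @kernel_module::@kernel
//       blocks in (%gx, %gy, %gz) threads in (%bx, %by, %bz)
//       args(%arg0 : f32)
//
// is rewritten into calls to the mgpu* wrapper functions declared below
// (mgpuModuleLoad, mgpuModuleGetFunction, mgpuLaunchKernel, ...), which the
// slim runtime layer implements on top of the CUDA or ROCm (HIP) APIs.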
namespace {

class GpuToLLVMConversionPass
    : public GpuToLLVMConversionPassBase<GpuToLLVMConversionPass> {
public:
  GpuToLLVMConversionPass() = default;

  GpuToLLVMConversionPass(const GpuToLLVMConversionPass &other)
      : GpuToLLVMConversionPassBase(other) {}

  // Run the dialect converter on the module.
  void runOnOperation() override;

private:
  Option<std::string> gpuBinaryAnnotation{
      *this, "gpu-binary-annotation",
      llvm::cl::desc("Annotation attribute string for GPU binary"),
      llvm::cl::init(gpu::getDefaultGpuBinaryAnnotation())};
};

struct FunctionCallBuilder {
  FunctionCallBuilder(StringRef functionName, Type returnType,
                      ArrayRef<Type> argumentTypes)
      : functionName(functionName),
        functionType(LLVM::LLVMFunctionType::get(returnType, argumentTypes)) {}
  LLVM::CallOp create(Location loc, OpBuilder &builder,
                      ArrayRef<Value> arguments) const;

  StringRef functionName;
  LLVM::LLVMFunctionType functionType;
};
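// Usage sketch (illustrative, using the cached types defined on
// ConvertOpToGpuRuntimeCallPattern below): create() declares the runtime
// function at module scope on first use and emits an llvm.call to it.
//
//   FunctionCallBuilder streamSync = {"mgpuStreamSynchronize", llvmVoidType,
//                                     {llvmPointerType /* void *stream */}};
//   streamSync.create(loc, rewriter, {stream});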
template <typename OpTy>
class ConvertOpToGpuRuntimeCallPattern : public ConvertOpToLLVMPattern<OpTy> {
public:
  explicit ConvertOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToLLVMPattern<OpTy>(typeConverter) {}

protected:
  MLIRContext *context = &this->getTypeConverter()->getContext();

  Type llvmVoidType = LLVM::LLVMVoidType::get(context);
  Type llvmPointerType =
      LLVM::LLVMPointerType::get(IntegerType::get(context, 8));
  Type llvmPointerPointerType = LLVM::LLVMPointerType::get(llvmPointerType);
  Type llvmInt8Type = IntegerType::get(context, 8);
  Type llvmInt32Type = IntegerType::get(context, 32);
  Type llvmInt64Type = IntegerType::get(context, 64);
  Type llvmIntPtrType = IntegerType::get(
      context, this->getTypeConverter()->getPointerBitwidth(0));

  FunctionCallBuilder moduleLoadCallBuilder = {
      "mgpuModuleLoad",
      llvmPointerType /* void *module */,
      {llvmPointerType /* void *cubin */}};
  FunctionCallBuilder moduleUnloadCallBuilder = {
      "mgpuModuleUnload", llvmVoidType, {llvmPointerType /* void *module */}};
  FunctionCallBuilder moduleGetFunctionCallBuilder = {
      "mgpuModuleGetFunction",
      llvmPointerType /* void *function */,
      {
          llvmPointerType, /* void *module */
          llvmPointerType  /* char *name */
      }};
  FunctionCallBuilder launchKernelCallBuilder = {
      "mgpuLaunchKernel",
      llvmVoidType,
      {
          llvmPointerType,        /* void* f */
          llvmIntPtrType,         /* intptr_t gridXDim */
          llvmIntPtrType,         /* intptr_t gridYDim */
          llvmIntPtrType,         /* intptr_t gridZDim */
          llvmIntPtrType,         /* intptr_t blockXDim */
          llvmIntPtrType,         /* intptr_t blockYDim */
          llvmIntPtrType,         /* intptr_t blockZDim */
          llvmInt32Type,          /* unsigned int sharedMemBytes */
          llvmPointerType,        /* void *hstream */
          llvmPointerPointerType, /* void **kernelParams */
          llvmPointerPointerType  /* void **extra */
      }};
  FunctionCallBuilder streamCreateCallBuilder = {
      "mgpuStreamCreate", llvmPointerType /* void *stream */, {}};
  FunctionCallBuilder streamDestroyCallBuilder = {
      "mgpuStreamDestroy", llvmVoidType, {llvmPointerType /* void *stream */}};
  FunctionCallBuilder streamSynchronizeCallBuilder = {
      "mgpuStreamSynchronize",
      llvmVoidType,
      {llvmPointerType /* void *stream */}};
  FunctionCallBuilder streamWaitEventCallBuilder = {
      "mgpuStreamWaitEvent",
      llvmVoidType,
      {llvmPointerType /* void *stream */, llvmPointerType /* void *event */}};
  FunctionCallBuilder eventCreateCallBuilder = {
      "mgpuEventCreate", llvmPointerType /* void *event */, {}};
  FunctionCallBuilder eventDestroyCallBuilder = {
      "mgpuEventDestroy", llvmVoidType, {llvmPointerType /* void *event */}};
  FunctionCallBuilder eventSynchronizeCallBuilder = {
      "mgpuEventSynchronize",
      llvmVoidType,
      {llvmPointerType /* void *event */}};
  FunctionCallBuilder eventRecordCallBuilder = {
      "mgpuEventRecord",
      llvmVoidType,
      {llvmPointerType /* void *event */, llvmPointerType /* void *stream */}};
  FunctionCallBuilder hostRegisterCallBuilder = {
      "mgpuMemHostRegisterMemRef",
      llvmVoidType,
      {llvmIntPtrType /* intptr_t rank */,
       llvmPointerType /* void *memrefDesc */,
       llvmIntPtrType /* intptr_t elementSizeBytes */}};
  FunctionCallBuilder allocCallBuilder = {
      "mgpuMemAlloc",
      llvmPointerType /* void * */,
      {llvmIntPtrType /* intptr_t sizeBytes */,
       llvmPointerType /* void *stream */}};
  FunctionCallBuilder deallocCallBuilder = {
      "mgpuMemFree",
      llvmVoidType,
      {llvmPointerType /* void *ptr */, llvmPointerType /* void *stream */}};
  FunctionCallBuilder memcpyCallBuilder = {
      "mgpuMemcpy",
      llvmVoidType,
      {llvmPointerType /* void *dst */, llvmPointerType /* void *src */,
       llvmIntPtrType /* intptr_t sizeBytes */,
       llvmPointerType /* void *stream */}};
};
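// For reference, the runtime wrapper library is expected to export C symbols
// matching the builders above. A sketch of two of them (assumed shape only;
// the actual implementations live with the execution engine's CUDA/ROCm
// runtime wrappers):
//
//   extern "C" void *mgpuStreamCreate();
//   extern "C" void mgpuLaunchKernel(void *function, intptr_t gridX,
//                                    intptr_t gridY, intptr_t gridZ,
//                                    intptr_t blockX, intptr_t blockY,
//                                    intptr_t blockZ, unsigned sharedMemBytes,
//                                    void *stream, void **params,
//                                    void **extra);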
/// A rewrite pattern to convert gpu.host_register operations into a GPU
/// runtime call. Currently it supports CUDA and ROCm (HIP).
class ConvertHostRegisterOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::HostRegisterOp> {
public:
  ConvertHostRegisterOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::HostRegisterOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(gpu::HostRegisterOp hostRegisterOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override;
};

/// A rewrite pattern to convert gpu.alloc operations into a GPU runtime
/// call. Currently it supports CUDA and ROCm (HIP).
class ConvertAllocOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::AllocOp> {
public:
  ConvertAllocOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::AllocOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(gpu::AllocOp allocOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override;
};

/// A rewrite pattern to convert gpu.dealloc operations into a GPU runtime
/// call. Currently it supports CUDA and ROCm (HIP).
class ConvertDeallocOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::DeallocOp> {
public:
  ConvertDeallocOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::DeallocOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(gpu::DeallocOp deallocOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override;
};

class ConvertAsyncYieldToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<async::YieldOp> {
public:
  ConvertAsyncYieldToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<async::YieldOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(async::YieldOp yieldOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override;
};

/// A rewrite pattern to convert gpu.wait operations into a GPU runtime
/// call. Currently it supports CUDA and ROCm (HIP).
class ConvertWaitOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::WaitOp> {
public:
  ConvertWaitOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::WaitOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(gpu::WaitOp waitOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override;
};

/// A rewrite pattern to convert gpu.wait async operations into a GPU runtime
/// call. Currently it supports CUDA and ROCm (HIP).
class ConvertWaitAsyncOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::WaitOp> {
public:
  ConvertWaitAsyncOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::WaitOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(gpu::WaitOp waitOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override;
};
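// The two gpu.wait patterns above handle the synchronous and asynchronous
// forms of the op respectively, e.g. (illustrative):
//
//   gpu.wait [%t0]              // host blocks; ConvertWaitOp... pattern
//   %t1 = gpu.wait async [%t0]  // new stream waits; ConvertWaitAsyncOp...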
/// A rewrite pattern to convert gpu.launch_func operations into a sequence of
/// GPU runtime calls. Currently it supports CUDA and ROCm (HIP).
///
/// In essence, a gpu.launch_func operation gets compiled into the following
/// sequence of runtime calls:
///
/// * moduleLoad -- loads the module given the cubin / hsaco data
/// * moduleGetFunction -- gets a handle to the actual kernel function
/// * streamCreate -- creates a new compute stream on the GPU
/// * launchKernel -- launches the kernel on a stream
/// * streamSynchronize -- waits for operations on the stream to finish
///
/// Intermediate data structures are allocated on the stack.
class ConvertLaunchFuncOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::LaunchFuncOp> {
public:
  ConvertLaunchFuncOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter,
                                             StringRef gpuBinaryAnnotation)
      : ConvertOpToGpuRuntimeCallPattern<gpu::LaunchFuncOp>(typeConverter),
        gpuBinaryAnnotation(gpuBinaryAnnotation) {}

private:
  Value generateParamsArray(gpu::LaunchFuncOp launchOp,
                            ArrayRef<Value> operands, OpBuilder &builder) const;
  Value generateKernelNameConstant(StringRef moduleName, StringRef name,
                                   Location loc, OpBuilder &builder) const;

  LogicalResult
  matchAndRewrite(gpu::LaunchFuncOp launchOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override;

  llvm::SmallString<32> gpuBinaryAnnotation;
};

class EraseGpuModuleOpPattern : public OpRewritePattern<gpu::GPUModuleOp> {
  using OpRewritePattern<gpu::GPUModuleOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(gpu::GPUModuleOp op,
                                PatternRewriter &rewriter) const override {
    // GPU kernel modules are no longer necessary since we have a global
    // constant with the CUBIN or HSACO data.
    rewriter.eraseOp(op);
    return success();
  }
};

/// A rewrite pattern to convert gpu.memcpy operations into a GPU runtime
/// call. Currently it supports CUDA and ROCm (HIP).
class ConvertMemcpyOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::MemcpyOp> {
public:
  ConvertMemcpyOpToGpuRuntimeCallPattern(LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::MemcpyOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(gpu::MemcpyOp memcpyOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override;
};
} // namespace

void GpuToLLVMConversionPass::runOnOperation() {
  LLVMTypeConverter converter(&getContext());
  RewritePatternSet patterns(&getContext());
  LLVMConversionTarget target(getContext());

  target.addIllegalDialect<gpu::GPUDialect>();

  populateVectorToLLVMConversionPatterns(converter, patterns);
  populateMemRefToLLVMConversionPatterns(converter, patterns);
  populateStdToLLVMConversionPatterns(converter, patterns);
  populateAsyncStructuralTypeConversionsAndLegality(converter, patterns,
                                                    target);

  // Lower !gpu.async.token to an opaque pointer (a stream or event handle).
  converter.addConversion(
      [context = &converter.getContext()](gpu::AsyncTokenType type) -> Type {
        return LLVM::LLVMPointerType::get(IntegerType::get(context, 8));
      });
  patterns.add<ConvertAllocOpToGpuRuntimeCallPattern,
               ConvertDeallocOpToGpuRuntimeCallPattern,
               ConvertHostRegisterOpToGpuRuntimeCallPattern,
               ConvertMemcpyOpToGpuRuntimeCallPattern,
               ConvertWaitAsyncOpToGpuRuntimeCallPattern,
               ConvertWaitOpToGpuRuntimeCallPattern,
               ConvertAsyncYieldToGpuRuntimeCallPattern>(converter);
  patterns.add<ConvertLaunchFuncOpToGpuRuntimeCallPattern>(converter,
                                                           gpuBinaryAnnotation);
  patterns.add<EraseGpuModuleOpPattern>(&converter.getContext());

  if (failed(
          applyPartialConversion(getOperation(), target, std::move(patterns))))
    signalPassFailure();
}
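// Usage sketch (illustrative; pass and flag names are assumptions that may
// differ across MLIR versions): this pass is meant to run after kernel
// outlining and after the kernel module has been serialized into the binary
// annotation attribute, e.g.
//
//   mlir-opt app.mlir \
//     --gpu-kernel-outlining \
//     <target-specific codegen and binary serialization passes> \
//     --gpu-to-llvm="gpu-binary-annotation=nvvm.cubin"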
LLVM::CallOp FunctionCallBuilder::create(Location loc, OpBuilder &builder,
                                         ArrayRef<Value> arguments) const {
  auto module = builder.getBlock()->getParent()->getParentOfType<ModuleOp>();
  auto function = [&] {
    if (auto function = module.lookupSymbol<LLVM::LLVMFuncOp>(functionName))
      return function;
    return OpBuilder::atBlockEnd(module.getBody())
        .create<LLVM::LLVMFuncOp>(loc, functionName, functionType);
  }();
  return builder.create<LLVM::CallOp>(
      loc, const_cast<LLVM::LLVMFunctionType &>(functionType).getReturnType(),
      builder.getSymbolRefAttr(function), arguments);
}

// Returns success if all operands are of LLVM type.
static LogicalResult areAllLLVMTypes(Operation *op, ValueRange operands,
                                     ConversionPatternRewriter &rewriter) {
  if (!llvm::all_of(operands, [](Value value) {
        return LLVM::isCompatibleType(value.getType());
      }))
    return rewriter.notifyMatchFailure(
        op, "Cannot convert if operands aren't of LLVM type.");
  return success();
}

static LogicalResult
isAsyncWithOneDependency(ConversionPatternRewriter &rewriter,
                         gpu::AsyncOpInterface op) {
  if (op.getAsyncDependencies().size() != 1)
    return rewriter.notifyMatchFailure(
        op, "Can only convert with exactly one async dependency.");

  if (!op.getAsyncToken())
    return rewriter.notifyMatchFailure(op, "Can convert only async version.");

  return success();
}

LogicalResult ConvertHostRegisterOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::HostRegisterOp hostRegisterOp, ArrayRef<Value> operands,
    ConversionPatternRewriter &rewriter) const {
  auto *op = hostRegisterOp.getOperation();
  if (failed(areAllLLVMTypes(op, operands, rewriter)))
    return failure();

  Location loc = op->getLoc();

  auto memRefType = hostRegisterOp.value().getType();
  auto elementType = memRefType.cast<UnrankedMemRefType>().getElementType();
  auto elementSize = getSizeInBytes(loc, elementType, rewriter);

  auto arguments = getTypeConverter()->promoteOperands(loc, op->getOperands(),
                                                       operands, rewriter);
  arguments.push_back(elementSize);
  hostRegisterCallBuilder.create(loc, rewriter, arguments);

  rewriter.eraseOp(op);
  return success();
}

LogicalResult ConvertAllocOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::AllocOp allocOp, ArrayRef<Value> operands,
    ConversionPatternRewriter &rewriter) const {
  MemRefType memRefType = allocOp.getType();

  if (failed(areAllLLVMTypes(allocOp, operands, rewriter)) ||
      !isConvertibleAndHasIdentityMaps(memRefType) ||
      failed(isAsyncWithOneDependency(rewriter, allocOp)))
    return failure();

  auto loc = allocOp.getLoc();
  auto adaptor = gpu::AllocOpAdaptor(operands, allocOp->getAttrDictionary());

  // Get shape of the memref as values: static sizes are constant
  // values and dynamic sizes are passed to 'alloc' as operands.
  SmallVector<Value, 4> shape;
  SmallVector<Value, 4> strides;
  Value sizeBytes;
  getMemRefDescriptorSizes(loc, memRefType, adaptor.dynamicSizes(), rewriter,
                           shape, strides, sizeBytes);

  // Allocate the underlying buffer and store a pointer to it in the MemRef
  // descriptor.
  Type elementPtrType = this->getElementPtrType(memRefType);
  auto stream = adaptor.asyncDependencies().front();
  Value allocatedPtr =
      allocCallBuilder.create(loc, rewriter, {sizeBytes, stream}).getResult(0);
  allocatedPtr =
      rewriter.create<LLVM::BitcastOp>(loc, elementPtrType, allocatedPtr);

  // No alignment.
  Value alignedPtr = allocatedPtr;

  // Create the MemRef descriptor.
  auto memRefDescriptor = this->createMemRefDescriptor(
      loc, memRefType, allocatedPtr, alignedPtr, shape, strides, rewriter);

  rewriter.replaceOp(allocOp, {memRefDescriptor, stream});

  return success();
}
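// For example (illustrative), an asynchronous allocation such as
//
//   %memref, %t1 = gpu.alloc async [%t0] (%size) : memref<?xf32>
//
// becomes a call to mgpuMemAlloc on the stream backing %t0; the returned
// pointer is bitcast to the element pointer type and wrapped in a memref
// descriptor, and the stream itself stands in for the result token %t1.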
LogicalResult ConvertDeallocOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::DeallocOp deallocOp, ArrayRef<Value> operands,
    ConversionPatternRewriter &rewriter) const {
  if (failed(areAllLLVMTypes(deallocOp, operands, rewriter)) ||
      failed(isAsyncWithOneDependency(rewriter, deallocOp)))
    return failure();

  Location loc = deallocOp.getLoc();

  auto adaptor =
      gpu::DeallocOpAdaptor(operands, deallocOp->getAttrDictionary());
  Value pointer =
      MemRefDescriptor(adaptor.memref()).allocatedPtr(rewriter, loc);
  auto casted = rewriter.create<LLVM::BitcastOp>(loc, llvmPointerType, pointer);
  Value stream = adaptor.asyncDependencies().front();
  deallocCallBuilder.create(loc, rewriter, {casted, stream});

  rewriter.replaceOp(deallocOp, {stream});
  return success();
}

static bool isGpuAsyncTokenType(Value value) {
  return value.getType().isa<gpu::AsyncTokenType>();
}

// Converts !gpu.async.token operands of `async.yield` to runtime calls.
// !gpu.async.token values are lowered to streams within the async.execute
// region, but are passed between regions as events. For each !gpu.async.token
// operand, we create an event and record it on the stream.
LogicalResult ConvertAsyncYieldToGpuRuntimeCallPattern::matchAndRewrite(
    async::YieldOp yieldOp, ArrayRef<Value> operands,
    ConversionPatternRewriter &rewriter) const {
  if (llvm::none_of(yieldOp.operands(), isGpuAsyncTokenType))
    return rewriter.notifyMatchFailure(yieldOp, "no gpu async token operand");

  Location loc = yieldOp.getLoc();
  SmallVector<Value, 4> newOperands(operands.begin(), operands.end());
  llvm::SmallDenseSet<Value> streams;
  for (auto &operand : yieldOp->getOpOperands()) {
    if (!isGpuAsyncTokenType(operand.get()))
      continue;
    auto idx = operand.getOperandNumber();
    auto stream = operands[idx];
    auto event = eventCreateCallBuilder.create(loc, rewriter, {}).getResult(0);
    eventRecordCallBuilder.create(loc, rewriter, {event, stream});
    newOperands[idx] = event;
    streams.insert(stream);
  }
  for (auto stream : streams)
    streamDestroyCallBuilder.create(loc, rewriter, {stream});

  rewriter.updateRootInPlace(yieldOp,
                             [&] { yieldOp->setOperands(newOperands); });
  return success();
}

// Returns whether `value` is the result of an LLVM::CallOp to `functionName`.
static bool isDefinedByCallTo(Value value, StringRef functionName) {
  assert(value.getType().isa<LLVM::LLVMPointerType>());
  if (auto defOp = value.getDefiningOp<LLVM::CallOp>())
    return defOp.callee()->equals(functionName);
  return false;
}
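// For example (illustrative), when the region of an async.execute op yields a
// !gpu.async.token:
//
//   %a, %f = async.execute -> !async.value<!gpu.async.token> {
//     %t = gpu.wait async
//     ...
//     async.yield %t : !gpu.async.token
//   }
//
// the yielded stream is replaced by a freshly created event recorded on that
// stream, and the now-unused stream is destroyed at the end of the region.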
// Converts `gpu.wait` to runtime calls. The converted op synchronizes the host
// with the stream/event operands. The operands are destroyed, i.e. it is
// assumed that they are not used afterwards or elsewhere; otherwise we will
// get a runtime error. Eventually, we should guarantee this property.
LogicalResult ConvertWaitOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::WaitOp waitOp, ArrayRef<Value> operands,
    ConversionPatternRewriter &rewriter) const {
  if (waitOp.asyncToken())
    return rewriter.notifyMatchFailure(waitOp, "Cannot convert async op.");

  Location loc = waitOp.getLoc();

  for (auto operand : operands) {
    if (isDefinedByCallTo(operand, streamCreateCallBuilder.functionName)) {
      // The converted operand's definition created a stream.
      streamSynchronizeCallBuilder.create(loc, rewriter, {operand});
      streamDestroyCallBuilder.create(loc, rewriter, {operand});
    } else {
      // Otherwise the converted operand is an event. This assumes that we use
      // events in control flow code as well.
      eventSynchronizeCallBuilder.create(loc, rewriter, {operand});
      eventDestroyCallBuilder.create(loc, rewriter, {operand});
    }
  }

  rewriter.eraseOp(waitOp);
  return success();
}

// Converts `gpu.wait async` to runtime calls. The converted op creates a new
// stream that is synchronized with the stream/event operands. The operands are
// destroyed, i.e. it is assumed that they are not used afterwards or
// elsewhere; otherwise we will get a runtime error. Eventually, we should
// guarantee this property.
LogicalResult ConvertWaitAsyncOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::WaitOp waitOp, ArrayRef<Value> operands,
    ConversionPatternRewriter &rewriter) const {
  if (!waitOp.asyncToken())
    return rewriter.notifyMatchFailure(waitOp, "Can only convert async op.");

  Location loc = waitOp.getLoc();

  auto insertionPoint = rewriter.saveInsertionPoint();
  SmallVector<Value, 1> events;
  for (auto pair : llvm::zip(waitOp.asyncDependencies(), operands)) {
    auto operand = std::get<1>(pair);
    if (isDefinedByCallTo(operand, streamCreateCallBuilder.functionName)) {
      // The converted operand's definition created a stream. Insert an event
      // into the stream just after the last use of the original token operand.
      auto *defOp = std::get<0>(pair).getDefiningOp();
      rewriter.setInsertionPointAfter(defOp);
      auto event =
          eventCreateCallBuilder.create(loc, rewriter, {}).getResult(0);
      eventRecordCallBuilder.create(loc, rewriter, {event, operand});
      events.push_back(event);
    } else {
      // Otherwise the converted operand is an event. This assumes that we use
      // events in control flow code as well.
      events.push_back(operand);
    }
  }
  rewriter.restoreInsertionPoint(insertionPoint);
  auto stream = streamCreateCallBuilder.create(loc, rewriter, {}).getResult(0);
  for (auto event : events)
    streamWaitEventCallBuilder.create(loc, rewriter, {stream, event});
  for (auto event : events)
    eventDestroyCallBuilder.create(loc, rewriter, {event});
  rewriter.replaceOp(waitOp, {stream});

  return success();
}
// Creates a struct containing all kernel parameters on the stack and returns
// an array of type-erased pointers to the fields of the struct. The array can
// then be passed to the CUDA / ROCm (HIP) kernel launch calls.
// The generated code is essentially as follows:
//
// %struct = alloca(sizeof(struct { Parameters... }))
// %array = alloca(NumParameters * sizeof(void *))
// for (i : [0, NumParameters))
//   %fieldPtr = llvm.getelementptr %struct[0, i]
//   llvm.store parameters[i], %fieldPtr
//   %elementPtr = llvm.getelementptr %array[i]
//   llvm.store %fieldPtr, %elementPtr
// return %array
Value ConvertLaunchFuncOpToGpuRuntimeCallPattern::generateParamsArray(
    gpu::LaunchFuncOp launchOp, ArrayRef<Value> operands,
    OpBuilder &builder) const {
  auto loc = launchOp.getLoc();
  auto numKernelOperands = launchOp.getNumKernelOperands();
  auto arguments = getTypeConverter()->promoteOperands(
      loc, launchOp.getOperands().take_back(numKernelOperands),
      operands.take_back(numKernelOperands), builder);
  auto numArguments = arguments.size();
  SmallVector<Type, 4> argumentTypes;
  argumentTypes.reserve(numArguments);
  for (auto argument : arguments)
    argumentTypes.push_back(argument.getType());
  auto structType = LLVM::LLVMStructType::getNewIdentified(context, StringRef(),
                                                           argumentTypes);
  auto one = builder.create<LLVM::ConstantOp>(loc, llvmInt32Type,
                                              builder.getI32IntegerAttr(1));
  auto structPtr = builder.create<LLVM::AllocaOp>(
      loc, LLVM::LLVMPointerType::get(structType), one, /*alignment=*/0);
  auto arraySize = builder.create<LLVM::ConstantOp>(
      loc, llvmInt32Type, builder.getI32IntegerAttr(numArguments));
  auto arrayPtr = builder.create<LLVM::AllocaOp>(loc, llvmPointerPointerType,
                                                 arraySize, /*alignment=*/0);
  auto zero = builder.create<LLVM::ConstantOp>(loc, llvmInt32Type,
                                               builder.getI32IntegerAttr(0));
  for (auto en : llvm::enumerate(arguments)) {
    auto index = builder.create<LLVM::ConstantOp>(
        loc, llvmInt32Type, builder.getI32IntegerAttr(en.index()));
    auto fieldPtr = builder.create<LLVM::GEPOp>(
        loc, LLVM::LLVMPointerType::get(argumentTypes[en.index()]), structPtr,
        ArrayRef<Value>{zero, index.getResult()});
    builder.create<LLVM::StoreOp>(loc, en.value(), fieldPtr);
    auto elementPtr = builder.create<LLVM::GEPOp>(loc, llvmPointerPointerType,
                                                  arrayPtr, index.getResult());
    auto casted =
        builder.create<LLVM::BitcastOp>(loc, llvmPointerType, fieldPtr);
    builder.create<LLVM::StoreOp>(loc, casted, elementPtr);
  }
  return arrayPtr;
}

// Generates an LLVM IR dialect global that contains the name of the given
// kernel function as a C string, and returns a pointer to its beginning.
// The code is essentially:
//
// llvm.global constant @kernel_name("function_name\00")
// func(...) {
//   %0 = llvm.addressof @kernel_name
//   %1 = llvm.constant (0 : index)
//   %2 = llvm.getelementptr %0[%1, %1] : !llvm<"i8*">
// }
Value ConvertLaunchFuncOpToGpuRuntimeCallPattern::generateKernelNameConstant(
    StringRef moduleName, StringRef name, Location loc,
    OpBuilder &builder) const {
  // Make sure the trailing zero is included in the constant.
  std::vector<char> kernelName(name.begin(), name.end());
  kernelName.push_back('\0');

  std::string globalName =
      std::string(llvm::formatv("{0}_{1}_kernel_name", moduleName, name));
  return LLVM::createGlobalString(
      loc, builder, globalName, StringRef(kernelName.data(), kernelName.size()),
      LLVM::Linkage::Internal);
}
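// For a kernel @kernel in gpu.module @kernel_module, for instance, this
// produces an internal global named "kernel_module_kernel_kernel_name"
// holding the bytes "kernel\00".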
// Emits LLVM IR to launch a kernel function. Expects the kernel module in the
// IR to carry the compiled kernel function, as a cubin in the 'nvvm.cubin'
// attribute or as a hsaco in the 'rocdl.hsaco' attribute.
//
// %0 = call %binarygetter
// %1 = call %moduleLoad(%0)
// %2 = <see generateKernelNameConstant>
// %3 = call %moduleGetFunction(%1, %2)
// %4 = call %streamCreate()
// %5 = <see generateParamsArray>
// call %launchKernel(%3, <launchOp operands 0..5>, 0, %4, %5, nullptr)
// call %streamSynchronize(%4)
// call %streamDestroy(%4)
// call %moduleUnload(%1)
//
// If the op is async, the stream corresponds to the (single) async dependency
// as well as the async token the op produces.
LogicalResult ConvertLaunchFuncOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::LaunchFuncOp launchOp, ArrayRef<Value> operands,
    ConversionPatternRewriter &rewriter) const {
  if (failed(areAllLLVMTypes(launchOp, operands, rewriter)))
    return failure();

  if (launchOp.asyncDependencies().size() > 1)
    return rewriter.notifyMatchFailure(
        launchOp, "Cannot convert with more than one async dependency.");

  // Fail when the synchronous version of the op has async dependencies. The
  // lowering destroys the stream, and we do not want to check that there is no
  // use of the stream after this op.
  if (!launchOp.asyncToken() && !launchOp.asyncDependencies().empty())
    return rewriter.notifyMatchFailure(
        launchOp, "Cannot convert non-async op with async dependencies.");

  Location loc = launchOp.getLoc();

  // Create an LLVM global with CUBIN extracted from the kernel annotation and
  // obtain a pointer to the first byte in it.
  auto kernelModule = SymbolTable::lookupNearestSymbolFrom<gpu::GPUModuleOp>(
      launchOp, launchOp.getKernelModuleName());
  assert(kernelModule && "expected a kernel module");

  auto binaryAttr =
      kernelModule->getAttrOfType<StringAttr>(gpuBinaryAnnotation);
  if (!binaryAttr) {
    kernelModule.emitOpError()
        << "missing " << gpuBinaryAnnotation << " attribute";
    return failure();
  }

  SmallString<128> nameBuffer(kernelModule.getName());
  nameBuffer.append(kGpuBinaryStorageSuffix);
  Value data =
      LLVM::createGlobalString(loc, rewriter, nameBuffer.str(),
                               binaryAttr.getValue(), LLVM::Linkage::Internal);

  auto module = moduleLoadCallBuilder.create(loc, rewriter, data);
  // Get the function from the module. The name corresponds to the name of
  // the kernel function.
  auto kernelName = generateKernelNameConstant(
      launchOp.getKernelModuleName(), launchOp.getKernelName(), loc, rewriter);
  auto function = moduleGetFunctionCallBuilder.create(
      loc, rewriter, {module.getResult(0), kernelName});
  auto zero = rewriter.create<LLVM::ConstantOp>(loc, llvmInt32Type,
                                                rewriter.getI32IntegerAttr(0));
  auto adaptor =
      gpu::LaunchFuncOpAdaptor(operands, launchOp->getAttrDictionary());
  Value stream =
      adaptor.asyncDependencies().empty()
          ? streamCreateCallBuilder.create(loc, rewriter, {}).getResult(0)
          : adaptor.asyncDependencies().front();
  // Create array of pointers to kernel arguments.
  auto kernelParams = generateParamsArray(launchOp, operands, rewriter);
  auto nullpointer = rewriter.create<LLVM::NullOp>(loc, llvmPointerPointerType);
  launchKernelCallBuilder.create(loc, rewriter,
                                 {function.getResult(0), launchOp.gridSizeX(),
                                  launchOp.gridSizeY(), launchOp.gridSizeZ(),
                                  launchOp.blockSizeX(), launchOp.blockSizeY(),
                                  launchOp.blockSizeZ(),
                                  /*sharedMemBytes=*/zero, stream, kernelParams,
                                  /*extra=*/nullpointer});

  if (launchOp.asyncToken()) {
    // Async launch: make dependent ops use the same stream.
    rewriter.replaceOp(launchOp, {stream});
  } else {
    // Synchronize with host and destroy stream. This must be the stream
    // created above (with no other uses) because we check that the
    // synchronous version does not have any async dependencies.
    streamSynchronizeCallBuilder.create(loc, rewriter, stream);
    streamDestroyCallBuilder.create(loc, rewriter, stream);
    rewriter.eraseOp(launchOp);
  }
  moduleUnloadCallBuilder.create(loc, rewriter, module.getResult(0));

  return success();
}

LogicalResult ConvertMemcpyOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::MemcpyOp memcpyOp, ArrayRef<Value> operands,
    ConversionPatternRewriter &rewriter) const {
  auto memRefType = memcpyOp.src().getType().cast<MemRefType>();

  if (failed(areAllLLVMTypes(memcpyOp, operands, rewriter)) ||
      !isConvertibleAndHasIdentityMaps(memRefType) ||
      failed(isAsyncWithOneDependency(rewriter, memcpyOp)))
    return failure();

  auto loc = memcpyOp.getLoc();
  auto adaptor = gpu::MemcpyOpAdaptor(operands, memcpyOp->getAttrDictionary());

  MemRefDescriptor srcDesc(adaptor.src());

  Value numElements =
      memRefType.hasStaticShape()
          ? createIndexConstant(rewriter, loc, memRefType.getNumElements())
          // For identity layouts (verified above), the number of elements is
          // stride[0] * size[0].
          : rewriter.create<LLVM::MulOp>(loc, srcDesc.stride(rewriter, loc, 0),
                                         srcDesc.size(rewriter, loc, 0));

  // Compute the size in bytes with the usual null-pointer GEP idiom:
  // &((T*)0)[numElements] equals numElements * sizeof(T).
  Type elementPtrType = getElementPtrType(memRefType);
  Value nullPtr = rewriter.create<LLVM::NullOp>(loc, elementPtrType);
  Value gepPtr = rewriter.create<LLVM::GEPOp>(
      loc, elementPtrType, ArrayRef<Value>{nullPtr, numElements});
  auto sizeBytes =
      rewriter.create<LLVM::PtrToIntOp>(loc, getIndexType(), gepPtr);

  auto src = rewriter.create<LLVM::BitcastOp>(
      loc, llvmPointerType, srcDesc.alignedPtr(rewriter, loc));
  auto dst = rewriter.create<LLVM::BitcastOp>(
      loc, llvmPointerType,
      MemRefDescriptor(adaptor.dst()).alignedPtr(rewriter, loc));

  auto stream = adaptor.asyncDependencies().front();
  memcpyCallBuilder.create(loc, rewriter, {dst, src, sizeBytes, stream});

  rewriter.replaceOp(memcpyOp, {stream});

  return success();
}

std::unique_ptr<mlir::OperationPass<mlir::ModuleOp>>
mlir::createGpuToLLVMConversionPass() {
  return std::make_unique<GpuToLLVMConversionPass>();
}
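// Example usage (illustrative): adding the pass to a pass manager in C++.
//
//   mlir::PassManager pm(&context);
//   pm.addPass(mlir::createGpuToLLVMConversionPass());
//   if (failed(pm.run(module)))
//     /* handle failure */;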