//===- CudaRuntimeWrappers.cpp - MLIR CUDA API wrapper library ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implements C wrappers around the CUDA library for easy linking in ORC JIT.
// Also adds some debugging helpers that are helpful when writing MLIR code to
// run on GPUs.
//
//===----------------------------------------------------------------------===//

#include <algorithm>
#include <cassert>
#include <cstdio>
#include <numeric>

#include "mlir/ExecutionEngine/CRunnerUtils.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"

#include "cuda.h"

#ifdef _WIN32
#define MLIR_CUDA_WRAPPERS_EXPORT __declspec(dllexport)
#else
#define MLIR_CUDA_WRAPPERS_EXPORT
#endif // _WIN32

#define CUDA_REPORT_IF_ERROR(expr)                                             \
  [](CUresult result) {                                                        \
    if (!result)                                                               \
      return;                                                                  \
    const char *name = nullptr;                                                \
    cuGetErrorName(result, &name);                                             \
    if (!name)                                                                 \
      name = "<unknown>";                                                      \
    fprintf(stderr, "'%s' failed with '%s'\n", #expr, name);                   \
  }(expr)

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wglobal-constructors"
// Static reference to CUDA primary context for device ordinal 0.
static CUcontext Context = [] {
  CUDA_REPORT_IF_ERROR(cuInit(/*flags=*/0));
  CUdevice device;
  CUDA_REPORT_IF_ERROR(cuDeviceGet(&device, /*ordinal=*/0));
  CUcontext context;
  CUDA_REPORT_IF_ERROR(cuDevicePrimaryCtxRetain(&context, device));
  return context;
}();
#pragma clang diagnostic pop

// Sets the `Context` for the duration of the instance and restores the
// previous context on destruction.
class ScopedContext {
public:
  ScopedContext() {
    CUDA_REPORT_IF_ERROR(cuCtxGetCurrent(&previous));
    CUDA_REPORT_IF_ERROR(cuCtxSetCurrent(Context));
  }

  ~ScopedContext() { CUDA_REPORT_IF_ERROR(cuCtxSetCurrent(previous)); }

private:
  CUcontext previous;
};

extern "C" MLIR_CUDA_WRAPPERS_EXPORT CUmodule mgpuModuleLoad(void *data) {
  ScopedContext scopedContext;
  CUmodule module = nullptr;
  CUDA_REPORT_IF_ERROR(cuModuleLoadData(&module, data));
  return module;
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuModuleUnload(CUmodule module) {
  CUDA_REPORT_IF_ERROR(cuModuleUnload(module));
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT CUfunction
mgpuModuleGetFunction(CUmodule module, const char *name) {
  CUfunction function = nullptr;
  CUDA_REPORT_IF_ERROR(cuModuleGetFunction(&function, module, name));
  return function;
}

// The wrapper uses intptr_t instead of CUDA's unsigned int to match
// the type of MLIR's index type. This avoids the need for casts in the
// generated MLIR code.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuLaunchKernel(CUfunction function, intptr_t gridX, intptr_t gridY,
                 intptr_t gridZ, intptr_t blockX, intptr_t blockY,
                 intptr_t blockZ, int32_t smem, CUstream stream, void **params,
                 void **extra) {
  ScopedContext scopedContext;
  CUDA_REPORT_IF_ERROR(cuLaunchKernel(function, gridX, gridY, gridZ, blockX,
                                      blockY, blockZ, smem, stream, params,
                                      extra));
}
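
// A minimal host-side sketch of how the module, stream, and launch wrappers
// above compose. Illustrative only: the cubin blob `cubinData`, the kernel
// name "kernel", and the argument pointers are hypothetical and not part of
// this file.
//
//   CUmodule module = mgpuModuleLoad(cubinData);
//   CUfunction func = mgpuModuleGetFunction(module, "kernel");
//   CUstream stream = mgpuStreamCreate();
//   void *params[] = {/* pointers to kernel arguments */};
//   mgpuLaunchKernel(func, /*grid=*/1, 1, 1, /*block=*/128, 1, 1,
//                    /*smem=*/0, stream, params, /*extra=*/nullptr);
//   mgpuStreamSynchronize(stream);
//   mgpuStreamDestroy(stream);
//   mgpuModuleUnload(module);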

extern "C" MLIR_CUDA_WRAPPERS_EXPORT CUstream mgpuStreamCreate() {
  ScopedContext scopedContext;
  CUstream stream = nullptr;
  CUDA_REPORT_IF_ERROR(cuStreamCreate(&stream, CU_STREAM_NON_BLOCKING));
  return stream;
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuStreamDestroy(CUstream stream) {
  CUDA_REPORT_IF_ERROR(cuStreamDestroy(stream));
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuStreamSynchronize(CUstream stream) {
  CUDA_REPORT_IF_ERROR(cuStreamSynchronize(stream));
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuStreamWaitEvent(CUstream stream,
                                                              CUevent event) {
  CUDA_REPORT_IF_ERROR(cuStreamWaitEvent(stream, event, /*flags=*/0));
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT CUevent mgpuEventCreate() {
  ScopedContext scopedContext;
  CUevent event = nullptr;
  CUDA_REPORT_IF_ERROR(cuEventCreate(&event, CU_EVENT_DISABLE_TIMING));
  return event;
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuEventDestroy(CUevent event) {
  CUDA_REPORT_IF_ERROR(cuEventDestroy(event));
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuEventSynchronize(CUevent event) {
  CUDA_REPORT_IF_ERROR(cuEventSynchronize(event));
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuEventRecord(CUevent event,
                                                          CUstream stream) {
  CUDA_REPORT_IF_ERROR(cuEventRecord(event, stream));
}
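
// Sketch of cross-stream ordering with the event wrappers. Illustrative only:
// `streamA` and `streamB` are assumed to come from mgpuStreamCreate().
//
//   CUevent event = mgpuEventCreate();
//   mgpuEventRecord(event, streamA);      // Mark a point in streamA's work.
//   mgpuStreamWaitEvent(streamB, event);  // streamB waits for that point.
//   mgpuEventSynchronize(event);          // Optionally block the host too.
//   mgpuEventDestroy(event);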

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *mgpuMemAlloc(uint64_t sizeBytes,
                                                        CUstream /*stream*/) {
  ScopedContext scopedContext;
  CUdeviceptr ptr;
  CUDA_REPORT_IF_ERROR(cuMemAlloc(&ptr, sizeBytes));
  return reinterpret_cast<void *>(ptr);
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuMemFree(void *ptr,
                                                      CUstream /*stream*/) {
  CUDA_REPORT_IF_ERROR(cuMemFree(reinterpret_cast<CUdeviceptr>(ptr)));
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuMemcpy(void *dst, void *src, uint64_t sizeBytes, CUstream stream) {
  CUDA_REPORT_IF_ERROR(cuMemcpyAsync(reinterpret_cast<CUdeviceptr>(dst),
                                     reinterpret_cast<CUdeviceptr>(src),
                                     sizeBytes, stream));
}
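
// Illustrative device round trip using the memory wrappers above. `hostBuf`,
// `sizeBytes`, and `stream` are hypothetical; the stream is assumed to come
// from mgpuStreamCreate(). Note that the copies are asynchronous, so the
// stream must be synchronized before the host reads the result.
//
//   void *devBuf = mgpuMemAlloc(sizeBytes, stream);
//   mgpuMemcpy(devBuf, hostBuf, sizeBytes, stream); // host -> device
//   mgpuMemcpy(hostBuf, devBuf, sizeBytes, stream); // device -> host
//   mgpuStreamSynchronize(stream);
//   mgpuMemFree(devBuf, stream);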

/// Helper functions for writing MLIR example code.

// Allows registering a byte array with the CUDA runtime. Helpful until we have
// transfer functions implemented.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuMemHostRegister(void *ptr, uint64_t sizeBytes) {
  ScopedContext scopedContext;
  CUDA_REPORT_IF_ERROR(cuMemHostRegister(ptr, sizeBytes, /*flags=*/0));
}

// Allows registering a MemRef with the CUDA runtime. Helpful until we have
// transfer functions implemented.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuMemHostRegisterMemRef(int64_t rank, StridedMemRefType<char, 1> *descriptor,
                          int64_t elementSizeBytes) {
  // Sizes and strides are laid out contiguously in the descriptor, so the
  // strides can be viewed as the `rank` elements following the sizes.
  llvm::SmallVector<int64_t, 4> denseStrides(rank);
  llvm::ArrayRef<int64_t> sizes(descriptor->sizes, rank);
  llvm::ArrayRef<int64_t> strides(sizes.end(), rank);

  // Compute suffix products of the sizes; denseStrides[i] holds the product
  // of sizes[i..rank-1], so the front element is the total number of elements.
  std::partial_sum(sizes.rbegin(), sizes.rend(), denseStrides.rbegin(),
                   std::multiplies<int64_t>());
  auto sizeBytes = denseStrides.front() * elementSizeBytes;

  // Shift by one dimension to obtain row-major strides and verify that the
  // memref matches them; only densely packed tensors are currently supported.
  std::rotate(denseStrides.begin(), denseStrides.begin() + 1,
              denseStrides.end());
  denseStrides.back() = 1;
  assert(strides == llvm::makeArrayRef(denseStrides));

  auto ptr = descriptor->data + descriptor->offset * elementSizeBytes;
  mgpuMemHostRegister(ptr, sizeBytes);
}
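
// Worked example of the stride computation in mgpuMemHostRegisterMemRef,
// assuming a densely packed rank-3 memref with sizes [2, 3, 4] and a 4-byte
// element type:
//   suffix products of the sizes:        denseStrides = [24, 12, 4]
//   total allocation:                    sizeBytes = 24 * 4 = 96
//   after rotate + innermost stride = 1: denseStrides = [12, 4, 1],
//   which must equal the descriptor's strides for the assert to hold.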