/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef KFD_IOCTL_H_INCLUDED
#define KFD_IOCTL_H_INCLUDED

#include <drm/drm.h>
#include <linux/ioctl.h>

/*
 * - 1.1 - initial version
 * - 1.3 - Add SMI events support
 * - 1.4 - Indicate new SRAM EDC bit in device properties
 */
#define KFD_IOCTL_MAJOR_VERSION 1
#define KFD_IOCTL_MINOR_VERSION 4

struct kfd_ioctl_get_version_args {
	__u32 major_version;	/* from KFD */
	__u32 minor_version;	/* from KFD */
};

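/* Illustrative sketch (not part of this header): querying the interface
 * version from user space. Assumes kfd_fd was obtained with
 * open("/dev/kfd", O_RDWR) and that <sys/ioctl.h> and <stdio.h> are
 * included.
 *
 *	struct kfd_ioctl_get_version_args args = {0};
 *
 *	if (ioctl(kfd_fd, AMDKFD_IOC_GET_VERSION, &args) == 0)
 *		printf("KFD ioctl interface %u.%u\n",
 *		       args.major_version, args.minor_version);
 */
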
/* For kfd_ioctl_create_queue_args.queue_type. */
#define KFD_IOC_QUEUE_TYPE_COMPUTE		0x0
#define KFD_IOC_QUEUE_TYPE_SDMA			0x1
#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL		0x2
#define KFD_IOC_QUEUE_TYPE_SDMA_XGMI		0x3

#define KFD_MAX_QUEUE_PERCENTAGE	100
#define KFD_MAX_QUEUE_PRIORITY		15

struct kfd_ioctl_create_queue_args {
	__u64 ring_base_address;	/* to KFD */
	__u64 write_pointer_address;	/* from KFD */
	__u64 read_pointer_address;	/* from KFD */
	__u64 doorbell_offset;	/* from KFD */

	__u32 ring_size;		/* to KFD */
	__u32 gpu_id;		/* to KFD */
	__u32 queue_type;		/* to KFD */
	__u32 queue_percentage;	/* to KFD */
	__u32 queue_priority;	/* to KFD */
	__u32 queue_id;		/* from KFD */

	__u64 eop_buffer_address;	/* to KFD */
	__u64 eop_buffer_size;	/* to KFD */
	__u64 ctx_save_restore_address; /* to KFD */
	__u32 ctx_save_restore_size;	/* to KFD */
	__u32 ctl_stack_size;		/* to KFD */
};

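/* Illustrative sketch: creating an AQL compute queue. ring_buf,
 * ring_bytes and gpu_id are placeholders the caller must provide (the
 * runtime normally allocates the ring buffer itself); the kernel returns
 * queue_id, doorbell_offset and the read/write pointer addresses.
 *
 *	struct kfd_ioctl_create_queue_args args = {0};
 *
 *	args.gpu_id = gpu_id;
 *	args.queue_type = KFD_IOC_QUEUE_TYPE_COMPUTE_AQL;
 *	args.ring_base_address = (__u64)(uintptr_t)ring_buf;
 *	args.ring_size = ring_bytes;
 *	args.queue_percentage = KFD_MAX_QUEUE_PERCENTAGE;
 *	args.queue_priority = 7;	// mid-range: 0..KFD_MAX_QUEUE_PRIORITY
 *
 *	if (ioctl(kfd_fd, AMDKFD_IOC_CREATE_QUEUE, &args) == 0)
 *		queue_id = args.queue_id;
 */
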
struct kfd_ioctl_destroy_queue_args {
	__u32 queue_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_update_queue_args {
	__u64 ring_base_address;	/* to KFD */

	__u32 queue_id;		/* to KFD */
	__u32 ring_size;		/* to KFD */
	__u32 queue_percentage;	/* to KFD */
	__u32 queue_priority;	/* to KFD */
};

struct kfd_ioctl_set_cu_mask_args {
	__u32 queue_id;		/* to KFD */
	__u32 num_cu_mask;		/* to KFD */
	__u64 cu_mask_ptr;		/* to KFD */
};

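/* Illustrative sketch: restricting a queue to the CUs set in a bitmask.
 * num_cu_mask counts bits, so a 64-bit mask spans two __u32 words;
 * mask[] and queue_id are caller-supplied placeholders.
 *
 *	__u32 mask[2] = { 0xffffffff, 0x0000ffff };	// enable CUs 0..47
 *	struct kfd_ioctl_set_cu_mask_args args = {0};
 *
 *	args.queue_id = queue_id;
 *	args.num_cu_mask = 64;				// bits in mask[]
 *	args.cu_mask_ptr = (__u64)(uintptr_t)mask;
 *
 *	ioctl(kfd_fd, AMDKFD_IOC_SET_CU_MASK, &args);
 */
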
struct kfd_ioctl_get_queue_wave_state_args {
	__u64 ctl_stack_address;	/* to KFD */
	__u32 ctl_stack_used_size;	/* from KFD */
	__u32 save_area_used_size;	/* from KFD */
	__u32 queue_id;			/* to KFD */
	__u32 pad;
};

/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
#define KFD_IOC_CACHE_POLICY_COHERENT 0
#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1

struct kfd_ioctl_set_memory_policy_args {
	__u64 alternate_aperture_base;	/* to KFD */
	__u64 alternate_aperture_size;	/* to KFD */

	__u32 gpu_id;			/* to KFD */
	__u32 default_policy;		/* to KFD */
	__u32 alternate_policy;		/* to KFD */
	__u32 pad;
};

/*
 * All counters are monotonic. They are used for profiling of compute jobs.
 * The profiling is done by userspace.
 *
 * In case of a GPU reset, the counters should not be affected.
 */

struct kfd_ioctl_get_clock_counters_args {
	__u64 gpu_clock_counter;	/* from KFD */
	__u64 cpu_clock_counter;	/* from KFD */
	__u64 system_clock_counter;	/* from KFD */
	__u64 system_clock_freq;	/* from KFD */

	__u32 gpu_id;		/* to KFD */
	__u32 pad;
};

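/* Illustrative sketch: sampling the counters twice to derive elapsed
 * system time around a workload. gpu_id and run_workload() are
 * placeholders supplied by the caller.
 *
 *	struct kfd_ioctl_get_clock_counters_args t0 = { .gpu_id = gpu_id };
 *	struct kfd_ioctl_get_clock_counters_args t1 = { .gpu_id = gpu_id };
 *
 *	ioctl(kfd_fd, AMDKFD_IOC_GET_CLOCK_COUNTERS, &t0);
 *	run_workload();
 *	ioctl(kfd_fd, AMDKFD_IOC_GET_CLOCK_COUNTERS, &t1);
 *
 *	double secs = (double)(t1.system_clock_counter -
 *			       t0.system_clock_counter) /
 *		      (double)t0.system_clock_freq;
 */
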
struct kfd_process_device_apertures {
	__u64 lds_base;		/* from KFD */
	__u64 lds_limit;		/* from KFD */
	__u64 scratch_base;		/* from KFD */
	__u64 scratch_limit;		/* from KFD */
	__u64 gpuvm_base;		/* from KFD */
	__u64 gpuvm_limit;		/* from KFD */
	__u32 gpu_id;		/* from KFD */
	__u32 pad;
};

/*
 * AMDKFD_IOC_GET_PROCESS_APERTURES is deprecated. Use
 * AMDKFD_IOC_GET_PROCESS_APERTURES_NEW instead, which supports an
 * unlimited number of GPUs.
 */
#define NUM_OF_SUPPORTED_GPUS 7
struct kfd_ioctl_get_process_apertures_args {
	struct kfd_process_device_apertures
			process_apertures[NUM_OF_SUPPORTED_GPUS];/* from KFD */

	/* from KFD, should be in the range [1 - NUM_OF_SUPPORTED_GPUS] */
	__u32 num_of_nodes;
	__u32 pad;
};

struct kfd_ioctl_get_process_apertures_new_args {
	/* User allocated. Pointer to struct kfd_process_device_apertures
	 * filled in by the kernel.
	 */
	__u64 kfd_process_device_apertures_ptr;
	/* to KFD - number of struct kfd_process_device_apertures entries
	 * allocated at kfd_process_device_apertures_ptr
	 * from KFD - number of entries filled by KFD.
	 */
	__u32 num_of_nodes;
	__u32 pad;
};

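/* Illustrative sketch of a two-call pattern for the _NEW ioctl: probe
 * with num_of_nodes = 0 to learn how many entries are needed, then
 * allocate and call again. The probe convention is an assumption based
 * on the field comments above, not spelled out by this header.
 *
 *	struct kfd_ioctl_get_process_apertures_new_args args = {0};
 *
 *	ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args);
 *
 *	struct kfd_process_device_apertures *ap =
 *		calloc(args.num_of_nodes, sizeof(*ap));
 *	args.kfd_process_device_apertures_ptr = (__u64)(uintptr_t)ap;
 *	ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args);
 */
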
#define MAX_ALLOWED_NUM_POINTS    100
#define MAX_ALLOWED_AW_BUFF_SIZE 4096
#define MAX_ALLOWED_WAC_BUFF_SIZE  128

struct kfd_ioctl_dbg_register_args {
	__u32 gpu_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_dbg_unregister_args {
	__u32 gpu_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_dbg_address_watch_args {
	__u64 content_ptr;		/* a pointer to the actual content */
	__u32 gpu_id;		/* to KFD */
	__u32 buf_size_in_bytes;	/* including gpu_id and buf_size */
};

struct kfd_ioctl_dbg_wave_control_args {
	__u64 content_ptr;		/* a pointer to the actual content */
	__u32 gpu_id;		/* to KFD */
	__u32 buf_size_in_bytes;	/* including gpu_id and buf_size */
};

/* Matching HSA_EVENTTYPE */
#define KFD_IOC_EVENT_SIGNAL			0
#define KFD_IOC_EVENT_NODECHANGE		1
#define KFD_IOC_EVENT_DEVICESTATECHANGE		2
#define KFD_IOC_EVENT_HW_EXCEPTION		3
#define KFD_IOC_EVENT_SYSTEM_EVENT		4
#define KFD_IOC_EVENT_DEBUG_EVENT		5
#define KFD_IOC_EVENT_PROFILE_EVENT		6
#define KFD_IOC_EVENT_QUEUE_EVENT		7
#define KFD_IOC_EVENT_MEMORY			8

#define KFD_IOC_WAIT_RESULT_COMPLETE		0
#define KFD_IOC_WAIT_RESULT_TIMEOUT		1
#define KFD_IOC_WAIT_RESULT_FAIL		2

#define KFD_SIGNAL_EVENT_LIMIT			4096

/* For kfd_event_data.hw_exception_data.reset_type. */
#define KFD_HW_EXCEPTION_WHOLE_GPU_RESET	0
#define KFD_HW_EXCEPTION_PER_ENGINE_RESET	1

/* For kfd_event_data.hw_exception_data.reset_cause. */
#define KFD_HW_EXCEPTION_GPU_HANG	0
#define KFD_HW_EXCEPTION_ECC		1

/* For kfd_hsa_memory_exception_data.ErrorType */
#define KFD_MEM_ERR_NO_RAS		0
#define KFD_MEM_ERR_SRAM_ECC		1
#define KFD_MEM_ERR_POISON_CONSUMED	2
#define KFD_MEM_ERR_GPU_HANG		3

struct kfd_ioctl_create_event_args {
	__u64 event_page_offset;	/* from KFD */
	__u32 event_trigger_data;	/* from KFD - signal events only */
	__u32 event_type;		/* to KFD */
	__u32 auto_reset;		/* to KFD */
	__u32 node_id;		/* to KFD - only valid for certain
				 * event types
				 */
	__u32 event_id;		/* from KFD */
	__u32 event_slot_index;	/* from KFD */
};

struct kfd_ioctl_destroy_event_args {
	__u32 event_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_set_event_args {
	__u32 event_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_reset_event_args {
	__u32 event_id;		/* to KFD */
	__u32 pad;
};

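/* Illustrative sketch: creating an auto-reset signal event. The returned
 * event_page_offset can be mmap()ed against /dev/kfd to give user mode
 * and the GPU a shared signal slot; that mapping step is omitted here.
 *
 *	struct kfd_ioctl_create_event_args ev = {0};
 *
 *	ev.event_type = KFD_IOC_EVENT_SIGNAL;
 *	ev.auto_reset = 1;
 *
 *	if (ioctl(kfd_fd, AMDKFD_IOC_CREATE_EVENT, &ev) == 0)
 *		event_id = ev.event_id;		// use with set/reset/wait
 */
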
struct kfd_memory_exception_failure {
	__u32 NotPresent;	/* Page not present or supervisor privilege */
	__u32 ReadOnly;	/* Write access to a read-only page */
	__u32 NoExecute;	/* Execute access to a page marked NX */
	__u32 imprecise;	/* Can't determine the exact fault address */
};

/* memory exception data */
struct kfd_hsa_memory_exception_data {
	struct kfd_memory_exception_failure failure;
	__u64 va;
	__u32 gpu_id;
	__u32 ErrorType; /* 0 = no RAS error,
			  * 1 = ECC_SRAM,
			  * 2 = Link_SYNFLOOD (poison),
			  * 3 = GPU hang (not attributable to a specific cause),
			  * other values reserved
			  */
};

/* hw exception data */
struct kfd_hsa_hw_exception_data {
	__u32 reset_type;
	__u32 reset_cause;
	__u32 memory_lost;
	__u32 gpu_id;
};

/* Event data */
struct kfd_event_data {
	union {
		struct kfd_hsa_memory_exception_data memory_exception_data;
		struct kfd_hsa_hw_exception_data hw_exception_data;
	};				/* From KFD */
	__u64 kfd_event_data_ext;	/* pointer to an extension structure
					   for future exception types */
	__u32 event_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_wait_events_args {
	__u64 events_ptr;		/* points to struct
					   kfd_event_data array, to KFD */
	__u32 num_events;		/* to KFD */
	__u32 wait_for_all;		/* to KFD */
	__u32 timeout;		/* to KFD */
	__u32 wait_result;		/* from KFD */
};

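/* Illustrative sketch: blocking on one event with a 1 s timeout. The
 * millisecond timeout unit is an assumption (it matches how the Thunk
 * library uses this ioctl); event_id comes from the create example above.
 *
 *	struct kfd_event_data data = { .event_id = event_id };
 *	struct kfd_ioctl_wait_events_args wait = {0};
 *
 *	wait.events_ptr = (__u64)(uintptr_t)&data;
 *	wait.num_events = 1;
 *	wait.wait_for_all = 1;
 *	wait.timeout = 1000;
 *
 *	ioctl(kfd_fd, AMDKFD_IOC_WAIT_EVENTS, &wait);
 *	if (wait.wait_result == KFD_IOC_WAIT_RESULT_COMPLETE)
 *		handle_signal();	// placeholder for the caller's work
 */
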
struct kfd_ioctl_set_scratch_backing_va_args {
	__u64 va_addr;	/* to KFD */
	__u32 gpu_id;	/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_get_tile_config_args {
	/* to KFD: pointer to tile array */
	__u64 tile_config_ptr;
	/* to KFD: pointer to macro tile array */
	__u64 macro_tile_config_ptr;
	/* to KFD: array size allocated by user mode
	 * from KFD: array size filled by kernel
	 */
	__u32 num_tile_configs;
	/* to KFD: array size allocated by user mode
	 * from KFD: array size filled by kernel
	 */
	__u32 num_macro_tile_configs;

	__u32 gpu_id;		/* to KFD */
	__u32 gb_addr_config;	/* from KFD */
	__u32 num_banks;		/* from KFD */
	__u32 num_ranks;		/* from KFD */
	/* struct size can be extended later if needed
	 * without breaking ABI compatibility
	 */
};

struct kfd_ioctl_set_trap_handler_args {
	__u64 tba_addr;		/* to KFD */
	__u64 tma_addr;		/* to KFD */
	__u32 gpu_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_acquire_vm_args {
	__u32 drm_fd;	/* to KFD */
	__u32 gpu_id;	/* to KFD */
};

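/* Illustrative sketch: handing a render-node GPU VM to KFD, which must
 * happen before the memory ioctls below are used on that device. drm_fd
 * is an assumption, e.g. open("/dev/dri/renderD128", O_RDWR) for the
 * render node matching gpu_id.
 *
 *	struct kfd_ioctl_acquire_vm_args args = {0};
 *
 *	args.drm_fd = drm_fd;
 *	args.gpu_id = gpu_id;
 *	ioctl(kfd_fd, AMDKFD_IOC_ACQUIRE_VM, &args);
 */
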
/* Allocation flags: memory types */
#define KFD_IOC_ALLOC_MEM_FLAGS_VRAM		(1 << 0)
#define KFD_IOC_ALLOC_MEM_FLAGS_GTT		(1 << 1)
#define KFD_IOC_ALLOC_MEM_FLAGS_USERPTR		(1 << 2)
#define KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL	(1 << 3)
#define KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP	(1 << 4)
/* Allocation flags: attributes/access options */
#define KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE	(1 << 31)
#define KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE	(1 << 30)
#define KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC		(1 << 29)
#define KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE	(1 << 28)
#define KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM	(1 << 27)
#define KFD_IOC_ALLOC_MEM_FLAGS_COHERENT	(1 << 26)
#define KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED	(1 << 25)

/* Allocate memory for later SVM (shared virtual memory) mapping.
 *
 * @va_addr:     virtual address of the memory to be allocated;
 *               all later mappings on all GPUs will use this address
 * @size:        size in bytes
 * @handle:      buffer handle returned to user mode, used to refer to
 *               this allocation for mapping, unmapping and freeing
 * @mmap_offset: for CPU-mapping the allocation by mmapping a render node;
 *               for userptrs this is overloaded to specify the CPU address
 * @gpu_id:      device identifier
 * @flags:       memory type and attributes. See KFD_IOC_ALLOC_MEM_FLAGS above
 */
struct kfd_ioctl_alloc_memory_of_gpu_args {
	__u64 va_addr;		/* to KFD */
	__u64 size;		/* to KFD */
	__u64 handle;		/* from KFD */
	__u64 mmap_offset;	/* to KFD (userptr), from KFD (mmap offset) */
	__u32 gpu_id;		/* to KFD */
	__u32 flags;
};

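/* Illustrative sketch: allocating 2 MiB of writable, coherent VRAM at a
 * caller-chosen virtual address. va is an assumption picked by the
 * runtime's VA manager inside the gpuvm aperture.
 *
 *	struct kfd_ioctl_alloc_memory_of_gpu_args alloc = {0};
 *
 *	alloc.va_addr = va;
 *	alloc.size = 2ull << 20;
 *	alloc.gpu_id = gpu_id;
 *	alloc.flags = KFD_IOC_ALLOC_MEM_FLAGS_VRAM |
 *		      KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
 *		      KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
 *
 *	if (ioctl(kfd_fd, AMDKFD_IOC_ALLOC_MEMORY_OF_GPU, &alloc) == 0)
 *		handle = alloc.handle;	// needed for map/unmap/free
 */
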
/* Free memory allocated with kfd_ioctl_alloc_memory_of_gpu
 *
 * @handle: memory handle returned by alloc
 */
struct kfd_ioctl_free_memory_of_gpu_args {
	__u64 handle;		/* to KFD */
};

/* Map memory to one or more GPUs
 *
 * @handle:                memory handle returned by alloc
 * @device_ids_array_ptr:  array of gpu_ids (__u32 per device)
 * @n_devices:             number of devices in the array
 * @n_success:             number of devices mapped successfully
 *
 * @n_success returns to the caller how many devices from the start of
 * the array have mapped the buffer successfully. It can be passed into
 * a subsequent retry call to skip those devices. For the first call the
 * caller should initialize it to 0.
 *
 * If the ioctl completes with return code 0 (success), n_success ==
 * n_devices.
 */
struct kfd_ioctl_map_memory_to_gpu_args {
	__u64 handle;			/* to KFD */
	__u64 device_ids_array_ptr;	/* to KFD */
	__u32 n_devices;		/* to KFD */
	__u32 n_success;		/* to/from KFD */
};

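/* Illustrative sketch of the retry pattern described above: n_success
 * persists across attempts, so already-mapped devices are skipped.
 * gpu_id_a/gpu_id_b, handle and the retry budget are placeholders.
 *
 *	__u32 gpu_ids[2] = { gpu_id_a, gpu_id_b };
 *	struct kfd_ioctl_map_memory_to_gpu_args map = {0};
 *	int retries = 3, r;
 *
 *	map.handle = handle;
 *	map.device_ids_array_ptr = (__u64)(uintptr_t)gpu_ids;
 *	map.n_devices = 2;
 *	map.n_success = 0;		// first attempt starts at 0
 *
 *	do {
 *		r = ioctl(kfd_fd, AMDKFD_IOC_MAP_MEMORY_TO_GPU, &map);
 *	} while (r && map.n_success < map.n_devices && --retries);
 */
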
/* Unmap memory from one or more GPUs
 *
 * same arguments as for mapping
 */
struct kfd_ioctl_unmap_memory_from_gpu_args {
	__u64 handle;			/* to KFD */
	__u64 device_ids_array_ptr;	/* to KFD */
	__u32 n_devices;		/* to KFD */
	__u32 n_success;		/* to/from KFD */
};

/* Allocate GWS for a specific queue
 *
 * @queue_id:    queue's id that GWS is allocated for
 * @num_gws:     how many GWS to allocate
 * @first_gws:   index of the first GWS allocated;
 *               only contiguous GWS allocations are supported
 */
struct kfd_ioctl_alloc_queue_gws_args {
	__u32 queue_id;		/* to KFD */
	__u32 num_gws;		/* to KFD */
	__u32 first_gws;	/* from KFD */
	__u32 pad;
};

struct kfd_ioctl_get_dmabuf_info_args {
	__u64 size;		/* from KFD */
	__u64 metadata_ptr;	/* to KFD */
	__u32 metadata_size;	/* to KFD (space allocated by user)
				 * from KFD (actual metadata size)
				 */
	__u32 gpu_id;	/* from KFD */
	__u32 flags;		/* from KFD (KFD_IOC_ALLOC_MEM_FLAGS) */
	__u32 dmabuf_fd;	/* to KFD */
};

struct kfd_ioctl_import_dmabuf_args {
	__u64 va_addr;	/* to KFD */
	__u64 handle;	/* from KFD */
	__u32 gpu_id;	/* to KFD */
	__u32 dmabuf_fd;	/* to KFD */
};

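/* Illustrative sketch: importing a dma-buf exported elsewhere (dmabuf_fd
 * is an assumption, e.g. from a DRM PRIME export) and assigning it a GPU
 * virtual address. The returned handle behaves like any allocation.
 *
 *	struct kfd_ioctl_import_dmabuf_args imp = {0};
 *
 *	imp.va_addr = va;
 *	imp.gpu_id = gpu_id;
 *	imp.dmabuf_fd = dmabuf_fd;
 *
 *	if (ioctl(kfd_fd, AMDKFD_IOC_IMPORT_DMABUF, &imp) == 0)
 *		handle = imp.handle;	// map with MAP_MEMORY_TO_GPU
 */
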
/*
 * KFD SMI (System Management Interface) events
 */
enum kfd_smi_event {
	KFD_SMI_EVENT_NONE = 0, /* not used */
	KFD_SMI_EVENT_VMFAULT = 1, /* events start counting at 1 */
	KFD_SMI_EVENT_THERMAL_THROTTLE = 2,
	KFD_SMI_EVENT_GPU_PRE_RESET = 3,
	KFD_SMI_EVENT_GPU_POST_RESET = 4,
};

#define KFD_SMI_EVENT_MASK_FROM_INDEX(i) (1ULL << ((i) - 1))

struct kfd_ioctl_smi_events_args {
	__u32 gpuid;	/* to KFD */
	__u32 anon_fd;	/* from KFD */
};

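/* Illustrative sketch: obtaining the SMI event fd and enabling VM-fault
 * and thermal-throttle reports. Writing a 64-bit mask to the anon fd and
 * read()ing text records reflects the kfd_smi_events implementation and
 * is an assumption, not something this header specifies.
 *
 *	struct kfd_ioctl_smi_events_args args = { .gpuid = gpuid };
 *	__u64 mask =
 *		KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_VMFAULT) |
 *		KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_THERMAL_THROTTLE);
 *	char buf[128];
 *
 *	ioctl(kfd_fd, AMDKFD_IOC_SMI_EVENTS, &args);
 *	write(args.anon_fd, &mask, sizeof(mask));
 *	while (read(args.anon_fd, buf, sizeof(buf)) > 0)
 *		;	// each record is one event line of text
 */
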
/* Register offset inside the remapped mmio page */
enum kfd_mmio_remap {
	KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL = 0,
	KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL = 4,
};

#define AMDKFD_IOCTL_BASE 'K'
#define AMDKFD_IO(nr)			_IO(AMDKFD_IOCTL_BASE, nr)
#define AMDKFD_IOR(nr, type)		_IOR(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOW(nr, type)		_IOW(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOWR(nr, type)		_IOWR(AMDKFD_IOCTL_BASE, nr, type)

#define AMDKFD_IOC_GET_VERSION			\
		AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args)

#define AMDKFD_IOC_CREATE_QUEUE			\
		AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args)

#define AMDKFD_IOC_DESTROY_QUEUE		\
		AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args)

#define AMDKFD_IOC_SET_MEMORY_POLICY		\
		AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args)

#define AMDKFD_IOC_GET_CLOCK_COUNTERS		\
		AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args)

#define AMDKFD_IOC_GET_PROCESS_APERTURES	\
		AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args)

#define AMDKFD_IOC_UPDATE_QUEUE			\
		AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args)

#define AMDKFD_IOC_CREATE_EVENT			\
		AMDKFD_IOWR(0x08, struct kfd_ioctl_create_event_args)

#define AMDKFD_IOC_DESTROY_EVENT		\
		AMDKFD_IOW(0x09, struct kfd_ioctl_destroy_event_args)

#define AMDKFD_IOC_SET_EVENT			\
		AMDKFD_IOW(0x0A, struct kfd_ioctl_set_event_args)

#define AMDKFD_IOC_RESET_EVENT			\
		AMDKFD_IOW(0x0B, struct kfd_ioctl_reset_event_args)

#define AMDKFD_IOC_WAIT_EVENTS			\
		AMDKFD_IOWR(0x0C, struct kfd_ioctl_wait_events_args)

#define AMDKFD_IOC_DBG_REGISTER			\
		AMDKFD_IOW(0x0D, struct kfd_ioctl_dbg_register_args)

#define AMDKFD_IOC_DBG_UNREGISTER		\
		AMDKFD_IOW(0x0E, struct kfd_ioctl_dbg_unregister_args)

#define AMDKFD_IOC_DBG_ADDRESS_WATCH		\
		AMDKFD_IOW(0x0F, struct kfd_ioctl_dbg_address_watch_args)

#define AMDKFD_IOC_DBG_WAVE_CONTROL		\
		AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args)

#define AMDKFD_IOC_SET_SCRATCH_BACKING_VA	\
		AMDKFD_IOWR(0x11, struct kfd_ioctl_set_scratch_backing_va_args)

#define AMDKFD_IOC_GET_TILE_CONFIG		\
		AMDKFD_IOWR(0x12, struct kfd_ioctl_get_tile_config_args)

#define AMDKFD_IOC_SET_TRAP_HANDLER		\
		AMDKFD_IOW(0x13, struct kfd_ioctl_set_trap_handler_args)

#define AMDKFD_IOC_GET_PROCESS_APERTURES_NEW	\
		AMDKFD_IOWR(0x14,		\
			struct kfd_ioctl_get_process_apertures_new_args)

#define AMDKFD_IOC_ACQUIRE_VM			\
		AMDKFD_IOW(0x15, struct kfd_ioctl_acquire_vm_args)

#define AMDKFD_IOC_ALLOC_MEMORY_OF_GPU		\
		AMDKFD_IOWR(0x16, struct kfd_ioctl_alloc_memory_of_gpu_args)

#define AMDKFD_IOC_FREE_MEMORY_OF_GPU		\
		AMDKFD_IOW(0x17, struct kfd_ioctl_free_memory_of_gpu_args)

#define AMDKFD_IOC_MAP_MEMORY_TO_GPU		\
		AMDKFD_IOWR(0x18, struct kfd_ioctl_map_memory_to_gpu_args)

#define AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU	\
		AMDKFD_IOWR(0x19, struct kfd_ioctl_unmap_memory_from_gpu_args)

#define AMDKFD_IOC_SET_CU_MASK			\
		AMDKFD_IOW(0x1A, struct kfd_ioctl_set_cu_mask_args)

#define AMDKFD_IOC_GET_QUEUE_WAVE_STATE		\
		AMDKFD_IOWR(0x1B, struct kfd_ioctl_get_queue_wave_state_args)

#define AMDKFD_IOC_GET_DMABUF_INFO		\
		AMDKFD_IOWR(0x1C, struct kfd_ioctl_get_dmabuf_info_args)

#define AMDKFD_IOC_IMPORT_DMABUF		\
		AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)

#define AMDKFD_IOC_ALLOC_QUEUE_GWS		\
		AMDKFD_IOWR(0x1E, struct kfd_ioctl_alloc_queue_gws_args)

#define AMDKFD_IOC_SMI_EVENTS			\
		AMDKFD_IOWR(0x1F, struct kfd_ioctl_smi_events_args)

#define AMDKFD_COMMAND_START		0x01
#define AMDKFD_COMMAND_END		0x20

#endif