// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_exec_queue.h"

#include <linux/nospec.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/xe_drm.h>

#include "xe_device.h"
#include "xe_gt.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_fence.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_migrate.h"
#include "xe_pm.h"
#include "xe_ring_ops_types.h"
#include "xe_trace.h"
#include "xe_vm.h"

enum xe_exec_queue_sched_prop {
	XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
	XE_EXEC_QUEUE_TIMESLICE = 1,
	XE_EXEC_QUEUE_PREEMPT_TIMEOUT = 2,
	XE_EXEC_QUEUE_SCHED_PROP_MAX = 3,
};

static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number);

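/* Drop the queue's VM and xe_file references, then free the queue itself. */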
static void __xe_exec_queue_free(struct xe_exec_queue *q)
{
	if (q->vm)
		xe_vm_put(q->vm);

	if (q->xef)
		xe_file_put(q->xef);

	kfree(q);
}

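/*
 * Allocate the queue together with its per-width LRC pointer array, seed
 * the scheduling properties from the hw engine class defaults, and apply
 * any user extensions before the queue is initialized.
 */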
static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
						   struct xe_vm *vm,
						   u32 logical_mask,
						   u16 width, struct xe_hw_engine *hwe,
						   u32 flags, u64 extensions)
{
	struct xe_exec_queue *q;
	struct xe_gt *gt = hwe->gt;
	int err;

	/* Only kernel queues can be permanent. */
	XE_WARN_ON((flags & EXEC_QUEUE_FLAG_PERMANENT) && !(flags & EXEC_QUEUE_FLAG_KERNEL));

	q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL);
	if (!q)
		return ERR_PTR(-ENOMEM);

	kref_init(&q->refcount);
	q->flags = flags;
	q->hwe = hwe;
	q->gt = gt;
	q->class = hwe->class;
	q->width = width;
	q->logical_mask = logical_mask;
	q->fence_irq = &gt->fence_irq[hwe->class];
	q->ring_ops = gt->ring_ops[hwe->class];
	q->ops = gt->exec_queue_ops;
	INIT_LIST_HEAD(&q->lr.link);
	INIT_LIST_HEAD(&q->multi_gt_link);

	q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
	q->sched_props.preempt_timeout_us =
				hwe->eclass->sched_props.preempt_timeout_us;
	q->sched_props.job_timeout_ms =
				hwe->eclass->sched_props.job_timeout_ms;
	if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
	    q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
	else
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;

	if (vm)
		q->vm = xe_vm_get(vm);

	if (extensions) {
		/*
		 * User extensions may set q->usm, so they must be evaluated
		 * before xe_lrc_create(), and may overwrite q->sched_props,
		 * so they must be evaluated before q->ops->init().
		 */
		err = exec_queue_user_extensions(xe, q, extensions, 0);
		if (err) {
			__xe_exec_queue_free(q);
			return ERR_PTR(err);
		}
	}

	return q;
}

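/*
 * Create one LRC per unit of width (under the VM lock when a VM is bound),
 * then hand the queue to the submission backend via q->ops->init(). Any
 * LRCs already created are unwound on failure.
 */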
static int __xe_exec_queue_init(struct xe_exec_queue *q)
{
	struct xe_vm *vm = q->vm;
	int i, err;

	if (vm) {
		err = xe_vm_lock(vm, true);
		if (err)
			return err;
	}

	for (i = 0; i < q->width; ++i) {
		q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K);
		if (IS_ERR(q->lrc[i])) {
			err = PTR_ERR(q->lrc[i]);
			goto err_unlock;
		}
	}

	if (vm)
		xe_vm_unlock(vm);

	err = q->ops->init(q);
	if (err)
		goto err_lrc;

	return 0;

err_unlock:
	if (vm)
		xe_vm_unlock(vm);
err_lrc:
	for (i = i - 1; i >= 0; --i)
		xe_lrc_put(q->lrc[i]);
	return err;
}

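/**
 * xe_exec_queue_create() - Create an exec queue
 * @xe: Xe device
 * @vm: The VM the queue operates on, or NULL
 * @logical_mask: Logical mask of engine instances the queue can be placed on
 * @width: Number of engines per submission (parallel submission width)
 * @hwe: The hardware engine the queue runs on
 * @flags: EXEC_QUEUE_FLAG_* flags for the queue
 * @extensions: User pointer to the first extension in a chain, or 0
 *
 * Return: Pointer to the created exec queue, or an ERR_PTR on failure.
 */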
struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
					   u32 logical_mask, u16 width,
					   struct xe_hw_engine *hwe, u32 flags,
					   u64 extensions)
{
	struct xe_exec_queue *q;
	int err;

	q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags,
				  extensions);
	if (IS_ERR(q))
		return q;

	err = __xe_exec_queue_init(q);
	if (err)
		goto err_post_alloc;

	return q;

err_post_alloc:
	__xe_exec_queue_free(q);
	return ERR_PTR(err);
}

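/**
 * xe_exec_queue_create_class() - Create an exec queue for an engine class
 * @xe: Xe device
 * @gt: GT the queue's engines belong to
 * @vm: The VM the queue operates on, or NULL
 * @class: Engine class the queue is placed on
 * @flags: EXEC_QUEUE_FLAG_* flags for the queue
 *
 * Create a width-1 queue that may be placed on any non-reserved engine of
 * @class on @gt.
 *
 * Return: Pointer to the created exec queue, or an ERR_PTR on failure
 * (-ENODEV if @gt has no usable engine of @class).
 */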
struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
						 struct xe_vm *vm,
						 enum xe_engine_class class, u32 flags)
{
	struct xe_hw_engine *hwe, *hwe0 = NULL;
	enum xe_hw_engine_id id;
	u32 logical_mask = 0;

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;

		if (hwe->class == class) {
			logical_mask |= BIT(hwe->logical_instance);
			if (!hwe0)
				hwe0 = hwe;
		}
	}

	if (!logical_mask)
		return ERR_PTR(-ENODEV);

	return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, 0);
}

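/**
 * xe_exec_queue_destroy() - Release callback for an exec queue's last reference
 * @ref: The queue's embedded refcount
 *
 * Drop the last fence, put any child queues on the multi-GT list, and hand
 * the queue to the submission backend for teardown via q->ops->fini().
 */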
void xe_exec_queue_destroy(struct kref *ref)
{
	struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
	struct xe_exec_queue *eq, *next;

	xe_exec_queue_last_fence_put_unlocked(q);
	if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
		list_for_each_entry_safe(eq, next, &q->multi_gt_list,
					 multi_gt_link)
			xe_exec_queue_put(eq);
	}

	q->ops->fini(q);
}

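/**
 * xe_exec_queue_fini() - Final cleanup of an exec queue
 * @q: The exec queue
 *
 * Put the queue's LRCs and free the queue itself.
 */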
void xe_exec_queue_fini(struct xe_exec_queue *q)
{
	int i;

	for (i = 0; i < q->width; ++i)
		xe_lrc_put(q->lrc[i]);
	__xe_exec_queue_free(q);
}

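/**
 * xe_exec_queue_assign_name() - Assign a debug name to an exec queue
 * @q: The exec queue
 * @instance: Engine instance used as the name's suffix
 *
 * The name is derived from the queue's engine class, e.g. "rcs0" or "ccs1".
 */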
void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
{
	switch (q->class) {
	case XE_ENGINE_CLASS_RENDER:
		snprintf(q->name, sizeof(q->name), "rcs%d", instance);
		break;
	case XE_ENGINE_CLASS_VIDEO_DECODE:
		snprintf(q->name, sizeof(q->name), "vcs%d", instance);
		break;
	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
		snprintf(q->name, sizeof(q->name), "vecs%d", instance);
		break;
	case XE_ENGINE_CLASS_COPY:
		snprintf(q->name, sizeof(q->name), "bcs%d", instance);
		break;
	case XE_ENGINE_CLASS_COMPUTE:
		snprintf(q->name, sizeof(q->name), "ccs%d", instance);
		break;
	case XE_ENGINE_CLASS_OTHER:
		snprintf(q->name, sizeof(q->name), "gsccs%d", instance);
		break;
	default:
		XE_WARN_ON(q->class);
	}
}

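/**
 * xe_exec_queue_lookup() - Look up an exec queue by ID
 * @xef: Xe file the queue was created on
 * @id: Exec queue ID
 *
 * Return: The exec queue with an extra reference taken, or NULL if no queue
 * with @id exists for @xef.
 */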
struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
{
	struct xe_exec_queue *q;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_load(&xef->exec_queue.xa, id);
	if (q)
		xe_exec_queue_get(q);
	mutex_unlock(&xef->exec_queue.lock);

	return q;
}

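/**
 * xe_exec_queue_device_get_max_priority() - Maximum priority for the caller
 * @xe: Xe device
 *
 * Return: XE_EXEC_QUEUE_PRIORITY_HIGH if the caller has CAP_SYS_NICE,
 * XE_EXEC_QUEUE_PRIORITY_NORMAL otherwise.
 */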
enum xe_exec_queue_priority
xe_exec_queue_device_get_max_priority(struct xe_device *xe)
{
	return capable(CAP_SYS_NICE) ? XE_EXEC_QUEUE_PRIORITY_HIGH :
				       XE_EXEC_QUEUE_PRIORITY_NORMAL;
}

static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
				   u64 value)
{
	if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
		return -EPERM;

	q->sched_props.priority = value;
	return 0;
}

static bool xe_exec_queue_enforce_schedule_limit(void)
{
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
	return true;
#else
	return !capable(CAP_SYS_NICE);
#endif
}

static void
xe_exec_queue_get_prop_minmax(struct xe_hw_engine_class_intf *eclass,
			      enum xe_exec_queue_sched_prop prop,
			      u32 *min, u32 *max)
{
	switch (prop) {
	case XE_EXEC_QUEUE_JOB_TIMEOUT:
		*min = eclass->sched_props.job_timeout_min;
		*max = eclass->sched_props.job_timeout_max;
		break;
	case XE_EXEC_QUEUE_TIMESLICE:
		*min = eclass->sched_props.timeslice_min;
		*max = eclass->sched_props.timeslice_max;
		break;
	case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
		*min = eclass->sched_props.preempt_timeout_min;
		*max = eclass->sched_props.preempt_timeout_max;
		break;
	default:
		break;
	}
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
	if (capable(CAP_SYS_NICE)) {
		switch (prop) {
		case XE_EXEC_QUEUE_JOB_TIMEOUT:
			*min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
			*max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
			break;
		case XE_EXEC_QUEUE_TIMESLICE:
			*min = XE_HW_ENGINE_TIMESLICE_MIN;
			*max = XE_HW_ENGINE_TIMESLICE_MAX;
			break;
		case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
			*min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
			*max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
			break;
		default:
			break;
		}
	}
#endif
}

static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
				    u64 value)
{
	u32 min = 0, max = 0;

	xe_exec_queue_get_prop_minmax(q->hwe->eclass,
				      XE_EXEC_QUEUE_TIMESLICE, &min, &max);

	if (xe_exec_queue_enforce_schedule_limit() &&
	    !xe_hw_engine_timeout_in_range(value, min, max))
		return -EINVAL;

	q->sched_props.timeslice_us = value;
	return 0;
}

typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
					     struct xe_exec_queue *q,
					     u64 value);

static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
};

static int exec_queue_user_ext_set_property(struct xe_device *xe,
					    struct xe_exec_queue *q,
					    u64 extension)
{
	u64 __user *address = u64_to_user_ptr(extension);
	struct drm_xe_ext_set_property ext;
	int err;
	u32 idx;

	err = __copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.property >=
			 ARRAY_SIZE(exec_queue_set_property_funcs)) ||
	    XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE))
		return -EINVAL;

	idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
	if (!exec_queue_set_property_funcs[idx])
		return -EINVAL;

	return exec_queue_set_property_funcs[idx](xe, q, ext.value);
}

typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
					       struct xe_exec_queue *q,
					       u64 extension);

static const xe_exec_queue_user_extension_fn exec_queue_user_extension_funcs[] = {
	[DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
};

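/*
 * Walk the user extension chain, dispatching each extension to its handler.
 * The recursion depth is bounded by MAX_USER_EXTENSIONS.
 */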
#define MAX_USER_EXTENSIONS	16
static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number)
{
	u64 __user *address = u64_to_user_ptr(extensions);
	struct drm_xe_user_extension ext;
	int err;
	u32 idx;

	if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
		return -E2BIG;

	err = __copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.name >=
			 ARRAY_SIZE(exec_queue_user_extension_funcs)))
		return -EINVAL;

	idx = array_index_nospec(ext.name,
				 ARRAY_SIZE(exec_queue_user_extension_funcs));
	err = exec_queue_user_extension_funcs[idx](xe, q, extensions);
	if (XE_IOCTL_DBG(xe, err))
		return err;

	if (ext.next_extension)
		return exec_queue_user_extensions(xe, q, ext.next_extension,
						  ++ext_number);

	return 0;
}

static const enum xe_engine_class user_to_xe_engine_class[] = {
	[DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
	[DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
	[DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
	[DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
	[DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
};

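/*
 * Resolve a user-supplied class/GT/instance triplet to a hw engine, with
 * the class index sanitized against speculative out-of-bounds access.
 */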
static struct xe_hw_engine *
find_hw_engine(struct xe_device *xe,
	       struct drm_xe_engine_class_instance eci)
{
	u32 idx;

	if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
		return NULL;

	if (eci.gt_id >= xe->info.gt_count)
		return NULL;

	idx = array_index_nospec(eci.engine_class,
				 ARRAY_SIZE(user_to_xe_engine_class));

	return xe_gt_hw_engine(xe_device_get_gt(xe, eci.gt_id),
			       user_to_xe_engine_class[idx],
			       eci.engine_instance, true);
}

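/*
 * VM bind queues allow only a single placement on a copy engine: rewrite
 * the user's engine class to copy and build a mask of all non-reserved
 * copy engines on the GT. Returns 0 for invalid input.
 */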
static u32 bind_exec_queue_logical_mask(struct xe_device *xe, struct xe_gt *gt,
					struct drm_xe_engine_class_instance *eci,
					u16 width, u16 num_placements)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	u32 logical_mask = 0;

	if (XE_IOCTL_DBG(xe, width != 1))
		return 0;
	if (XE_IOCTL_DBG(xe, num_placements != 1))
		return 0;
	if (XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
		return 0;

	eci[0].engine_class = DRM_XE_ENGINE_CLASS_COPY;

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;

		if (hwe->class ==
		    user_to_xe_engine_class[DRM_XE_ENGINE_CLASS_COPY])
			logical_mask |= BIT(hwe->logical_instance);
	}

	return logical_mask;
}

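/*
 * Validate the user's width x num_placements matrix of engine instances:
 * all entries must name existing, non-reserved engines of one class on one
 * GT, and parallel placements must be logically contiguous. Returns the
 * combined logical mask, or 0 if the matrix is invalid.
 */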
static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt,
				      struct drm_xe_engine_class_instance *eci,
				      u16 width, u16 num_placements)
{
	int len = width * num_placements;
	int i, j, n;
	u16 class;
	u16 gt_id;
	u32 return_mask = 0, prev_mask;

	if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe) &&
			 len > 1))
		return 0;

	for (i = 0; i < width; ++i) {
		u32 current_mask = 0;

		for (j = 0; j < num_placements; ++j) {
			struct xe_hw_engine *hwe;

			n = j * width + i;

			hwe = find_hw_engine(xe, eci[n]);
			if (XE_IOCTL_DBG(xe, !hwe))
				return 0;

			if (XE_IOCTL_DBG(xe, xe_hw_engine_is_reserved(hwe)))
				return 0;

			if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) ||
			    XE_IOCTL_DBG(xe, n && eci[n].engine_class != class))
				return 0;

			class = eci[n].engine_class;
			gt_id = eci[n].gt_id;

			if (width == 1 || !i)
				return_mask |= BIT(eci[n].engine_instance);
			current_mask |= BIT(eci[n].engine_instance);
		}

		/* Parallel submissions must be logically contiguous */
		if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1))
			return 0;

		prev_mask = current_mask;
	}

	return return_mask;
}

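/**
 * xe_exec_queue_create_ioctl() - Handler for DRM_IOCTL_XE_EXEC_QUEUE_CREATE
 * @dev: DRM device
 * @data: Pointer to struct drm_xe_exec_queue_create
 * @file: DRM file
 *
 * For DRM_XE_ENGINE_CLASS_VM_BIND, a bind queue is created on every
 * non-media GT, with the additional queues linked to the first through the
 * multi-GT list. For all other classes, a single queue is created on the
 * user-supplied VM.
 *
 * Return: 0 on success, negative error code on failure.
 */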
int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_create *args = data;
	struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
	struct drm_xe_engine_class_instance __user *user_eci =
		u64_to_user_ptr(args->instances);
	struct xe_hw_engine *hwe;
	struct xe_vm *vm, *migrate_vm;
	struct xe_gt *gt;
	struct xe_exec_queue *q = NULL;
	u32 logical_mask;
	u32 id;
	u32 len;
	int err;

	if (XE_IOCTL_DBG(xe, args->flags) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	len = args->width * args->num_placements;
	if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
		return -EINVAL;

	err = __copy_from_user(eci, user_eci,
			       sizeof(struct drm_xe_engine_class_instance) *
			       len);
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count))
		return -EINVAL;

	if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
		for_each_gt(gt, xe, id) {
			struct xe_exec_queue *new;
			u32 flags;

			if (xe_gt_is_media_type(gt))
				continue;

			eci[0].gt_id = gt->info.id;
			logical_mask = bind_exec_queue_logical_mask(xe, gt, eci,
								    args->width,
								    args->num_placements);
			if (XE_IOCTL_DBG(xe, !logical_mask))
				return -EINVAL;

			hwe = find_hw_engine(xe, eci[0]);
			if (XE_IOCTL_DBG(xe, !hwe))
				return -EINVAL;

			/* The migration VM doesn't hold an RPM reference. */
			xe_pm_runtime_get_noresume(xe);

			flags = EXEC_QUEUE_FLAG_VM | (id ? EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD : 0);

			migrate_vm = xe_migrate_get_vm(gt_to_tile(gt)->migrate);
			new = xe_exec_queue_create(xe, migrate_vm, logical_mask,
						   args->width, hwe, flags,
						   args->extensions);

			xe_pm_runtime_put(xe); /* now held by the exec queue */

			xe_vm_put(migrate_vm);
			if (IS_ERR(new)) {
				err = PTR_ERR(new);
				if (q)
					goto put_exec_queue;
				return err;
			}
			if (id == 0)
				q = new;
			else
				list_add_tail(&new->multi_gt_list,
					      &q->multi_gt_link);
		}
	} else {
		gt = xe_device_get_gt(xe, eci[0].gt_id);
		logical_mask = calc_validate_logical_mask(xe, gt, eci,
							  args->width,
							  args->num_placements);
		if (XE_IOCTL_DBG(xe, !logical_mask))
			return -EINVAL;

		hwe = find_hw_engine(xe, eci[0]);
		if (XE_IOCTL_DBG(xe, !hwe))
			return -EINVAL;

		vm = xe_vm_lookup(xef, args->vm_id);
		if (XE_IOCTL_DBG(xe, !vm))
			return -ENOENT;

		err = down_read_interruptible(&vm->lock);
		if (err) {
			xe_vm_put(vm);
			return err;
		}

		if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
			up_read(&vm->lock);
			xe_vm_put(vm);
			return -ENOENT;
		}

		q = xe_exec_queue_create(xe, vm, logical_mask,
					 args->width, hwe, 0,
					 args->extensions);
		up_read(&vm->lock);
		xe_vm_put(vm);
		if (IS_ERR(q))
			return PTR_ERR(q);

		if (xe_vm_in_preempt_fence_mode(vm)) {
			q->lr.context = dma_fence_context_alloc(1);
			spin_lock_init(&q->lr.lock);

			err = xe_vm_add_compute_exec_queue(vm, q);
			if (XE_IOCTL_DBG(xe, err))
				goto put_exec_queue;
		}
	}

	mutex_lock(&xef->exec_queue.lock);
	err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
	mutex_unlock(&xef->exec_queue.lock);
	if (err)
		goto kill_exec_queue;

	args->exec_queue_id = id;
	q->xef = xe_file_get(xef);

	return 0;

kill_exec_queue:
	xe_exec_queue_kill(q);
put_exec_queue:
	xe_exec_queue_put(q);
	return err;
}

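/**
 * xe_exec_queue_get_property_ioctl() - Handler for DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
 * @dev: DRM device
 * @data: Pointer to struct drm_xe_exec_queue_get_property
 * @file: DRM file
 *
 * Currently only DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN is supported, which
 * reports the queue's reset status.
 *
 * Return: 0 on success, negative error code on failure.
 */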
int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_get_property *args = data;
	struct xe_exec_queue *q;
	int ret;

	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	switch (args->property) {
	case DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN:
		args->value = q->ops->reset_status(q);
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	xe_exec_queue_put(q);

	return ret;
}

/**
 * xe_exec_queue_is_lr() - Whether an exec_queue is long-running
 * @q: The exec_queue
 *
 * Return: True if the exec_queue is long-running, false otherwise.
 */
bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
{
	return q->vm && xe_vm_in_lr_mode(q->vm) &&
		!(q->flags & EXEC_QUEUE_FLAG_VM);
}

static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q)
{
	return q->lrc[0]->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc[0]) - 1;
}

/**
 * xe_exec_queue_ring_full() - Whether an exec_queue's ring is full
 * @q: The exec_queue
 *
 * Return: True if the exec_queue's ring is full, false otherwise.
 */
bool xe_exec_queue_ring_full(struct xe_exec_queue *q)
{
	struct xe_lrc *lrc = q->lrc[0];
	s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES;

	return xe_exec_queue_num_job_inflight(q) >= max_job;
}

/**
 * xe_exec_queue_is_idle() - Whether an exec_queue is idle.
 * @q: The exec_queue
 *
 * FIXME: Need to determine what to use as the short-lived
 * timeline lock for the exec_queues, so that the return value
 * of this function becomes more than just an advisory
 * snapshot in time. The timeline lock must protect the
 * seqno from racing submissions on the same exec_queue.
 * Typically vm->resv, but user-created timeline locks use the migrate vm
 * and never grab the migrate vm->resv, so we have a race there.
 *
 * Return: True if the exec_queue is idle, false otherwise.
 */
bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
{
	if (xe_exec_queue_is_parallel(q)) {
		int i;

		for (i = 0; i < q->width; ++i) {
			if (xe_lrc_seqno(q->lrc[i]) !=
			    q->lrc[i]->fence_ctx.next_seqno - 1)
				return false;
		}

		return true;
	}

	return xe_lrc_seqno(q->lrc[0]) ==
		q->lrc[0]->fence_ctx.next_seqno - 1;
}

/**
 * xe_exec_queue_update_run_ticks() - Update run time in ticks for this exec
 * queue from hw
 * @q: The exec queue
 *
 * Read the timestamp saved by HW for this exec queue and accumulate the run
 * ticks calculated from the delta since the last update.
 */
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
{
	struct xe_file *xef;
	struct xe_lrc *lrc;
	u32 old_ts, new_ts;

	/*
	 * Jobs that are run during driver load may use an exec_queue, but are
	 * not associated with a user xe file, so avoid accumulating busyness
	 * for kernel-specific work.
	 */
	if (!q->vm || !q->vm->xef)
		return;

	xef = q->vm->xef;

	/*
	 * Only sample the first LRC. For parallel submission, all of them are
	 * scheduled together and we compensate that below by multiplying by
	 * width - this may introduce errors if that premise is not true and
	 * they don't exit 100% aligned. On the other hand, looping through
	 * the LRCs and reading them at different times could also introduce
	 * errors.
	 */
	lrc = q->lrc[0];
	new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
	xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;
}

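/**
 * xe_exec_queue_kill() - Kill an exec queue
 * @q: The exec queue
 *
 * Kill the queue and any child queues on its multi-GT list, removing each
 * from its VM's compute exec queue list.
 */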
void xe_exec_queue_kill(struct xe_exec_queue *q)
{
	struct xe_exec_queue *eq = q, *next;

	list_for_each_entry_safe(eq, next, &eq->multi_gt_list,
				 multi_gt_link) {
		q->ops->kill(eq);
		xe_vm_remove_compute_exec_queue(q->vm, eq);
	}

	q->ops->kill(q);
	xe_vm_remove_compute_exec_queue(q->vm, q);
}

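/**
 * xe_exec_queue_destroy_ioctl() - Handler for DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
 * @dev: DRM device
 * @data: Pointer to struct drm_xe_exec_queue_destroy
 * @file: DRM file
 *
 * Remove the queue from the file's xarray, kill it and drop the file's
 * reference to it.
 *
 * Return: 0 on success, negative error code on failure.
 */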
int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_destroy *args = data;
	struct xe_exec_queue *q;

	if (XE_IOCTL_DBG(xe, args->pad) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
	mutex_unlock(&xef->exec_queue.lock);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	xe_exec_queue_kill(q);

	trace_xe_exec_queue_close(q);
	xe_exec_queue_put(q);

	return 0;
}

static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
						    struct xe_vm *vm)
{
	if (q->flags & EXEC_QUEUE_FLAG_VM)
		lockdep_assert_held(&vm->lock);
	else
		xe_vm_assert_held(vm);
}

/**
 * xe_exec_queue_last_fence_put() - Drop ref to last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 */
void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	if (q->last_fence) {
		dma_fence_put(q->last_fence);
		q->last_fence = NULL;
	}
}

/**
 * xe_exec_queue_last_fence_put_unlocked() - Drop ref to last fence unlocked
 * @q: The exec queue
 *
 * Only safe to be called from xe_exec_queue_destroy().
 */
void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
{
	if (q->last_fence) {
		dma_fence_put(q->last_fence);
		q->last_fence = NULL;
	}
}

/**
 * xe_exec_queue_last_fence_get() - Get last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Get the last fence, taking a reference to it.
 *
 * Return: The last fence if not signaled, the dma fence stub if signaled.
 */
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
					       struct xe_vm *vm)
{
	struct dma_fence *fence;

	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	if (q->last_fence &&
	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
		xe_exec_queue_last_fence_put(q, vm);

	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
	dma_fence_get(fence);
	return fence;
}

/**
 * xe_exec_queue_last_fence_set() - Set last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 * @fence: The fence
 *
 * Set the last fence for the engine. This takes a reference on the fence;
 * when closing the engine, xe_exec_queue_last_fence_put() should be called.
 */
void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
				  struct dma_fence *fence)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	xe_exec_queue_last_fence_put(q, vm);
	q->last_fence = dma_fence_get(fence);
}