// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_exec_queue.h"

#include <linux/nospec.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <uapi/drm/xe_drm.h>

#include "xe_device.h"
#include "xe_gt.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_engine_group.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_migrate.h"
#include "xe_pm.h"
#include "xe_pxp.h"
#include "xe_ring_ops_types.h"
#include "xe_trace.h"
#include "xe_vm.h"

enum xe_exec_queue_sched_prop {
	XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
	XE_EXEC_QUEUE_TIMESLICE = 1,
	XE_EXEC_QUEUE_PREEMPT_TIMEOUT = 2,
	XE_EXEC_QUEUE_SCHED_PROP_MAX = 3,
};

static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number);

static void __xe_exec_queue_free(struct xe_exec_queue *q)
{
	if (xe_exec_queue_uses_pxp(q))
		xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
	if (q->vm)
		xe_vm_put(q->vm);

	if (q->xef)
		xe_file_put(q->xef);

	kfree(q);
}

static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
						   struct xe_vm *vm,
						   u32 logical_mask,
						   u16 width, struct xe_hw_engine *hwe,
						   u32 flags, u64 extensions)
{
	struct xe_exec_queue *q;
	struct xe_gt *gt = hwe->gt;
	int err;

	/* only kernel queues can be permanent */
	XE_WARN_ON((flags & EXEC_QUEUE_FLAG_PERMANENT) && !(flags & EXEC_QUEUE_FLAG_KERNEL));

	q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL);
	if (!q)
		return ERR_PTR(-ENOMEM);

	kref_init(&q->refcount);
	q->flags = flags;
	q->hwe = hwe;
	q->gt = gt;
	q->class = hwe->class;
	q->width = width;
	q->msix_vec = XE_IRQ_DEFAULT_MSIX;
	q->logical_mask = logical_mask;
	q->fence_irq = &gt->fence_irq[hwe->class];
	q->ring_ops = gt->ring_ops[hwe->class];
	q->ops = gt->exec_queue_ops;
	INIT_LIST_HEAD(&q->lr.link);
	INIT_LIST_HEAD(&q->multi_gt_link);
	INIT_LIST_HEAD(&q->hw_engine_group_link);
	INIT_LIST_HEAD(&q->pxp.link);

	q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
	q->sched_props.preempt_timeout_us =
				hwe->eclass->sched_props.preempt_timeout_us;
	q->sched_props.job_timeout_ms =
				hwe->eclass->sched_props.job_timeout_ms;
	if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
	    q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
	else
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;

	if (vm)
		q->vm = xe_vm_get(vm);

	if (extensions) {
		/*
		 * User extensions may set q->usm, so this must come before
		 * xe_lrc_create(), and may overwrite q->sched_props, so it
		 * must come before q->ops->init().
		 */
		err = exec_queue_user_extensions(xe, q, extensions, 0);
		if (err) {
			__xe_exec_queue_free(q);
			return ERR_PTR(err);
		}
	}

	return q;
}

static int __xe_exec_queue_init(struct xe_exec_queue *q)
{
	struct xe_vm *vm = q->vm;
	int i, err;
	u32 flags = 0;

	/*
	 * PXP workloads executing on RCS or CCS must run in isolation (i.e. no
	 * other workload can use the EUs at the same time). On MTL this is done
	 * by setting the RUNALONE bit in the LRC, while from Xe2 onwards there
	 * is a dedicated bit for it.
	 */
	if (xe_exec_queue_uses_pxp(q) &&
	    (q->class == XE_ENGINE_CLASS_RENDER || q->class == XE_ENGINE_CLASS_COMPUTE)) {
		if (GRAPHICS_VER(gt_to_xe(q->gt)) >= 20)
			flags |= XE_LRC_CREATE_PXP;
		else
			flags |= XE_LRC_CREATE_RUNALONE;
	}

	if (vm) {
		err = xe_vm_lock(vm, true);
		if (err)
			return err;
	}

	for (i = 0; i < q->width; ++i) {
		q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K, q->msix_vec, flags);
		if (IS_ERR(q->lrc[i])) {
			err = PTR_ERR(q->lrc[i]);
			goto err_unlock;
		}
	}

	if (vm)
		xe_vm_unlock(vm);

	err = q->ops->init(q);
	if (err)
		goto err_lrc;

	return 0;

err_unlock:
	if (vm)
		xe_vm_unlock(vm);
err_lrc:
	for (i = i - 1; i >= 0; --i)
		xe_lrc_put(q->lrc[i]);
	return err;
}

struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
					   u32 logical_mask, u16 width,
					   struct xe_hw_engine *hwe, u32 flags,
					   u64 extensions)
{
	struct xe_exec_queue *q;
	int err;

	/* VMs for GSCCS queues (and only those) must have the XE_VM_FLAG_GSC flag */
	xe_assert(xe, !vm || (!!(vm->flags & XE_VM_FLAG_GSC) == !!(hwe->engine_id == XE_HW_ENGINE_GSCCS0)));

	q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags,
				  extensions);
	if (IS_ERR(q))
		return q;

	err = __xe_exec_queue_init(q);
	if (err)
		goto err_post_alloc;

	/*
	 * We can only add the queue to the PXP list after the init is complete,
	 * because the PXP termination can call exec_queue_kill and that will
	 * go bad if the queue is only half-initialized. This means that we
	 * can't do it when we handle the PXP extension in __xe_exec_queue_alloc
	 * and we need to do it here instead.
	 */
	if (xe_exec_queue_uses_pxp(q)) {
		err = xe_pxp_exec_queue_add(xe->pxp, q);
		if (err)
			goto err_post_alloc;
	}

	return q;

err_post_alloc:
	__xe_exec_queue_free(q);
	return ERR_PTR(err);
}
ALLOW_ERROR_INJECTION(xe_exec_queue_create, ERRNO);
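
/*
 * Illustrative sketch (not part of the driver): a kernel-internal caller
 * typically builds the placement mask from a chosen hardware engine,
 * roughly:
 *
 *	q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1,
 *				 hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 *
 * The queue starts with one reference, dropped via xe_exec_queue_put()
 * when the caller is done with it.
 */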

struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
						 struct xe_vm *vm,
						 enum xe_engine_class class,
						 u32 flags, u64 extensions)
{
	struct xe_hw_engine *hwe, *hwe0 = NULL;
	enum xe_hw_engine_id id;
	u32 logical_mask = 0;

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;

		if (hwe->class == class) {
			logical_mask |= BIT(hwe->logical_instance);
			if (!hwe0)
				hwe0 = hwe;
		}
	}

	if (!logical_mask)
		return ERR_PTR(-ENODEV);

	return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, extensions);
}

/**
 * xe_exec_queue_create_bind() - Create bind exec queue.
 * @xe: Xe device.
 * @tile: tile which bind exec queue belongs to.
 * @flags: exec queue creation flags
 * @extensions: exec queue creation extensions
 *
 * Normalize bind exec queue creation. A bind exec queue is tied to the
 * migration VM for access to the physical memory required for page table
 * programming. On faulting devices the reserved copy engine instance must
 * be used to avoid deadlock: user binds must not get stuck behind faults,
 * because the kernel binds that resolve those faults would in turn depend
 * on the user binds. On non-faulting devices any copy engine can be used.
 *
 * Returns exec queue on success, ERR_PTR on failure
 */
struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
						struct xe_tile *tile,
						u32 flags, u64 extensions)
{
	struct xe_gt *gt = tile->primary_gt;
	struct xe_exec_queue *q;
	struct xe_vm *migrate_vm;

	migrate_vm = xe_migrate_get_vm(tile->migrate);
	if (xe->info.has_usm) {
		struct xe_hw_engine *hwe = xe_gt_hw_engine(gt,
							   XE_ENGINE_CLASS_COPY,
							   gt->usm.reserved_bcs_instance,
							   false);

		if (!hwe) {
			xe_vm_put(migrate_vm);
			return ERR_PTR(-EINVAL);
		}

		q = xe_exec_queue_create(xe, migrate_vm,
					 BIT(hwe->logical_instance), 1, hwe,
					 flags, extensions);
	} else {
		q = xe_exec_queue_create_class(xe, gt, migrate_vm,
					       XE_ENGINE_CLASS_COPY, flags,
					       extensions);
	}
	xe_vm_put(migrate_vm);

	return q;
}
ALLOW_ERROR_INJECTION(xe_exec_queue_create_bind, ERRNO);
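
/*
 * Illustrative sketch (not part of the driver): because the bind queue is
 * tied to the tile's migration VM internally, a kernel-side caller only
 * supplies the tile and flags, roughly:
 *
 *	q = xe_exec_queue_create_bind(xe, tile, EXEC_QUEUE_FLAG_VM, 0);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 *
 * xe_exec_queue_create_ioctl() below uses this path for
 * DRM_XE_ENGINE_CLASS_VM_BIND queues, one per tile.
 */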

void xe_exec_queue_destroy(struct kref *ref)
{
	struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
	struct xe_exec_queue *eq, *next;

	if (xe_exec_queue_uses_pxp(q))
		xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);

	xe_exec_queue_last_fence_put_unlocked(q);
	if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
		list_for_each_entry_safe(eq, next, &q->multi_gt_list,
					 multi_gt_link)
			xe_exec_queue_put(eq);
	}

	q->ops->fini(q);
}

void xe_exec_queue_fini(struct xe_exec_queue *q)
{
	int i;

	/*
	 * Before releasing our ref to the lrc and xef, accumulate our run
	 * ticks and wake up any waiters.
	 */
	xe_exec_queue_update_run_ticks(q);
	if (q->xef && atomic_dec_and_test(&q->xef->exec_queue.pending_removal))
		wake_up_var(&q->xef->exec_queue.pending_removal);

	for (i = 0; i < q->width; ++i)
		xe_lrc_put(q->lrc[i]);

	__xe_exec_queue_free(q);
}

void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
{
	switch (q->class) {
	case XE_ENGINE_CLASS_RENDER:
		snprintf(q->name, sizeof(q->name), "rcs%d", instance);
		break;
	case XE_ENGINE_CLASS_VIDEO_DECODE:
		snprintf(q->name, sizeof(q->name), "vcs%d", instance);
		break;
	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
		snprintf(q->name, sizeof(q->name), "vecs%d", instance);
		break;
	case XE_ENGINE_CLASS_COPY:
		snprintf(q->name, sizeof(q->name), "bcs%d", instance);
		break;
	case XE_ENGINE_CLASS_COMPUTE:
		snprintf(q->name, sizeof(q->name), "ccs%d", instance);
		break;
	case XE_ENGINE_CLASS_OTHER:
		snprintf(q->name, sizeof(q->name), "gsccs%d", instance);
		break;
	default:
		XE_WARN_ON(q->class);
	}
}

struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
{
	struct xe_exec_queue *q;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_load(&xef->exec_queue.xa, id);
	if (q)
		xe_exec_queue_get(q);
	mutex_unlock(&xef->exec_queue.lock);

	return q;
}
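
/*
 * Illustrative sketch (not part of the driver): every successful lookup
 * takes a reference under xef->exec_queue.lock, so callers must pair it
 * with a put, e.g.:
 *
 *	q = xe_exec_queue_lookup(xef, id);
 *	if (!q)
 *		return -ENOENT;
 *	... use q ...
 *	xe_exec_queue_put(q);
 */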

enum xe_exec_queue_priority
xe_exec_queue_device_get_max_priority(struct xe_device *xe)
{
	return capable(CAP_SYS_NICE) ? XE_EXEC_QUEUE_PRIORITY_HIGH :
				       XE_EXEC_QUEUE_PRIORITY_NORMAL;
}

static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
				   u64 value)
{
	if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
		return -EPERM;

	q->sched_props.priority = value;
	return 0;
}

static bool xe_exec_queue_enforce_schedule_limit(void)
{
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
	return true;
#else
	return !capable(CAP_SYS_NICE);
#endif
}

static void
xe_exec_queue_get_prop_minmax(struct xe_hw_engine_class_intf *eclass,
			      enum xe_exec_queue_sched_prop prop,
			      u32 *min, u32 *max)
{
	switch (prop) {
	case XE_EXEC_QUEUE_JOB_TIMEOUT:
		*min = eclass->sched_props.job_timeout_min;
		*max = eclass->sched_props.job_timeout_max;
		break;
	case XE_EXEC_QUEUE_TIMESLICE:
		*min = eclass->sched_props.timeslice_min;
		*max = eclass->sched_props.timeslice_max;
		break;
	case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
		*min = eclass->sched_props.preempt_timeout_min;
		*max = eclass->sched_props.preempt_timeout_max;
		break;
	default:
		break;
	}
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
	if (capable(CAP_SYS_NICE)) {
		switch (prop) {
		case XE_EXEC_QUEUE_JOB_TIMEOUT:
			*min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
			*max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
			break;
		case XE_EXEC_QUEUE_TIMESLICE:
			*min = XE_HW_ENGINE_TIMESLICE_MIN;
			*max = XE_HW_ENGINE_TIMESLICE_MAX;
			break;
		case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
			*min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
			*max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
			break;
		default:
			break;
		}
	}
#endif
}

static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
				    u64 value)
{
	u32 min = 0, max = 0;

	xe_exec_queue_get_prop_minmax(q->hwe->eclass,
				      XE_EXEC_QUEUE_TIMESLICE, &min, &max);

	if (xe_exec_queue_enforce_schedule_limit() &&
	    !xe_hw_engine_timeout_in_range(value, min, max))
		return -EINVAL;

	q->sched_props.timeslice_us = value;
	return 0;
}

static int
exec_queue_set_pxp_type(struct xe_device *xe, struct xe_exec_queue *q, u64 value)
{
	if (value == DRM_XE_PXP_TYPE_NONE)
		return 0;

	/* we only support HWDRM sessions right now */
	if (XE_IOCTL_DBG(xe, value != DRM_XE_PXP_TYPE_HWDRM))
		return -EINVAL;

	if (!xe_pxp_is_enabled(xe->pxp))
		return -ENODEV;

	return xe_pxp_exec_queue_set_type(xe->pxp, q, DRM_XE_PXP_TYPE_HWDRM);
}

typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
					     struct xe_exec_queue *q,
					     u64 value);

static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE] = exec_queue_set_pxp_type,
};

static int exec_queue_user_ext_set_property(struct xe_device *xe,
					    struct xe_exec_queue *q,
					    u64 extension)
{
	u64 __user *address = u64_to_user_ptr(extension);
	struct drm_xe_ext_set_property ext;
	int err;
	u32 idx;

	err = __copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.property >=
			 ARRAY_SIZE(exec_queue_set_property_funcs)) ||
	    XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE &&
			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE))
		return -EINVAL;

	idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
	if (!exec_queue_set_property_funcs[idx])
		return -EINVAL;

	return exec_queue_set_property_funcs[idx](xe, q, ext.value);
}

typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
					       struct xe_exec_queue *q,
					       u64 extension);

static const xe_exec_queue_user_extension_fn exec_queue_user_extension_funcs[] = {
	[DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
};

#define MAX_USER_EXTENSIONS	16
static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number)
{
	u64 __user *address = u64_to_user_ptr(extensions);
	struct drm_xe_user_extension ext;
	int err;
	u32 idx;

	if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
		return -E2BIG;

	err = __copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.name >=
			 ARRAY_SIZE(exec_queue_user_extension_funcs)))
		return -EINVAL;

	idx = array_index_nospec(ext.name,
				 ARRAY_SIZE(exec_queue_user_extension_funcs));
	err = exec_queue_user_extension_funcs[idx](xe, q, extensions);
	if (XE_IOCTL_DBG(xe, err))
		return err;

	if (ext.next_extension)
		return exec_queue_user_extensions(xe, q, ext.next_extension,
						  ++ext_number);

	return 0;
}
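
/*
 * Illustrative sketch (not part of the driver): from userspace, extensions
 * are a singly linked chain of structs, each beginning with a
 * struct drm_xe_user_extension header. Setting a queue priority at creation
 * time would look roughly like:
 *
 *	struct drm_xe_ext_set_property ext = {
 *		.base.next_extension = 0,
 *		.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
 *		.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
 *		.value = 1,
 *	};
 *	struct drm_xe_exec_queue_create create = {
 *		.extensions = (uintptr_t)&ext,
 *		...
 *	};
 *
 * The chain is walked recursively above, capped at MAX_USER_EXTENSIONS.
 */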

static u32 calc_validate_logical_mask(struct xe_device *xe,
				      struct drm_xe_engine_class_instance *eci,
				      u16 width, u16 num_placements)
{
	int len = width * num_placements;
	int i, j, n;
	u16 class;
	u16 gt_id;
	u32 return_mask = 0, prev_mask;

	if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe) &&
			 len > 1))
		return 0;

	for (i = 0; i < width; ++i) {
		u32 current_mask = 0;

		for (j = 0; j < num_placements; ++j) {
			struct xe_hw_engine *hwe;

			n = j * width + i;

			hwe = xe_hw_engine_lookup(xe, eci[n]);
			if (XE_IOCTL_DBG(xe, !hwe))
				return 0;

			if (XE_IOCTL_DBG(xe, xe_hw_engine_is_reserved(hwe)))
				return 0;

			if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) ||
			    XE_IOCTL_DBG(xe, n && eci[n].engine_class != class))
				return 0;

			class = eci[n].engine_class;
			gt_id = eci[n].gt_id;

			if (width == 1 || !i)
				return_mask |= BIT(eci[n].engine_instance);
			current_mask |= BIT(eci[n].engine_instance);
		}

		/* Parallel submissions must be logically contiguous */
		if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1))
			return 0;

		prev_mask = current_mask;
	}

	return return_mask;
}
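
/*
 * Illustrative note (not part of the driver): the eci array is laid out as
 * num_placements rows of width entries, indexed by n = j * width + i. For
 * width = 2 and num_placements = 2 on instances 0-3, a valid layout is:
 *
 *	placement 0: { instance 0, instance 1 }
 *	placement 1: { instance 2, instance 3 }
 *
 * Column i = 1 then collects instances {1, 3} (mask 0b1010), which is
 * column i = 0's {0, 2} (mask 0b0101) shifted left by one - exactly the
 * current_mask == prev_mask << 1 contiguity check above.
 */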

int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_create *args = data;
	struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
	struct drm_xe_engine_class_instance __user *user_eci =
		u64_to_user_ptr(args->instances);
	struct xe_hw_engine *hwe;
	struct xe_vm *vm;
	struct xe_tile *tile;
	struct xe_exec_queue *q = NULL;
	u32 logical_mask;
	u32 flags = 0;
	u32 id;
	u32 len;
	int err;

	if (XE_IOCTL_DBG(xe, args->flags & ~DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	len = args->width * args->num_placements;
	if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
		return -EINVAL;

	err = __copy_from_user(eci, user_eci,
			       sizeof(struct drm_xe_engine_class_instance) *
			       len);
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count))
		return -EINVAL;

	if (args->flags & DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT)
		flags |= EXEC_QUEUE_FLAG_LOW_LATENCY;

	if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
		if (XE_IOCTL_DBG(xe, args->width != 1) ||
		    XE_IOCTL_DBG(xe, args->num_placements != 1) ||
		    XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
			return -EINVAL;

		for_each_tile(tile, xe, id) {
			struct xe_exec_queue *new;

			flags |= EXEC_QUEUE_FLAG_VM;
			if (id)
				flags |= EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD;

			new = xe_exec_queue_create_bind(xe, tile, flags,
							args->extensions);
			if (IS_ERR(new)) {
				err = PTR_ERR(new);
				if (q)
					goto put_exec_queue;
				return err;
			}
			if (id == 0)
				q = new;
			else
				list_add_tail(&new->multi_gt_list,
					      &q->multi_gt_link);
		}
	} else {
		logical_mask = calc_validate_logical_mask(xe, eci,
							  args->width,
							  args->num_placements);
		if (XE_IOCTL_DBG(xe, !logical_mask))
			return -EINVAL;

		hwe = xe_hw_engine_lookup(xe, eci[0]);
		if (XE_IOCTL_DBG(xe, !hwe))
			return -EINVAL;

		vm = xe_vm_lookup(xef, args->vm_id);
		if (XE_IOCTL_DBG(xe, !vm))
			return -ENOENT;

		err = down_read_interruptible(&vm->lock);
		if (err) {
			xe_vm_put(vm);
			return err;
		}

		if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
			up_read(&vm->lock);
			xe_vm_put(vm);
			return -ENOENT;
		}

		q = xe_exec_queue_create(xe, vm, logical_mask,
					 args->width, hwe, flags,
					 args->extensions);
		up_read(&vm->lock);
		xe_vm_put(vm);
		if (IS_ERR(q))
			return PTR_ERR(q);

		if (xe_vm_in_preempt_fence_mode(vm)) {
			q->lr.context = dma_fence_context_alloc(1);

			err = xe_vm_add_compute_exec_queue(vm, q);
			if (XE_IOCTL_DBG(xe, err))
				goto put_exec_queue;
		}

		if (q->vm && q->hwe->hw_engine_group) {
			err = xe_hw_engine_group_add_exec_queue(q->hwe->hw_engine_group, q);
			if (err)
				goto put_exec_queue;
		}
	}

	q->xef = xe_file_get(xef);

	/* user id alloc must always be last in ioctl to prevent UAF */
	err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
	if (err)
		goto kill_exec_queue;

	args->exec_queue_id = id;

	return 0;

kill_exec_queue:
	xe_exec_queue_kill(q);
put_exec_queue:
	xe_exec_queue_put(q);
	return err;
}
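
/*
 * Illustrative sketch (not part of the driver): creating a simple
 * single-engine queue from userspace would look roughly like:
 *
 *	struct drm_xe_engine_class_instance eci = {
 *		.engine_class = DRM_XE_ENGINE_CLASS_RENDER,
 *	};
 *	struct drm_xe_exec_queue_create create = {
 *		.width = 1,
 *		.num_placements = 1,
 *		.vm_id = vm_id,
 *		.instances = (uintptr_t)&eci,
 *	};
 *
 *	err = ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);
 *
 * On success, create.exec_queue_id names the queue in later exec and
 * destroy calls.
 */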

int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_get_property *args = data;
	struct xe_exec_queue *q;
	int ret;

	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	switch (args->property) {
	case DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN:
		args->value = q->ops->reset_status(q);
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	xe_exec_queue_put(q);

	return ret;
}

/**
 * xe_exec_queue_is_lr() - Whether an exec_queue is long-running
 * @q: The exec_queue
 *
 * Return: True if the exec_queue is long-running, false otherwise.
 */
bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
{
	return q->vm && xe_vm_in_lr_mode(q->vm) &&
		!(q->flags & EXEC_QUEUE_FLAG_VM);
}

static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q)
{
	return q->lrc[0]->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc[0]) - 1;
}

/**
 * xe_exec_queue_ring_full() - Whether an exec_queue's ring is full
 * @q: The exec_queue
 *
 * Return: True if the exec_queue's ring is full, false otherwise.
 */
bool xe_exec_queue_ring_full(struct xe_exec_queue *q)
{
	struct xe_lrc *lrc = q->lrc[0];
	s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES;

	return xe_exec_queue_num_job_inflight(q) >= max_job;
}

/**
 * xe_exec_queue_is_idle() - Whether an exec_queue is idle.
 * @q: The exec_queue
 *
 * FIXME: Need to determine what to use as the short-lived
 * timeline lock for the exec_queues, so that the return value
 * of this function becomes more than just an advisory
 * snapshot in time. The timeline lock must protect the
 * seqno from racing submissions on the same exec_queue.
 * Typically vm->resv, but user-created timeline locks use the migrate vm
 * and never grab the migrate vm->resv, so we have a race there.
 *
 * Return: True if the exec_queue is idle, false otherwise.
 */
bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
{
	if (xe_exec_queue_is_parallel(q)) {
		int i;

		for (i = 0; i < q->width; ++i) {
			if (xe_lrc_seqno(q->lrc[i]) !=
			    q->lrc[i]->fence_ctx.next_seqno - 1)
				return false;
		}

		return true;
	}

	return xe_lrc_seqno(q->lrc[0]) ==
		q->lrc[0]->fence_ctx.next_seqno - 1;
}
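
/*
 * Illustrative note (not part of the driver): fence_ctx.next_seqno is the
 * seqno the next job will be assigned, so with nothing in flight the last
 * signalled seqno read back from the LRC is next_seqno - 1. E.g. after
 * three jobs have been submitted and all have signalled, next_seqno == 4
 * and xe_lrc_seqno() == 3, so the queue is idle; if the read-back were
 * still 2, one job would remain in flight (4 - 2 - 1 == 1 above).
 */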

/**
 * xe_exec_queue_update_run_ticks() - Update run time in ticks for this exec
 * queue from HW
 * @q: The exec queue
 *
 * Update the timestamp saved by HW for this exec queue and save the run
 * ticks calculated from the delta since the last update.
 */
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
{
	struct xe_device *xe = gt_to_xe(q->gt);
	struct xe_lrc *lrc;
	u64 old_ts, new_ts;
	int idx;

	/*
	 * Jobs that are executed by the kernel don't have a corresponding
	 * xe_file and thus are not accounted.
	 */
	if (!q->xef)
		return;

	/* Synchronize with unbind while holding the xe file open */
	if (!drm_dev_enter(&xe->drm, &idx))
		return;
	/*
	 * Only sample the first LRC. For parallel submission, all of them are
	 * scheduled together and we compensate for that below by multiplying
	 * by width - this may introduce errors if that premise is not true and
	 * they don't exit 100% aligned. On the other hand, looping through
	 * the LRCs and reading them at different times could also introduce
	 * errors.
	 */
	lrc = q->lrc[0];
	new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
	q->xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;

	drm_dev_exit(idx);
}
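
/*
 * Illustrative note (not part of the driver): for a parallel queue of
 * width 2 whose first LRC timestamp advanced from 1000 to 1500 since the
 * last sample, xe_lrc_update_timestamp() returns new_ts = 1500 with
 * old_ts = 1000, and (1500 - 1000) * 2 = 1000 ticks are added to
 * q->xef->run_ticks[q->class].
 */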

/**
 * xe_exec_queue_kill - permanently stop all execution from an exec queue
 * @q: The exec queue
 *
 * This function permanently stops all activity on an exec queue. If the queue
 * is actively executing on the HW, it will be kicked off the engine; any
 * pending jobs are discarded and all future submissions are rejected.
 * This function is safe to call multiple times.
 */
void xe_exec_queue_kill(struct xe_exec_queue *q)
{
	struct xe_exec_queue *eq = q, *next;

	list_for_each_entry_safe(eq, next, &eq->multi_gt_list,
				 multi_gt_link) {
		q->ops->kill(eq);
		xe_vm_remove_compute_exec_queue(q->vm, eq);
	}

	q->ops->kill(q);
	xe_vm_remove_compute_exec_queue(q->vm, q);
}

int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_destroy *args = data;
	struct xe_exec_queue *q;

	if (XE_IOCTL_DBG(xe, args->pad) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
	if (q)
		atomic_inc(&xef->exec_queue.pending_removal);
	mutex_unlock(&xef->exec_queue.lock);

	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	if (q->vm && q->hwe->hw_engine_group)
		xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q);

	xe_exec_queue_kill(q);

	trace_xe_exec_queue_close(q);
	xe_exec_queue_put(q);

	return 0;
}

static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
						    struct xe_vm *vm)
{
	if (q->flags & EXEC_QUEUE_FLAG_VM) {
		lockdep_assert_held(&vm->lock);
	} else {
		xe_vm_assert_held(vm);
		lockdep_assert_held(&q->hwe->hw_engine_group->mode_sem);
	}
}

/**
 * xe_exec_queue_last_fence_put() - Drop ref to last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 */
void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	xe_exec_queue_last_fence_put_unlocked(q);
}

/**
 * xe_exec_queue_last_fence_put_unlocked() - Drop ref to last fence unlocked
 * @q: The exec queue
 *
 * Only safe to be called from xe_exec_queue_destroy().
 */
void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
{
	if (q->last_fence) {
		dma_fence_put(q->last_fence);
		q->last_fence = NULL;
	}
}

/**
 * xe_exec_queue_last_fence_get() - Get last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Get last fence, takes a ref
 *
 * Returns: last fence if not signaled, dma fence stub if signaled
 */
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
					       struct xe_vm *vm)
{
	struct dma_fence *fence;

	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	if (q->last_fence &&
	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
		xe_exec_queue_last_fence_put(q, vm);

	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
	dma_fence_get(fence);
	return fence;
}

/**
 * xe_exec_queue_last_fence_get_for_resume() - Get last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Get last fence, takes a ref. Only safe to be called in the context of
 * resuming the hw engine group's long-running exec queue, when the group
 * semaphore is held.
 *
 * Returns: last fence if not signaled, dma fence stub if signaled
 */
struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *q,
							  struct xe_vm *vm)
{
	struct dma_fence *fence;

	lockdep_assert_held_write(&q->hwe->hw_engine_group->mode_sem);

	if (q->last_fence &&
	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
		xe_exec_queue_last_fence_put_unlocked(q);

	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
	dma_fence_get(fence);
	return fence;
}

/**
 * xe_exec_queue_last_fence_set() - Set last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 * @fence: The fence
 *
 * Set the last fence for the engine. This takes a reference on the fence;
 * when closing the engine, xe_exec_queue_last_fence_put() should be called
 * to drop it.
 */
void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
				  struct dma_fence *fence)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	xe_exec_queue_last_fence_put(q, vm);
	q->last_fence = dma_fence_get(fence);
}
102296e7ebb2SMatthew Brost 
102396e7ebb2SMatthew Brost /**
102496e7ebb2SMatthew Brost  * xe_exec_queue_last_fence_test_dep - Test last fence dependency of queue
102596e7ebb2SMatthew Brost  * @q: The exec queue
102696e7ebb2SMatthew Brost  * @vm: The VM the engine does a bind or exec for
102796e7ebb2SMatthew Brost  *
102896e7ebb2SMatthew Brost  * Returns:
102996e7ebb2SMatthew Brost  * -ETIME if there exists an unsignalled last fence dependency, zero otherwise.
103096e7ebb2SMatthew Brost  */
xe_exec_queue_last_fence_test_dep(struct xe_exec_queue * q,struct xe_vm * vm)103196e7ebb2SMatthew Brost int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q, struct xe_vm *vm)
103296e7ebb2SMatthew Brost {
103396e7ebb2SMatthew Brost 	struct dma_fence *fence;
103496e7ebb2SMatthew Brost 	int err = 0;
103596e7ebb2SMatthew Brost 
103696e7ebb2SMatthew Brost 	fence = xe_exec_queue_last_fence_get(q, vm);
103796e7ebb2SMatthew Brost 	if (fence) {
103896e7ebb2SMatthew Brost 		err = test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) ?
103996e7ebb2SMatthew Brost 			0 : -ETIME;
104096e7ebb2SMatthew Brost 		dma_fence_put(fence);
104196e7ebb2SMatthew Brost 	}
104296e7ebb2SMatthew Brost 
104396e7ebb2SMatthew Brost 	return err;
104496e7ebb2SMatthew Brost }
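
/*
 * Illustrative sketch (not part of the driver): a caller that must not
 * block can use the test above to bail out early instead of waiting on
 * the last fence, e.g.:
 *
 *	err = xe_exec_queue_last_fence_test_dep(q, vm);
 *	if (err == -ETIME)
 *		return err;
 *
 * i.e. previous work on the queue is still pending and the caller should
 * retry later rather than wait.
 */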