// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
 */

#include <linux/kref.h>
#include <linux/uaccess.h>

#include "msm_gpu.h"

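/*
 * Update the context's sysprof state:
 *   0 - profiling disabled for this context
 *   1 - hold a reference on gpu->sysprof_active
 *   2 - additionally hold a pm_runtime reference so the GPU stays powered
 */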
int msm_file_private_set_sysprof(struct msm_file_private *ctx,
		struct msm_gpu *gpu, int sysprof)
{
	/*
	 * Since pm_runtime and sysprof_active are both refcounts, we
	 * apply the new value first, and then unwind the previous
	 * value
	 */

	switch (sysprof) {
	default:
		return UERR(EINVAL, gpu->dev, "Invalid sysprof: %d", sysprof);
	case 2:
		pm_runtime_get_sync(&gpu->pdev->dev);
		fallthrough;
	case 1:
		refcount_inc(&gpu->sysprof_active);
		fallthrough;
	case 0:
		break;
	}

	/* unwind old value: */
	switch (ctx->sysprof) {
	case 2:
		pm_runtime_put_autosuspend(&gpu->pdev->dev);
		fallthrough;
	case 1:
		refcount_dec(&gpu->sysprof_active);
		fallthrough;
	case 0:
		break;
	}

	ctx->sysprof = sysprof;

	return 0;
}

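/*
 * Final cleanup when the last reference to the file private is dropped:
 * tear down any scheduler entities created for this context, release the
 * address space, and free the associated strings.
 */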
void __msm_file_private_destroy(struct kref *kref)
{
	struct msm_file_private *ctx = container_of(kref,
		struct msm_file_private, ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(ctx->entities); i++) {
		if (!ctx->entities[i])
			continue;

		drm_sched_entity_destroy(ctx->entities[i]);
		kfree(ctx->entities[i]);
	}

	msm_gem_address_space_put(ctx->aspace);
	kfree(ctx->comm);
	kfree(ctx->cmdline);
	kfree(ctx);
}

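/*
 * Called when the last reference to a submitqueue is dropped: release the
 * fence idr and the reference held on the owning file context.
 */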
void msm_submitqueue_destroy(struct kref *kref)
{
	struct msm_gpu_submitqueue *queue = container_of(kref,
		struct msm_gpu_submitqueue, ref);

	idr_destroy(&queue->fence_idr);

	msm_file_private_put(queue->ctx);

	kfree(queue);
}

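/*
 * Look up a submitqueue by id, taking a reference on it.  The caller is
 * responsible for dropping the reference with msm_submitqueue_put().
 */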
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
		u32 id)
{
	struct msm_gpu_submitqueue *entry;

	if (!ctx)
		return NULL;

	read_lock(&ctx->queuelock);

	list_for_each_entry(entry, &ctx->submitqueues, node) {
		if (entry->id == id) {
			kref_get(&entry->ref);
			read_unlock(&ctx->queuelock);

			return entry;
		}
	}

	read_unlock(&ctx->queuelock);
	return NULL;
}

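/*
 * Drop all remaining submitqueues for the context when the drm file is
 * closed.
 */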
void msm_submitqueue_close(struct msm_file_private *ctx)
{
	struct msm_gpu_submitqueue *entry, *tmp;

	if (!ctx)
		return;

	/*
	 * No lock needed in close and there won't
	 * be any more user ioctls coming our way
	 */
	list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node) {
		list_del(&entry->node);
		msm_submitqueue_put(entry);
	}
}

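/*
 * Lazily create the scheduler entity for a given (ring, priority) pair.
 * Entities are stored in the file context and shared between all
 * submitqueues of that context which map to the same ring and priority.
 */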
static struct drm_sched_entity *
get_sched_entity(struct msm_file_private *ctx, struct msm_ringbuffer *ring,
		 unsigned ring_nr, enum drm_sched_priority sched_prio)
{
	static DEFINE_MUTEX(entity_lock);
	unsigned idx = (ring_nr * NR_SCHED_PRIORITIES) + sched_prio;

	/* We should have already validated that the requested priority is
	 * valid by the time we get here.
	 */
	if (WARN_ON(idx >= ARRAY_SIZE(ctx->entities)))
		return ERR_PTR(-EINVAL);

	mutex_lock(&entity_lock);

	if (!ctx->entities[idx]) {
		struct drm_sched_entity *entity;
		struct drm_gpu_scheduler *sched = &ring->sched;
		int ret;

		entity = kzalloc(sizeof(*ctx->entities[idx]), GFP_KERNEL);
		if (!entity) {
			/* Guard against allocation failure before handing
			 * the pointer to drm_sched_entity_init():
			 */
			mutex_unlock(&entity_lock);
			return ERR_PTR(-ENOMEM);
		}

		ret = drm_sched_entity_init(entity, sched_prio, &sched, 1, NULL);
		if (ret) {
			mutex_unlock(&entity_lock);
			kfree(entity);
			return ERR_PTR(ret);
		}

		ctx->entities[idx] = entity;
	}

	mutex_unlock(&entity_lock);

	return ctx->entities[idx];
}

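/*
 * Create a new submitqueue for the given file context.  The requested
 * userspace priority is converted to a (ring, scheduler priority) pair,
 * and the queue is attached to the shared scheduler entity for that pair.
 */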
int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
		u32 prio, u32 flags, u32 *id)
{
	struct msm_drm_private *priv = drm->dev_private;
	struct msm_gpu_submitqueue *queue;
	enum drm_sched_priority sched_prio;
	extern int enable_preemption;
	bool preemption_supported;
	unsigned ring_nr;
	int ret;

	if (!ctx)
		return -ENODEV;

	if (!priv->gpu)
		return -ENODEV;

	preemption_supported = priv->gpu->nr_rings == 1 && enable_preemption != 0;

	if (flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT && preemption_supported)
		return -EINVAL;

	ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio);
	if (ret)
		return ret;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);

	if (!queue)
		return -ENOMEM;

	kref_init(&queue->ref);
	queue->flags = flags;
	queue->ring_nr = ring_nr;

	queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
					 ring_nr, sched_prio);
	if (IS_ERR(queue->entity)) {
		ret = PTR_ERR(queue->entity);
		kfree(queue);
		return ret;
	}

	write_lock(&ctx->queuelock);

	queue->ctx = msm_file_private_get(ctx);
	queue->id = ctx->queueid++;

	if (id)
		*id = queue->id;

	idr_init(&queue->fence_idr);
	spin_lock_init(&queue->idr_lock);
	mutex_init(&queue->lock);

	list_add_tail(&queue->node, &ctx->submitqueues);

	write_unlock(&ctx->queuelock);

	return 0;
}

/*
 * Create the default submit-queue (id==0), used for backwards compatibility
 * for userspace that pre-dates the introduction of submitqueues.
 */
int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
{
	struct msm_drm_private *priv = drm->dev_private;
	int default_prio, max_priority;

	if (!priv->gpu)
		return -ENODEV;

	max_priority = (priv->gpu->nr_rings * NR_SCHED_PRIORITIES) - 1;

	/*
	 * Pick a medium priority level as default.  Lower numeric value is
	 * higher priority, so round-up to pick a priority that is not higher
	 * than the middle priority level.
	 */
	default_prio = DIV_ROUND_UP(max_priority, 2);

	return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
}

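/*
 * Copy the queue's fault counter back to userspace.  A zero length request
 * is used by userspace to discover the expected size of the data.
 */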
static int msm_submitqueue_query_faults(struct msm_gpu_submitqueue *queue,
		struct drm_msm_submitqueue_query *args)
{
	size_t size = min_t(size_t, args->len, sizeof(queue->faults));
	int ret;

	/* If a zero length was passed in, return the data size we expect */
	if (!args->len) {
		args->len = sizeof(queue->faults);
		return 0;
	}

	/* Set the length to the actual size of the data */
	args->len = size;

	ret = copy_to_user(u64_to_user_ptr(args->data), &queue->faults, size);

	return ret ? -EFAULT : 0;
}

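/*
 * Handle a submitqueue query from userspace.  Only the FAULTS param is
 * currently supported.
 */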
int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
		struct drm_msm_submitqueue_query *args)
{
	struct msm_gpu_submitqueue *queue;
	int ret = -EINVAL;

	if (args->pad)
		return -EINVAL;

	queue = msm_submitqueue_get(ctx, args->id);
	if (!queue)
		return -ENOENT;

	if (args->param == MSM_SUBMITQUEUE_PARAM_FAULTS)
		ret = msm_submitqueue_query_faults(queue, args);

	msm_submitqueue_put(queue);

	return ret;
}

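/*
 * Remove a submitqueue by id and drop the list's reference to it.  The
 * default queue (id 0) cannot be removed by userspace; it is only torn
 * down when the file is closed.
 */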
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
{
	struct msm_gpu_submitqueue *entry;

	if (!ctx)
		return 0;

	/*
	 * id 0 is the "default" queue and can't be destroyed
	 * by the user
	 */
	if (!id)
		return -ENOENT;

	write_lock(&ctx->queuelock);

	list_for_each_entry(entry, &ctx->submitqueues, node) {
		if (entry->id == id) {
			list_del(&entry->node);
			write_unlock(&ctx->queuelock);

			msm_submitqueue_put(entry);
			return 0;
		}
	}

	write_unlock(&ctx->queuelock);
	return -ENOENT;
}