xref: /linux-6.15/fs/fuse/dev_uring.c (revision c090c8ab)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * FUSE: Filesystem in Userspace
4  * Copyright (c) 2023-2024 DataDirect Networks.
5  */
6 
7 #include "fuse_i.h"
8 #include "dev_uring_i.h"
9 #include "fuse_dev_i.h"
10 
11 #include <linux/fs.h>
12 #include <linux/io_uring/cmd.h>
13 
14 static bool __read_mostly enable_uring;
15 module_param(enable_uring, bool, 0644);
16 MODULE_PARM_DESC(enable_uring,
17 		 "Enable userspace communication through io-uring");
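/*
 * Example (illustrative): enable with "modprobe fuse enable_uring=1", via
 * "fuse.enable_uring=1" on the kernel command line for a built-in fuse, or
 * at runtime through /sys/module/fuse/parameters/enable_uring (the parameter
 * is writable, mode 0644).
 */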
18 
19 #define FUSE_URING_IOV_SEGS 2 /* header and payload */
20 
21 
22 bool fuse_uring_enabled(void)
23 {
24 	return enable_uring;
25 }
26 
27 static void fuse_uring_req_end(struct fuse_ring_ent *ent, struct fuse_req *req,
28 			       int error)
29 {
30 	ent->fuse_req = NULL;
31 	if (error)
32 		req->out.h.error = error;
33 
34 	clear_bit(FR_SENT, &req->flags);
35 	fuse_request_end(req);
36 }
37 
38 void fuse_uring_destruct(struct fuse_conn *fc)
39 {
40 	struct fuse_ring *ring = fc->ring;
41 	int qid;
42 
43 	if (!ring)
44 		return;
45 
46 	for (qid = 0; qid < ring->nr_queues; qid++) {
47 		struct fuse_ring_queue *queue = ring->queues[qid];
48 
49 		if (!queue)
50 			continue;
51 
52 		WARN_ON(!list_empty(&queue->ent_avail_queue));
53 		WARN_ON(!list_empty(&queue->ent_w_req_queue));
54 		WARN_ON(!list_empty(&queue->ent_commit_queue));
55 		WARN_ON(!list_empty(&queue->ent_in_userspace));
56 
57 		kfree(queue->fpq.processing);
58 		kfree(queue);
59 		ring->queues[qid] = NULL;
60 	}
61 
62 	kfree(ring->queues);
63 	kfree(ring);
64 	fc->ring = NULL;
65 }
66 
67 /*
68  * Basic ring setup for this connection based on the provided configuration
69  */
70 static struct fuse_ring *fuse_uring_create(struct fuse_conn *fc)
71 {
72 	struct fuse_ring *ring;
73 	size_t nr_queues = num_possible_cpus();
74 	struct fuse_ring *res = NULL;
75 	size_t max_payload_size;
76 
77 	ring = kzalloc(sizeof(*fc->ring), GFP_KERNEL_ACCOUNT);
78 	if (!ring)
79 		return NULL;
80 
81 	ring->queues = kcalloc(nr_queues, sizeof(struct fuse_ring_queue *),
82 			       GFP_KERNEL_ACCOUNT);
83 	if (!ring->queues)
84 		goto out_err;
85 
86 	max_payload_size = max(FUSE_MIN_READ_BUFFER, fc->max_write);
87 	max_payload_size = max(max_payload_size, fc->max_pages * PAGE_SIZE);
88 
89 	spin_lock(&fc->lock);
90 	if (fc->ring) {
91 		/* race, another thread created the ring in the meantime */
92 		spin_unlock(&fc->lock);
93 		res = fc->ring;
94 		goto out_err;
95 	}
96 
97 	fc->ring = ring;
98 	ring->nr_queues = nr_queues;
99 	ring->fc = fc;
100 	ring->max_payload_sz = max_payload_size;
101 
102 	spin_unlock(&fc->lock);
103 	return ring;
104 
105 out_err:
106 	kfree(ring->queues);
107 	kfree(ring);
108 	return res;
109 }
110 
111 static struct fuse_ring_queue *fuse_uring_create_queue(struct fuse_ring *ring,
112 						       int qid)
113 {
114 	struct fuse_conn *fc = ring->fc;
115 	struct fuse_ring_queue *queue;
116 	struct list_head *pq;
117 
118 	queue = kzalloc(sizeof(*queue), GFP_KERNEL_ACCOUNT);
119 	if (!queue)
120 		return NULL;
121 	pq = kcalloc(FUSE_PQ_HASH_SIZE, sizeof(struct list_head), GFP_KERNEL);
122 	if (!pq) {
123 		kfree(queue);
124 		return NULL;
125 	}
126 
127 	queue->qid = qid;
128 	queue->ring = ring;
129 	spin_lock_init(&queue->lock);
130 
131 	INIT_LIST_HEAD(&queue->ent_avail_queue);
132 	INIT_LIST_HEAD(&queue->ent_commit_queue);
133 	INIT_LIST_HEAD(&queue->ent_w_req_queue);
134 	INIT_LIST_HEAD(&queue->ent_in_userspace);
135 	INIT_LIST_HEAD(&queue->fuse_req_queue);
136 
137 	queue->fpq.processing = pq;
138 	fuse_pqueue_init(&queue->fpq);
139 
140 	spin_lock(&fc->lock);
141 	if (ring->queues[qid]) {
142 		spin_unlock(&fc->lock);
143 		kfree(queue->fpq.processing);
144 		kfree(queue);
145 		return ring->queues[qid];
146 	}
147 
148 	/*
149 	 * WRITE_ONCE and lock, as readers of ring->queues[] mostly don't
150 	 * take the lock at all
150 	 */
151 	WRITE_ONCE(ring->queues[qid], queue);
152 	spin_unlock(&fc->lock);
153 
154 	return queue;
155 }
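/*
 * The WRITE_ONCE() publish above pairs with lockless loads of
 * ring->queues[qid] on the hot paths.  A minimal sketch of the reader side
 * (illustrative only; this is what e.g. fuse_uring_commit_fetch() relies on
 * without taking fc->lock):
 *
 *	struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]);
 *
 *	if (!queue)
 *		return -ENOTCONN;	// queue not registered yet
 *
 * A published queue pointer is only cleared again in fuse_uring_destruct().
 */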
156 
157 /*
158  * Checks for errors and stores them in the request
159  */
160 static int fuse_uring_out_header_has_err(struct fuse_out_header *oh,
161 					 struct fuse_req *req,
162 					 struct fuse_conn *fc)
163 {
164 	int err;
165 
166 	err = -EINVAL;
167 	if (oh->unique == 0) {
168 		/* Not supported through io-uring yet */
169 		pr_warn_once("notify through fuse-io-uring not supported\n");
170 		goto err;
171 	}
172 
173 	if (oh->error <= -ERESTARTSYS || oh->error > 0)
174 		goto err;
175 
176 	if (oh->error) {
177 		err = oh->error;
178 		goto err;
179 	}
180 
181 	err = -ENOENT;
182 	if ((oh->unique & ~FUSE_INT_REQ_BIT) != req->in.h.unique) {
183 		pr_warn_ratelimited("unique mismatch, expected: %llu got %llu\n",
184 				    req->in.h.unique,
185 				    oh->unique & ~FUSE_INT_REQ_BIT);
186 		goto err;
187 	}
188 
189 	/*
190 	 * Is it an interrupt reply ID?
191 	 * XXX: Not supported through fuse-io-uring yet, it should not even
192 	 *      find the request - should not happen.
193 	 */
194 	WARN_ON_ONCE(oh->unique & FUSE_INT_REQ_BIT);
195 
196 	err = 0;
197 err:
198 	return err;
199 }
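/*
 * For reference, a reply header that passes the checks above would be set up
 * by the server roughly as follows (hypothetical userspace sketch, not part
 * of this file):
 *
 *	struct fuse_out_header oh = {
 *		.unique = in_unique,	// echo the request's unique id
 *		.error  = 0,		// or a negative errno above -ERESTARTSYS
 *	};
 *
 * Notify messages (unique == 0) and interrupt replies (FUSE_INT_REQ_BIT set)
 * are not supported through fuse-io-uring.  The reply payload size is passed
 * separately in fuse_uring_ent_in_out.payload_sz.
 */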
200 
201 static int fuse_uring_copy_from_ring(struct fuse_ring *ring,
202 				     struct fuse_req *req,
203 				     struct fuse_ring_ent *ent)
204 {
205 	struct fuse_copy_state cs;
206 	struct fuse_args *args = req->args;
207 	struct iov_iter iter;
208 	int err;
209 	struct fuse_uring_ent_in_out ring_in_out;
210 
211 	err = copy_from_user(&ring_in_out, &ent->headers->ring_ent_in_out,
212 			     sizeof(ring_in_out));
213 	if (err)
214 		return -EFAULT;
215 
216 	err = import_ubuf(ITER_SOURCE, ent->payload, ring->max_payload_sz,
217 			  &iter);
218 	if (err)
219 		return err;
220 
221 	fuse_copy_init(&cs, 0, &iter);
222 	cs.is_uring = 1;
223 	cs.req = req;
224 
225 	return fuse_copy_out_args(&cs, args, ring_in_out.payload_sz);
226 }
227 
228 /*
229  * Copy data from the req to the ring buffer
230  */
231 static int fuse_uring_args_to_ring(struct fuse_ring *ring, struct fuse_req *req,
232 				   struct fuse_ring_ent *ent)
233 {
234 	struct fuse_copy_state cs;
235 	struct fuse_args *args = req->args;
236 	struct fuse_in_arg *in_args = args->in_args;
237 	int num_args = args->in_numargs;
238 	int err;
239 	struct iov_iter iter;
240 	struct fuse_uring_ent_in_out ent_in_out = {
241 		.flags = 0,
242 		.commit_id = req->in.h.unique,
243 	};
244 
245 	err = import_ubuf(ITER_DEST, ent->payload, ring->max_payload_sz, &iter);
246 	if (err) {
247 		pr_info_ratelimited("fuse: Import of user buffer failed\n");
248 		return err;
249 	}
250 
251 	fuse_copy_init(&cs, 1, &iter);
252 	cs.is_uring = 1;
253 	cs.req = req;
254 
255 	if (num_args > 0) {
256 		/*
257 		 * The expectation is that the first argument is the per-op header.
258 		 * Some op codes have a zero-size header.
259 		 */
260 		if (args->in_args[0].size > 0) {
261 			err = copy_to_user(&ent->headers->op_in, in_args->value,
262 					   in_args->size);
263 			if (err) {
264 				pr_info_ratelimited(
265 					"Copying the header failed.\n");
266 				return -EFAULT;
267 			}
268 		}
269 		in_args++;
270 		num_args--;
271 	}
272 
273 	/* copy the payload */
274 	err = fuse_copy_args(&cs, num_args, args->in_pages,
275 			     (struct fuse_arg *)in_args, 0);
276 	if (err) {
277 		pr_info_ratelimited("%s fuse_copy_args failed\n", __func__);
278 		return err;
279 	}
280 
281 	ent_in_out.payload_sz = cs.ring.copied_sz;
282 	err = copy_to_user(&ent->headers->ring_ent_in_out, &ent_in_out,
283 			   sizeof(ent_in_out));
284 	return err ? -EFAULT : 0;
285 }
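/*
 * Resulting layout as seen by the server after the copy above (descriptive
 * summary, field names from struct fuse_uring_req_header):
 *
 *	headers->op_in            first in-arg, the per-op header (may be empty)
 *	headers->ring_ent_in_out  .commit_id  = req->in.h.unique
 *	                          .payload_sz = bytes copied into the payload
 *	payload                   remaining in-args, copied via fuse_copy_args()
 *
 * headers->in_out (the struct fuse_in_header) is written separately in
 * fuse_uring_copy_to_ring().
 */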
286 
287 static int fuse_uring_copy_to_ring(struct fuse_ring_ent *ent,
288 				   struct fuse_req *req)
289 {
290 	struct fuse_ring_queue *queue = ent->queue;
291 	struct fuse_ring *ring = queue->ring;
292 	int err;
293 
294 	err = -EIO;
295 	if (WARN_ON(ent->state != FRRS_FUSE_REQ)) {
296 		pr_err("qid=%d ring-req=%p invalid state %d on send\n",
297 		       queue->qid, ent, ent->state);
298 		return err;
299 	}
300 
301 	err = -EINVAL;
302 	if (WARN_ON(req->in.h.unique == 0))
303 		return err;
304 
305 	/* copy the request */
306 	err = fuse_uring_args_to_ring(ring, req, ent);
307 	if (unlikely(err)) {
308 		pr_info_ratelimited("Copy to ring failed: %d\n", err);
309 		return err;
310 	}
311 
312 	/* copy fuse_in_header */
313 	err = copy_to_user(&ent->headers->in_out, &req->in.h,
314 			   sizeof(req->in.h));
315 	if (err) {
316 		err = -EFAULT;
317 		return err;
318 	}
319 
320 	return 0;
321 }
322 
323 static int fuse_uring_prepare_send(struct fuse_ring_ent *ent,
324 				   struct fuse_req *req)
325 {
326 	int err;
327 
328 	err = fuse_uring_copy_to_ring(ent, req);
329 	if (!err)
330 		set_bit(FR_SENT, &req->flags);
331 	else
332 		fuse_uring_req_end(ent, req, err);
333 
334 	return err;
335 }
336 
337 /*
338  * Write data to the ring buffer and send the request to userspace;
339  * userspace will read it.
340  * This is comparable to a classical read(/dev/fuse).
341  */
342 static int fuse_uring_send_next_to_ring(struct fuse_ring_ent *ent,
343 					struct fuse_req *req,
344 					unsigned int issue_flags)
345 {
346 	struct fuse_ring_queue *queue = ent->queue;
347 	int err;
348 	struct io_uring_cmd *cmd;
349 
350 	err = fuse_uring_prepare_send(ent, req);
351 	if (err)
352 		return err;
353 
354 	spin_lock(&queue->lock);
355 	cmd = ent->cmd;
356 	ent->cmd = NULL;
357 	ent->state = FRRS_USERSPACE;
358 	list_move(&ent->list, &queue->ent_in_userspace);
359 	spin_unlock(&queue->lock);
360 
361 	io_uring_cmd_done(cmd, 0, 0, issue_flags);
362 	return 0;
363 }
364 
365 /*
366  * Make a ring entry available for fuse_req assignment
367  */
368 static void fuse_uring_ent_avail(struct fuse_ring_ent *ent,
369 				 struct fuse_ring_queue *queue)
370 {
371 	WARN_ON_ONCE(!ent->cmd);
372 	list_move(&ent->list, &queue->ent_avail_queue);
373 	ent->state = FRRS_AVAILABLE;
374 }
375 
376 /* Used to find the request on SQE commit */
377 static void fuse_uring_add_to_pq(struct fuse_ring_ent *ent,
378 				 struct fuse_req *req)
379 {
380 	struct fuse_ring_queue *queue = ent->queue;
381 	struct fuse_pqueue *fpq = &queue->fpq;
382 	unsigned int hash;
383 
384 	req->ring_entry = ent;
385 	hash = fuse_req_hash(req->in.h.unique);
386 	list_move_tail(&req->list, &fpq->processing[hash]);
387 }
388 
389 /*
390  * Assign the given fuse request to the ring entry
391  */
392 static void fuse_uring_add_req_to_ring_ent(struct fuse_ring_ent *ent,
393 					   struct fuse_req *req)
394 {
395 	struct fuse_ring_queue *queue = ent->queue;
396 	struct fuse_conn *fc = req->fm->fc;
397 	struct fuse_iqueue *fiq = &fc->iq;
398 
399 	lockdep_assert_held(&queue->lock);
400 
401 	if (WARN_ON_ONCE(ent->state != FRRS_AVAILABLE &&
402 			 ent->state != FRRS_COMMIT)) {
403 		pr_warn("%s qid=%d state=%d\n", __func__, ent->queue->qid,
404 			ent->state);
405 	}
406 
407 	spin_lock(&fiq->lock);
408 	clear_bit(FR_PENDING, &req->flags);
409 	spin_unlock(&fiq->lock);
410 	ent->fuse_req = req;
411 	ent->state = FRRS_FUSE_REQ;
412 	list_move(&ent->list, &queue->ent_w_req_queue);
413 	fuse_uring_add_to_pq(ent, req);
414 }
415 
416 /* Fetch the next fuse request if available */
417 static struct fuse_req *fuse_uring_ent_assign_req(struct fuse_ring_ent *ent)
418 	__must_hold(&queue->lock)
419 {
420 	struct fuse_req *req;
421 	struct fuse_ring_queue *queue = ent->queue;
422 	struct list_head *req_queue = &queue->fuse_req_queue;
423 
424 	lockdep_assert_held(&queue->lock);
425 
426 	/* get and assign the next request while still holding the lock */
427 	req = list_first_entry_or_null(req_queue, struct fuse_req, list);
428 	if (req)
429 		fuse_uring_add_req_to_ring_ent(ent, req);
430 
431 	return req;
432 }
433 
434 /*
435  * Read data from the ring buffer, which user space has written to.
436  * This is comparable to the handling of a classical write(/dev/fuse).
437  * Also make the ring request available again for new fuse requests.
438  */
439 static void fuse_uring_commit(struct fuse_ring_ent *ent, struct fuse_req *req,
440 			      unsigned int issue_flags)
441 {
442 	struct fuse_ring *ring = ent->queue->ring;
443 	struct fuse_conn *fc = ring->fc;
444 	ssize_t err = 0;
445 
446 	err = copy_from_user(&req->out.h, &ent->headers->in_out,
447 			     sizeof(req->out.h));
448 	if (err) {
449 		req->out.h.error = -EFAULT;
450 		goto out;
451 	}
452 
453 	err = fuse_uring_out_header_has_err(&req->out.h, req, fc);
454 	if (err) {
455 		/* req->out.h.error already set */
456 		goto out;
457 	}
458 
459 	err = fuse_uring_copy_from_ring(ring, req, ent);
460 out:
461 	fuse_uring_req_end(ent, req, err);
462 }
463 
464 /*
465  * Get the next fuse req and send it
466  */
467 static void fuse_uring_next_fuse_req(struct fuse_ring_ent *ent,
468 				     struct fuse_ring_queue *queue,
469 				     unsigned int issue_flags)
470 {
471 	int err;
472 	struct fuse_req *req;
473 
474 retry:
475 	spin_lock(&queue->lock);
476 	fuse_uring_ent_avail(ent, queue);
477 	req = fuse_uring_ent_assign_req(ent);
478 	spin_unlock(&queue->lock);
479 
480 	if (req) {
481 		err = fuse_uring_send_next_to_ring(ent, req, issue_flags);
482 		if (err)
483 			goto retry;
484 	}
485 }
486 
487 static int fuse_ring_ent_set_commit(struct fuse_ring_ent *ent)
488 {
489 	struct fuse_ring_queue *queue = ent->queue;
490 
491 	lockdep_assert_held(&queue->lock);
492 
493 	if (WARN_ON_ONCE(ent->state != FRRS_USERSPACE))
494 		return -EIO;
495 
496 	ent->state = FRRS_COMMIT;
497 	list_move(&ent->list, &queue->ent_commit_queue);
498 
499 	return 0;
500 }
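/*
 * Summary of the ring entry states as driven by the helpers above
 * (descriptive only, no new behaviour):
 *
 *	FRRS_AVAILABLE  fuse_uring_ent_avail():           idle, can take a request
 *	FRRS_FUSE_REQ   fuse_uring_add_req_to_ring_ent(): fuse request assigned
 *	FRRS_USERSPACE  fuse_uring_send_next_to_ring():   handed over to the server
 *	FRRS_COMMIT     fuse_ring_ent_set_commit():       reply is being committed
 *
 * After the commit, fuse_uring_next_fuse_req() puts the entry back on the
 * available list (FRRS_AVAILABLE) and assigns the next queued request, if any.
 */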
501 
502 /* FUSE_IO_URING_CMD_COMMIT_AND_FETCH handler */
503 static int fuse_uring_commit_fetch(struct io_uring_cmd *cmd, int issue_flags,
504 				   struct fuse_conn *fc)
505 {
506 	const struct fuse_uring_cmd_req *cmd_req = io_uring_sqe_cmd(cmd->sqe);
507 	struct fuse_ring_ent *ent;
508 	int err;
509 	struct fuse_ring *ring = fc->ring;
510 	struct fuse_ring_queue *queue;
511 	uint64_t commit_id = READ_ONCE(cmd_req->commit_id);
512 	unsigned int qid = READ_ONCE(cmd_req->qid);
513 	struct fuse_pqueue *fpq;
514 	struct fuse_req *req;
515 
516 	err = -ENOTCONN;
517 	if (!ring)
518 		return err;
519 
520 	if (qid >= ring->nr_queues)
521 		return -EINVAL;
522 
523 	queue = ring->queues[qid];
524 	if (!queue)
525 		return err;
526 	fpq = &queue->fpq;
527 
528 	spin_lock(&queue->lock);
529 	/* Find the request based on the unique ID of the fuse request.
530 	 * This should get revised, as it needs a hash calculation and a list
531 	 * search, and the full struct fuse_pqueue is needed (memory overhead),
532 	 * as well as the link from req to ring_ent.
533 	 */
534 	req = fuse_request_find(fpq, commit_id);
535 	err = -ENOENT;
536 	if (!req) {
537 		pr_info("qid=%d commit_id %llu not found\n", queue->qid,
538 			commit_id);
539 		spin_unlock(&queue->lock);
540 		return err;
541 	}
542 	list_del_init(&req->list);
543 	ent = req->ring_entry;
544 	req->ring_entry = NULL;
545 
546 	err = fuse_ring_ent_set_commit(ent);
547 	if (err != 0) {
548 		pr_info_ratelimited("qid=%d commit_id %llu state %d\n",
549 				    queue->qid, commit_id, ent->state);
550 		spin_unlock(&queue->lock);
551 		req->out.h.error = err;
552 		clear_bit(FR_SENT, &req->flags);
553 		fuse_request_end(req);
554 		return err;
555 	}
556 
557 	ent->cmd = cmd;
558 	spin_unlock(&queue->lock);
559 
560 	/* without the queue lock, as other locks are taken */
561 	fuse_uring_commit(ent, req, issue_flags);
562 
563 	/*
564 	 * Fetching the next request is absolutely required as queued
565 	 * fuse requests would otherwise not get processed - committing
566 	 * and fetching is done in one step vs legacy fuse, which has separate
567 	 * read (fetch request) and write (commit result).
568 	 */
569 	fuse_uring_next_fuse_req(ent, queue, issue_flags);
570 	return 0;
571 }
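/*
 * Seen from the server, a single ring entry runs a loop like the following
 * (hypothetical userspace sketch; the helper names are made up, only the
 * command op codes and the commit_id semantics come from this file):
 *
 *	// once per entry: register it; the SQE completes when a request
 *	// has been placed into the entry's buffers
 *	send_fuse_uring_cmd(FUSE_IO_URING_CMD_REGISTER, qid, 0);
 *
 *	for (;;) {
 *		wait_for_cqe();
 *		handle_request(&headers, payload);  // write reply into the buffers
 *		// commit the reply and atomically re-arm the entry;
 *		// commit_id is what the kernel stored in
 *		// ring_ent_in_out.commit_id, i.e. the request's unique id
 *		send_fuse_uring_cmd(FUSE_IO_URING_CMD_COMMIT_AND_FETCH, qid,
 *				    headers.ring_ent_in_out.commit_id);
 *	}
 */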
572 
573 /*
574  * FUSE_IO_URING_CMD_REGISTER command handling
575  */
576 static void fuse_uring_do_register(struct fuse_ring_ent *ent,
577 				   struct io_uring_cmd *cmd,
578 				   unsigned int issue_flags)
579 {
580 	struct fuse_ring_queue *queue = ent->queue;
581 
582 	spin_lock(&queue->lock);
583 	ent->cmd = cmd;
584 	fuse_uring_ent_avail(ent, queue);
585 	spin_unlock(&queue->lock);
586 }
587 
588 /*
589  * sqe->addr is a ptr to an iovec array, iov[0] has the headers, iov[1]
590  * the payload
591  */
592 static int fuse_uring_get_iovec_from_sqe(const struct io_uring_sqe *sqe,
593 					 struct iovec iov[FUSE_URING_IOV_SEGS])
594 {
595 	struct iovec __user *uiov = u64_to_user_ptr(READ_ONCE(sqe->addr));
596 	struct iov_iter iter;
597 	ssize_t ret;
598 
599 	if (sqe->len != FUSE_URING_IOV_SEGS)
600 		return -EINVAL;
601 
602 	/*
603 	 * Direction for buffer access will actually be READ and WRITE;
604 	 * using WRITE for the import should include READ access as well.
605 	 */
606 	ret = import_iovec(WRITE, uiov, FUSE_URING_IOV_SEGS,
607 			   FUSE_URING_IOV_SEGS, &iov, &iter);
608 	if (ret < 0)
609 		return ret;
610 
611 	return 0;
612 }
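/*
 * Expected userspace setup for these two segments (hypothetical sketch,
 * buffer names are made up; the sizes have to satisfy the checks in
 * fuse_uring_create_ring_ent() below):
 *
 *	struct fuse_uring_req_header headers;
 *	char payload[MAX_PAYLOAD];	// at least ring->max_payload_sz bytes
 *
 *	struct iovec iov[FUSE_URING_IOV_SEGS] = {
 *		{ .iov_base = &headers, .iov_len = sizeof(headers) },
 *		{ .iov_base = payload,  .iov_len = sizeof(payload) },
 *	};
 *
 *	sqe->addr = (__u64)(uintptr_t)iov;
 *	sqe->len  = FUSE_URING_IOV_SEGS;
 */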
613 
614 static struct fuse_ring_ent *
615 fuse_uring_create_ring_ent(struct io_uring_cmd *cmd,
616 			   struct fuse_ring_queue *queue)
617 {
618 	struct fuse_ring *ring = queue->ring;
619 	struct fuse_ring_ent *ent;
620 	size_t payload_size;
621 	struct iovec iov[FUSE_URING_IOV_SEGS];
622 	int err;
623 
624 	err = fuse_uring_get_iovec_from_sqe(cmd->sqe, iov);
625 	if (err) {
626 		pr_info_ratelimited("Failed to get iovec from sqe, err=%d\n",
627 				    err);
628 		return ERR_PTR(err);
629 	}
630 
631 	err = -EINVAL;
632 	if (iov[0].iov_len < sizeof(struct fuse_uring_req_header)) {
633 		pr_info_ratelimited("Invalid header len %zu\n", iov[0].iov_len);
634 		return ERR_PTR(err);
635 	}
636 
637 	payload_size = iov[1].iov_len;
638 	if (payload_size < ring->max_payload_sz) {
639 		pr_info_ratelimited("Invalid req payload len %zu\n",
640 				    payload_size);
641 		return ERR_PTR(err);
642 	}
643 
644 	err = -ENOMEM;
645 	ent = kzalloc(sizeof(*ent), GFP_KERNEL_ACCOUNT);
646 	if (!ent)
647 		return ERR_PTR(err);
648 
649 	INIT_LIST_HEAD(&ent->list);
650 
651 	ent->queue = queue;
652 	ent->headers = iov[0].iov_base;
653 	ent->payload = iov[1].iov_base;
654 
655 	return ent;
656 }
657 
658 /*
659  * Register the header and payload buffer with the kernel and put the
660  * entry as "ready to get fuse requests" on the queue
661  */
662 static int fuse_uring_register(struct io_uring_cmd *cmd,
663 			       unsigned int issue_flags, struct fuse_conn *fc)
664 {
665 	const struct fuse_uring_cmd_req *cmd_req = io_uring_sqe_cmd(cmd->sqe);
666 	struct fuse_ring *ring = fc->ring;
667 	struct fuse_ring_queue *queue;
668 	struct fuse_ring_ent *ent;
669 	int err;
670 	unsigned int qid = READ_ONCE(cmd_req->qid);
671 
672 	err = -ENOMEM;
673 	if (!ring) {
674 		ring = fuse_uring_create(fc);
675 		if (!ring)
676 			return err;
677 	}
678 
679 	if (qid >= ring->nr_queues) {
680 		pr_info_ratelimited("fuse: Invalid ring qid %u\n", qid);
681 		return -EINVAL;
682 	}
683 
684 	queue = ring->queues[qid];
685 	if (!queue) {
686 		queue = fuse_uring_create_queue(ring, qid);
687 		if (!queue)
688 			return err;
689 	}
690 
691 	/*
692 	 * The queue created above does not need to be destructed in case of
693 	 * entry errors below; that will be done at ring destruction time.
694 	 */
695 
696 	ent = fuse_uring_create_ring_ent(cmd, queue);
697 	if (IS_ERR(ent))
698 		return PTR_ERR(ent);
699 
700 	fuse_uring_do_register(ent, cmd, issue_flags);
701 
702 	return 0;
703 }
704 
705 /*
706  * Entry function from io_uring to handle the given passthrough command
707  * (op code IORING_OP_URING_CMD)
708  */
709 int __maybe_unused fuse_uring_cmd(struct io_uring_cmd *cmd,
710 				  unsigned int issue_flags)
711 {
712 	struct fuse_dev *fud;
713 	struct fuse_conn *fc;
714 	u32 cmd_op = cmd->cmd_op;
715 	int err;
716 
717 	if (!enable_uring) {
718 		pr_info_ratelimited("fuse-io-uring is disabled\n");
719 		return -EOPNOTSUPP;
720 	}
721 
722 	/* This extra SQE size holds struct fuse_uring_cmd_req */
723 	if (!(issue_flags & IO_URING_F_SQE128))
724 		return -EINVAL;
725 
726 	fud = fuse_get_dev(cmd->file);
727 	if (!fud) {
728 		pr_info_ratelimited("No fuse device found\n");
729 		return -ENOTCONN;
730 	}
731 	fc = fud->fc;
732 
733 	if (fc->aborted)
734 		return -ECONNABORTED;
735 	if (!fc->connected)
736 		return -ENOTCONN;
737 
738 	/*
739 	 * fuse_uring_register() needs the ring to be initialized;
740 	 * we need to know the max payload size.
741 	 */
742 	if (!fc->initialized)
743 		return -EAGAIN;
744 
745 	switch (cmd_op) {
746 	case FUSE_IO_URING_CMD_REGISTER:
747 		err = fuse_uring_register(cmd, issue_flags, fc);
748 		if (err) {
749 			pr_info_once("FUSE_IO_URING_CMD_REGISTER failed err=%d\n",
750 				     err);
751 			return err;
752 		}
753 		break;
754 	case FUSE_IO_URING_CMD_COMMIT_AND_FETCH:
755 		err = fuse_uring_commit_fetch(cmd, issue_flags, fc);
756 		if (err) {
757 			pr_info_once("FUSE_IO_URING_CMD_COMMIT_AND_FETCH failed err=%d\n",
758 				     err);
759 			return err;
760 		}
761 		break;
762 	default:
763 		return -EINVAL;
764 	}
765 
766 	return -EIOCBQUEUED;
767 }
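/*
 * Hypothetical liburing sketch for issuing one of the commands above; the
 * io_uring instance must be created with IORING_SETUP_SQE128 (see the
 * IO_URING_F_SQE128 check), and fuse_dev_fd/iov/qid are placeholders:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&uring);
 *	struct fuse_uring_cmd_req *cmd_req;
 *
 *	io_uring_prep_rw(IORING_OP_URING_CMD, sqe, fuse_dev_fd,
 *			 iov, FUSE_URING_IOV_SEGS, 0);
 *	sqe->cmd_op = FUSE_IO_URING_CMD_REGISTER;
 *
 *	cmd_req = (struct fuse_uring_cmd_req *)sqe->cmd;
 *	cmd_req->qid = qid;
 *	cmd_req->commit_id = 0;		// only used by COMMIT_AND_FETCH
 *
 *	io_uring_submit(&uring);
 */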
768