/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_IO_URING_CMD_H
#define _LINUX_IO_URING_CMD_H

#include <uapi/linux/io_uring.h>
#include <linux/io_uring_types.h>

/* only top 8 bits of sqe->uring_cmd_flags for kernel internal use */
#define IORING_URING_CMD_CANCELABLE	(1U << 30)

struct io_uring_cmd {
	struct file	*file;
	const struct io_uring_sqe *sqe;
	/* callback to defer completions to task context */
	void (*task_work_cb)(struct io_uring_cmd *cmd, unsigned);
	u32		cmd_op;
	u32		flags;
	u8		pdu[32]; /* available inline for free use */
};

static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
{
	return sqe->cmd;
}

#if defined(CONFIG_IO_URING)
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd);

/*
 * Completes the request, i.e. posts an io_uring CQE and deallocates @ioucmd
 * and the corresponding io_uring request.
 *
 * Note: the caller should never hard code @issue_flags and is only allowed
 * to pass the mask provided by the core io_uring code.
 */
void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2,
			unsigned issue_flags);

void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			unsigned flags);

/*
 * Note: the caller should never hard code @issue_flags and only use the
 * mask provided by the core io_uring code.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags);

/* Execute the request from a blocking context */
void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd);

#else
static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd)
{
	return -EOPNOTSUPP;
}
static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
		ssize_t ret2, unsigned issue_flags)
{
}
static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			unsigned flags)
{
}
static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
}
static inline void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
{
}
#endif

/*
 * Polled completions must ensure they are coming from a poll queue, and
 * hence are completed inside the usual poll handling loops.
 */
static inline void io_uring_cmd_iopoll_done(struct io_uring_cmd *ioucmd,
					    ssize_t ret, ssize_t res2)
{
	lockdep_assert(in_task());
	io_uring_cmd_done(ioucmd, ret, res2, 0);
}

/* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */
static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, IOU_F_TWQ_LAZY_WAKE);
}

static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, 0);
}

static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd)
{
	return cmd_to_io_kiocb(cmd)->task;
}

#endif /* _LINUX_IO_URING_CMD_H */
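
/*
 * Illustrative sketch (not part of this header): one way a hypothetical
 * driver might wire these helpers into its ->uring_cmd() file operation.
 * The names demo_cmd_pdu, demo_cmd_complete, demo_uring_cmd and
 * demo_hw_submit are made up for the example; only the io_uring_cmd
 * helpers declared above are real. The flow assumed here is the common
 * one: queue hardware work and return -EIOCBQUEUED, then on completion
 * punt to task context and post the CQE from the task-work callback.
 *
 *	// driver-private per-command state; must fit in ioucmd->pdu (32 bytes)
 *	struct demo_cmd_pdu {
 *		u32 status;
 *	};
 *
 *	// runs in the submitting task's context via task work
 *	static void demo_cmd_complete(struct io_uring_cmd *ioucmd,
 *				      unsigned issue_flags)
 *	{
 *		struct demo_cmd_pdu *pdu = (struct demo_cmd_pdu *)ioucmd->pdu;
 *
 *		// post the CQE; only pass through the issue_flags handed to us
 *		io_uring_cmd_done(ioucmd, pdu->status, 0, issue_flags);
 *	}
 *
 *	// called from the driver's completion path (e.g. IRQ context)
 *	static void demo_cmd_hw_done(struct io_uring_cmd *ioucmd, u32 status)
 *	{
 *		struct demo_cmd_pdu *pdu = (struct demo_cmd_pdu *)ioucmd->pdu;
 *
 *		pdu->status = status;
 *		// defer the actual completion to task context
 *		io_uring_cmd_do_in_task_lazy(ioucmd, demo_cmd_complete);
 *	}
 *
 *	// ->uring_cmd() handler in the driver's file_operations
 *	static int demo_uring_cmd(struct io_uring_cmd *ioucmd,
 *				  unsigned int issue_flags)
 *	{
 *		// driver-defined payload lives in the SQE command area
 *		const void *payload = io_uring_sqe_cmd(ioucmd->sqe);
 *
 *		BUILD_BUG_ON(sizeof(struct demo_cmd_pdu) > sizeof(ioucmd->pdu));
 *
 *		// hand off to hardware (hypothetical helper), complete later
 *		demo_hw_submit(ioucmd, payload);
 *		return -EIOCBQUEUED;
 *	}
 */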