xref: /linux-6.15/include/linux/aio.h (revision f5e4e7fd)
#ifndef __LINUX__AIO_H
#define __LINUX__AIO_H

#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/aio_abi.h>
#include <linux/uio.h>
#include <linux/rcupdate.h>

#include <linux/atomic.h>

struct kioctx;
struct kiocb;

#define KIOCB_KEY		0

/*
 * We use ki_cancel == KIOCB_CANCELLED to indicate that a kiocb has been either
 * cancelled or completed (this makes a certain amount of sense because
 * successful cancellation - io_cancel() - does deliver the completion to
 * userspace).
 *
 * And since most things don't implement kiocb cancellation and we'd really like
 * kiocb completion to be lockless when possible, we use ki_cancel to
 * synchronize cancellation and completion - we only set it to KIOCB_CANCELLED
 * with xchg() or cmpxchg(), see batch_complete_aio() and kiocb_cancel().
 */
#define KIOCB_CANCELLED		((void *) (~0ULL))

typedef int (kiocb_cancel_fn)(struct kiocb *, struct io_event *);

struct kiocb {
	atomic_t		ki_users;	/* reference count */

	struct file		*ki_filp;	/* file the request operates on */
	struct kioctx		*ki_ctx;	/* NULL for sync ops */
	kiocb_cancel_fn		*ki_cancel;	/* cancellation callback, see above */
	void			(*ki_dtor)(struct kiocb *);	/* optional destructor */

	union {
		void __user		*user;	/* userspace iocb (async ops) */
		struct task_struct	*tsk;	/* task to wake (sync ops) */
	} ki_obj;

	__u64			ki_user_data;	/* user's data for completion */
	loff_t			ki_pos;		/* current file offset */

	void			*private;	/* for use by the submitter */
	/* State that we remember to be able to restart/retry */
	unsigned short		ki_opcode;	/* iocb->aio_lio_opcode */
	size_t			ki_nbytes;	/* copy of iocb->aio_nbytes */
	char			__user *ki_buf;	/* remaining iocb->aio_buf */
	size_t			ki_left;	/* remaining bytes */
	struct iovec		ki_inline_vec;	/* inline vector */
	struct iovec		*ki_iovec;	/* iovec array for vectored ops */
	unsigned long		ki_nr_segs;	/* total iovec segments */
	unsigned long		ki_cur_seg;	/* current iovec segment */

	struct list_head	ki_list;	/* the aio core uses this
						 * for cancellation */

	/*
	 * If the aio_resfd field of the userspace iocb is not zero,
	 * this is the underlying eventfd context to deliver events to.
	 */
	struct eventfd_ctx	*ki_eventfd;
};
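
/*
 * Illustrative sketch (not part of the original header): how the cancel
 * side claims ki_cancel with cmpxchg(), as described in the comment above
 * KIOCB_CANCELLED.  Simplified from the kiocb_cancel() logic in fs/aio.c;
 * it omits refcounting and assumes <linux/errno.h> for -EINVAL.
 */
#if 0	/* example only, never compiled */
static int example_claim_cancel(struct kiocb *kiocb, struct io_event *res)
{
	kiocb_cancel_fn *old, *cancel;

	/* Snapshot the callback; NULL means cancellation is unsupported. */
	cancel = kiocb->ki_cancel;
	do {
		if (!cancel || cancel == KIOCB_CANCELLED)
			return -EINVAL;	/* already cancelled or completed */

		/*
		 * Swap in KIOCB_CANCELLED atomically.  Whichever path wins
		 * the cmpxchg() race (cancellation or completion) is the
		 * only one allowed to act on the old callback.
		 */
		old = cancel;
		cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED);
	} while (cancel != old);

	return cancel(kiocb, res);
}
#endif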

static inline bool is_sync_kiocb(struct kiocb *kiocb)
{
	return kiocb->ki_ctx == NULL;
}

static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
{
	*kiocb = (struct kiocb) {
			.ki_users = ATOMIC_INIT(1),
			.ki_ctx = NULL,
			.ki_filp = filp,
			.ki_obj.tsk = current,
		};
}
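
/*
 * Illustrative sketch (not in the original header): how generic code
 * typically drives a synchronous kiocb, along the lines of do_sync_read()
 * in fs/read_write.c of this era.  Simplified; assumes <linux/fs.h> and
 * <linux/errno.h>, and a file that implements ->aio_read().
 */
#if 0	/* example only, never compiled */
static ssize_t example_sync_read(struct file *filp, char __user *buf,
				 size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct kiocb kiocb;
	ssize_t ret;

	/* On-stack kiocb with ki_ctx == NULL, so is_sync_kiocb() is true. */
	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_left = len;
	kiocb.ki_nbytes = len;

	ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
	if (ret == -EIOCBQUEUED)
		/* The op went asynchronous; aio_complete() will wake us. */
		ret = wait_on_sync_kiocb(&kiocb);

	*ppos = kiocb.ki_pos;
	return ret;
}
#endif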

/* prototypes */
#ifdef CONFIG_AIO
extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb);
extern void aio_put_req(struct kiocb *iocb);
extern void aio_complete(struct kiocb *iocb, long res, long res2);
struct mm_struct;
extern void exit_aio(struct mm_struct *mm);
extern long do_io_submit(aio_context_t ctx_id, long nr,
			 struct iocb __user * __user *iocbpp, bool compat);
void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel);
#else
static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
static inline void aio_put_req(struct kiocb *iocb) { }
static inline void aio_complete(struct kiocb *iocb, long res, long res2) { }
struct mm_struct;
static inline void exit_aio(struct mm_struct *mm) { }
static inline long do_io_submit(aio_context_t ctx_id, long nr,
				struct iocb __user * __user *iocbpp,
				bool compat) { return 0; }
static inline void kiocb_set_cancel_fn(struct kiocb *req,
				       kiocb_cancel_fn *cancel) { }
#endif /* CONFIG_AIO */
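
/*
 * Illustrative sketch (not in the original header): how a driver that
 * queues asynchronous I/O might use the API above.  All foo_* names are
 * invented for the example; only kiocb_set_cancel_fn(), aio_complete()
 * and the kiocb_cancel_fn signature come from this header.
 */
#if 0	/* example only, never compiled */
static int foo_cancel(struct kiocb *iocb, struct io_event *event)
{
	/* Ask the hardware to drop the request; completion follows later. */
	return foo_hw_dequeue(iocb->private);
}

static ssize_t foo_aio_read(struct kiocb *iocb, const struct iovec *iov,
			    unsigned long nr_segs, loff_t pos)
{
	/* Make the request cancellable while it sits in the queue. */
	kiocb_set_cancel_fn(iocb, foo_cancel);
	foo_hw_queue(iocb, iov, nr_segs, pos);

	/* The result will be delivered later via aio_complete(). */
	return -EIOCBQUEUED;
}

static void foo_hw_done(struct kiocb *iocb, long bytes_done)
{
	/* res becomes io_event->res; res2 is a secondary status word. */
	aio_complete(iocb, bytes_done, 0);
}
#endif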

static inline struct kiocb *list_kiocb(struct list_head *h)
{
	return list_entry(h, struct kiocb, ki_list);
}
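
/*
 * Illustrative sketch (not in the original header): walking requests
 * chained through ki_list with list_kiocb().  Simplified; the real
 * traversal in fs/aio.c also holds the context lock and takes references.
 */
#if 0	/* example only, never compiled */
static void example_cancel_all(struct list_head *list)
{
	while (!list_empty(list)) {
		struct kiocb *iocb = list_kiocb(list->next);

		list_del_init(&iocb->ki_list);
		/* ... cancel or complete iocb here ... */
	}
}
#endif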

/* for sysctl: */
extern unsigned long aio_nr;		/* current system wide number of aio requests */
extern unsigned long aio_max_nr;	/* system wide maximum number of aio requests */

#endif /* __LINUX__AIO_H */