#ifndef __LINUX__AIO_H
#define __LINUX__AIO_H

#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/aio_abi.h>
#include <linux/uio.h>
#include <linux/rcupdate.h>

#include <linux/atomic.h>

struct kioctx;

#define KIOCB_SYNC_KEY		(~0U)

/* ki_flags bits */
#define KIF_KICKED		1
#define KIF_CANCELLED		2

#define kiocbTryKick(iocb)	test_and_set_bit(KIF_KICKED, &(iocb)->ki_flags)

#define kiocbSetKicked(iocb)	set_bit(KIF_KICKED, &(iocb)->ki_flags)
#define kiocbSetCancelled(iocb)	set_bit(KIF_CANCELLED, &(iocb)->ki_flags)

#define kiocbClearKicked(iocb)	clear_bit(KIF_KICKED, &(iocb)->ki_flags)
#define kiocbClearCancelled(iocb)	clear_bit(KIF_CANCELLED, &(iocb)->ki_flags)

#define kiocbIsKicked(iocb)	test_bit(KIF_KICKED, &(iocb)->ki_flags)
#define kiocbIsCancelled(iocb)	test_bit(KIF_CANCELLED, &(iocb)->ki_flags)

/* is there a better place to document function pointer methods? */
/**
 * ki_retry	-	iocb forward progress callback
 * @kiocb:	The kiocb struct to advance by performing an operation.
 *
 * This callback is called when the AIO core wants a given AIO operation
 * to make forward progress.  The kiocb argument describes the operation
 * that is to be performed.  As the operation proceeds, perhaps partially,
 * ki_retry is expected to update the kiocb with the progress made.
 * Typically ki_retry is set by the AIO core, and it in turn calls
 * file_operations helpers.
 *
 * ki_retry's return value determines when the AIO operation is completed
 * and an event is generated in the AIO event ring.  Except for the special
 * return values described below, the value returned from ki_retry is
 * transferred directly into the completion ring as the operation's
 * resulting status.  Once this has happened, ki_retry *MUST NOT* reference
 * the kiocb pointer again.
 *
 * If ki_retry returns -EIOCBQUEUED, it has made a promise that
 * aio_complete() will be called on the kiocb pointer in the future.
 * The AIO core will not ask the method again -- ki_retry must ensure
 * forward progress.  aio_complete() must be called once and only once;
 * multiple calls may result in undefined behaviour.
 *
 * If ki_retry returns -EIOCBRETRY, it has made a promise that kick_iocb()
 * will be called on the kiocb pointer in the future.  This may happen
 * through generic helpers that associate kiocb->ki_wait with a wait
 * queue head that ki_retry uses via current->io_wait.  It can also happen
 * with custom tracking and manual calls to kick_iocb(), though that is
 * discouraged.  In either case, kick_iocb() must be called once and only
 * once.  ki_retry must ensure forward progress; the AIO core will wait
 * indefinitely for kick_iocb() to be called.
 */
struct kiocb {
	struct list_head	ki_run_list;
	unsigned long		ki_flags;
	int			ki_users;
	unsigned		ki_key;		/* id of this request */

	struct file		*ki_filp;
	struct kioctx		*ki_ctx;	/* may be NULL for sync ops */
	int			(*ki_cancel)(struct kiocb *, struct io_event *);
	ssize_t			(*ki_retry)(struct kiocb *);
	void			(*ki_dtor)(struct kiocb *);

	union {
		void __user		*user;
		struct task_struct	*tsk;
	} ki_obj;

	__u64			ki_user_data;	/* user's data for completion */
	loff_t			ki_pos;

	void			*private;
	/* State that we remember to be able to restart/retry */
	unsigned short		ki_opcode;
	size_t			ki_nbytes;	/* copy of iocb->aio_nbytes */
	char __user		*ki_buf;	/* remaining iocb->aio_buf */
	size_t			ki_left;	/* remaining bytes */
	struct iovec		ki_inline_vec;	/* inline vector */
	struct iovec		*ki_iovec;
	unsigned long		ki_nr_segs;
	unsigned long		ki_cur_seg;

	struct list_head	ki_list;	/* the aio core uses this
						 * for cancellation */
	struct list_head	ki_batch;	/* batch allocation */

	/*
	 * If the aio_resfd field of the userspace iocb is not zero,
	 * this is the underlying eventfd context to deliver events to.
	 */
	struct eventfd_ctx	*ki_eventfd;
};
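
/*
 * Example (a sketch, not part of this header): a hypothetical ki_retry
 * method honouring the contract documented above.  my_request and
 * my_dev_start_io() are illustrative names only.
 *
 *	static ssize_t my_retry(struct kiocb *iocb)
 *	{
 *		struct my_request *req = iocb->private;
 *		int err;
 *
 *		err = my_dev_start_io(req);	// queue hardware I/O
 *		if (err)
 *			return err;		// completes the iocb now
 *
 *		// The driver's completion path promises to call
 *		// aio_complete(iocb, res, res2) exactly once later;
 *		// returning -EIOCBRETRY instead would promise exactly
 *		// one future kick_iocb(iocb) call.
 *		return -EIOCBQUEUED;
 *	}
 */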

static inline bool is_sync_kiocb(struct kiocb *kiocb)
{
	return kiocb->ki_key == KIOCB_SYNC_KEY;
}

static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
{
	*kiocb = (struct kiocb) {
			.ki_users = 1,
			.ki_key = KIOCB_SYNC_KEY,
			.ki_filp = filp,
			.ki_obj.tsk = current,
		};
}
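
/*
 * Example (a sketch, modelled on the do_sync_read()-style callers in
 * fs/read_write.c of this era, not a verbatim copy): how a synchronous
 * path can drive an asynchronous file method with an on-stack kiocb
 * and then block until completion.
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *	struct kiocb kiocb;
 *	ssize_t ret;
 *
 *	init_sync_kiocb(&kiocb, filp);
 *	kiocb.ki_pos = *ppos;
 *	kiocb.ki_left = len;
 *	kiocb.ki_nbytes = len;
 *
 *	ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
 *	if (ret == -EIOCBQUEUED)
 *		ret = wait_on_sync_kiocb(&kiocb);
 *	*ppos = kiocb.ki_pos;
 */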

#define AIO_RING_MAGIC			0xa10a10a1
#define AIO_RING_COMPAT_FEATURES	1
#define AIO_RING_INCOMPAT_FEATURES	0
struct aio_ring {
	unsigned	id;	/* kernel internal index number */
	unsigned	nr;	/* number of io_events */
	unsigned	head;
	unsigned	tail;

	unsigned	magic;
	unsigned	compat_features;
	unsigned	incompat_features;
	unsigned	header_length;	/* size of aio_ring */

	struct io_event		io_events[0];
}; /* 128 bytes + ring size */

#define AIO_RING_PAGES	8
struct aio_ring_info {
	unsigned long		mmap_base;
	unsigned long		mmap_size;

	struct page		**ring_pages;
	spinlock_t		ring_lock;
	long			nr_pages;

	unsigned		nr, tail;

	struct page		*internal_pages[AIO_RING_PAGES];
};
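
/*
 * Example (a sketch of the sizing arithmetic, assuming 4096-byte pages
 * and a 32-byte struct io_event): a ring backed by the 8 internal pages
 * above holds (8 * 4096 - sizeof(struct aio_ring)) / 32 events, i.e.
 * slightly fewer than 1024; larger rings map additional pages via
 * ring_pages.
 */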

static inline unsigned aio_ring_avail(struct aio_ring_info *info,
					struct aio_ring *ring)
{
	return (ring->head + info->nr - 1 - ring->tail) % info->nr;
}
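
/*
 * Worked example: with nr == 8, head == 2 and tail == 5, three events
 * are pending and aio_ring_avail() returns (2 + 8 - 1 - 5) % 8 == 4.
 * One slot is always kept free so that head == tail unambiguously
 * means "ring empty" rather than "ring full".
 */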

struct kioctx {
	atomic_t		users;
	int			dead;
	struct mm_struct	*mm;

	/* This needs improving */
	unsigned long		user_id;
	struct hlist_node	list;

	wait_queue_head_t	wait;

	spinlock_t		ctx_lock;

	int			reqs_active;
	struct list_head	active_reqs;	/* used for cancellation */
	struct list_head	run_list;	/* used for kicked reqs */

	/* sys_io_setup currently limits this to an unsigned int */
	unsigned		max_reqs;

	struct aio_ring_info	ring_info;

	struct delayed_work	wq;

	struct rcu_head		rcu_head;
};

/* prototypes */
#ifdef CONFIG_AIO
extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb);
extern int aio_put_req(struct kiocb *iocb);
extern void kick_iocb(struct kiocb *iocb);
extern int aio_complete(struct kiocb *iocb, long res, long res2);
struct mm_struct;
extern void exit_aio(struct mm_struct *mm);
extern long do_io_submit(aio_context_t ctx_id, long nr,
			 struct iocb __user * __user *iocbpp, bool compat);
#else
static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
static inline int aio_put_req(struct kiocb *iocb) { return 0; }
static inline void kick_iocb(struct kiocb *iocb) { }
static inline int aio_complete(struct kiocb *iocb, long res, long res2) { return 0; }
struct mm_struct;
static inline void exit_aio(struct mm_struct *mm) { }
static inline long do_io_submit(aio_context_t ctx_id, long nr,
				struct iocb __user * __user *iocbpp,
				bool compat) { return 0; }
#endif /* CONFIG_AIO */

static inline struct kiocb *list_kiocb(struct list_head *h)
{
	return list_entry(h, struct kiocb, ki_list);
}
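
/*
 * Example (a sketch; fs/aio.c holds ctx_lock the same way): walking a
 * context's cancellation list with list_kiocb().
 *
 *	struct list_head *pos;
 *
 *	spin_lock_irq(&ctx->ctx_lock);
 *	list_for_each(pos, &ctx->active_reqs) {
 *		struct kiocb *iocb = list_kiocb(pos);
 *		// inspect or cancel iocb here
 *	}
 *	spin_unlock_irq(&ctx->ctx_lock);
 */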

/* for sysctl: */
extern unsigned long aio_nr;
extern unsigned long aio_max_nr;

#endif /* __LINUX__AIO_H */