/* include/linux/aio.h */
#ifndef __LINUX__AIO_H
#define __LINUX__AIO_H

#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/aio_abi.h>

#include <asm/atomic.h>

#define AIO_MAXSEGS		4
#define AIO_KIOGRP_NR_ATOMIC	8

struct kioctx;

/* Notes on cancelling a kiocb:
 *	If a kiocb is cancelled, aio_complete may return 0 to indicate
 *	that cancel has not yet disposed of the kiocb.  All cancel
 *	operations *must* call aio_put_req to dispose of the kiocb
 *	to guard against races with the completion code.
 */
#define KIOCB_C_CANCELLED	0x01
#define KIOCB_C_COMPLETE	0x02
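
/*
 * Illustrative sketch, not part of this header: a minimal ->ki_cancel
 * method that honours the rule above.  "struct my_req" and my_req_abort()
 * are hypothetical driver state; the aio_put_req() call is what every
 * cancel method must do.
 *
 *	static int example_cancel(struct kiocb *iocb, struct io_event *event)
 *	{
 *		struct my_req *req = iocb->private;
 *
 *		my_req_abort(req);		// stop the in-flight I/O
 *		event->res = -EINTR;		// result reported to io_cancel()
 *		event->res2 = 0;
 *		aio_put_req(iocb);		// dispose of the kiocb, as required
 *		return 0;
 *	}
 */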

#define KIOCB_SYNC_KEY		(~0U)

/* ki_flags bits */
/*
 * This may be used for cancel/retry serialization in the future, but
 * for now it's unused and we probably don't want modules to even
 * think they can use it.
 */
/* #define KIF_LOCKED		0 */
#define KIF_KICKED		1
#define KIF_CANCELLED		2

#define kiocbTryLock(iocb)	test_and_set_bit(KIF_LOCKED, &(iocb)->ki_flags)
#define kiocbTryKick(iocb)	test_and_set_bit(KIF_KICKED, &(iocb)->ki_flags)

#define kiocbSetLocked(iocb)	set_bit(KIF_LOCKED, &(iocb)->ki_flags)
#define kiocbSetKicked(iocb)	set_bit(KIF_KICKED, &(iocb)->ki_flags)
#define kiocbSetCancelled(iocb)	set_bit(KIF_CANCELLED, &(iocb)->ki_flags)

#define kiocbClearLocked(iocb)	clear_bit(KIF_LOCKED, &(iocb)->ki_flags)
#define kiocbClearKicked(iocb)	clear_bit(KIF_KICKED, &(iocb)->ki_flags)
#define kiocbClearCancelled(iocb)	clear_bit(KIF_CANCELLED, &(iocb)->ki_flags)

#define kiocbIsLocked(iocb)	test_bit(KIF_LOCKED, &(iocb)->ki_flags)
#define kiocbIsKicked(iocb)	test_bit(KIF_KICKED, &(iocb)->ki_flags)
#define kiocbIsCancelled(iocb)	test_bit(KIF_CANCELLED, &(iocb)->ki_flags)
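
/*
 * Illustrative sketch, not part of this header: the Try* helpers wrap
 * test_and_set_bit(), so they return the previous bit value and can be
 * used to queue a kick exactly once, roughly in the style of fs/aio.c:
 *
 *	if (!kiocbTryKick(iocb)) {
 *		// first kick since the last run: put iocb on ctx->run_list
 *	} else {
 *		// already kicked and not yet run: nothing more to do
 *	}
 */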

/* is there a better place to document function pointer methods? */
/**
 * ki_retry	-	iocb forward progress callback
 * @kiocb:	The kiocb struct to advance by performing an operation.
 *
 * This callback is called when the AIO core wants a given AIO operation
 * to make forward progress.  The kiocb argument describes the operation
 * that is to be performed.  As the operation proceeds, perhaps partially,
 * ki_retry is expected to update the kiocb with the progress made.
 * Typically ki_retry is set by the AIO core and itself calls
 * file_operations helpers.
 *
 * ki_retry's return value determines when the AIO operation is completed
 * and an event is generated in the AIO event ring.  Except for the special
 * return values described below, the value that is returned from ki_retry
 * is transferred directly into the completion ring as the operation's
 * resulting status.  Once this has happened, ki_retry *MUST NOT* reference
 * the kiocb pointer again.
 *
 * If ki_retry returns -EIOCBQUEUED it has made a promise that aio_complete()
 * will be called on the kiocb pointer in the future.  The AIO core will
 * not ask the method again -- ki_retry must ensure forward progress.
 * aio_complete() must be called once and only once in the future; multiple
 * calls may result in undefined behaviour.
 *
 * If ki_retry returns -EIOCBRETRY it has made a promise that kick_iocb()
 * will be called on the kiocb pointer in the future.  This may happen
 * through generic helpers that associate kiocb->ki_wait with a wait
 * queue head that ki_retry uses via current->io_wait.  It can also happen
 * with custom tracking and manual calls to kick_iocb(), though that is
 * discouraged.  In either case, kick_iocb() must be called once and only
 * once.  ki_retry must ensure forward progress, as the AIO core will wait
 * indefinitely for kick_iocb() to be called.
 */
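
/*
 * Illustrative sketch, not part of this header, of the return-value
 * protocol described above.  my_try_io() is a hypothetical non-blocking
 * helper; the generic wait-queue plumbing (ki_wait hooked up through
 * current->io_wait) is assumed to guarantee the later kick_iocb():
 *
 *	static ssize_t example_retry(struct kiocb *iocb)
 *	{
 *		ssize_t ret = my_try_io(iocb);
 *
 *		if (ret == -EAGAIN)
 *			return -EIOCBRETRY;	// kick_iocb() will rerun us
 *
 *		// any other value completes the iocb: it is copied into the
 *		// event ring and the kiocb must not be referenced again
 *		return ret;
 *	}
 */
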
struct kiocb {
	struct list_head	ki_run_list;
	long			ki_flags;
	int			ki_users;
	unsigned		ki_key;		/* id of this request */

	struct file		*ki_filp;
	struct kioctx		*ki_ctx;	/* may be NULL for sync ops */
	int			(*ki_cancel)(struct kiocb *, struct io_event *);
	ssize_t			(*ki_retry)(struct kiocb *);
	void			(*ki_dtor)(struct kiocb *);

	union {
		void __user		*user;
		struct task_struct	*tsk;
	} ki_obj;

	__u64			ki_user_data;	/* user's data for completion */
	wait_queue_t		ki_wait;
	loff_t			ki_pos;

	void			*private;
	/* State that we remember to be able to restart/retry */
	unsigned short		ki_opcode;
	size_t			ki_nbytes;	/* copy of iocb->aio_nbytes */
	char __user		*ki_buf;	/* remaining iocb->aio_buf */
	size_t			ki_left;	/* remaining bytes */
	long			ki_retried;	/* just for testing */
	long			ki_kicked;	/* just for testing */
	long			ki_queued;	/* just for testing */

	struct list_head	ki_list;	/* the aio core uses this
						 * for cancellation */
};

#define is_sync_kiocb(iocb)	((iocb)->ki_key == KIOCB_SYNC_KEY)
#define init_sync_kiocb(x, filp)			\
	do {						\
		struct task_struct *tsk = current;	\
		(x)->ki_flags = 0;			\
		(x)->ki_users = 1;			\
		(x)->ki_key = KIOCB_SYNC_KEY;		\
		(x)->ki_filp = (filp);			\
		(x)->ki_ctx = NULL;			\
		(x)->ki_cancel = NULL;			\
		(x)->ki_retry = NULL;			\
		(x)->ki_dtor = NULL;			\
		(x)->ki_obj.tsk = tsk;			\
		(x)->ki_user_data = 0;			\
		init_wait((&(x)->ki_wait));		\
	} while (0)
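
/*
 * Illustrative sketch, not part of this header: roughly how a synchronous
 * caller (in the style of do_sync_read()) uses the helpers above.  A kiocb
 * is built on the stack, handed to an aio method, and only waited on if
 * the method actually went asynchronous:
 *
 *	struct kiocb kiocb;
 *	ssize_t ret;
 *
 *	init_sync_kiocb(&kiocb, filp);
 *	kiocb.ki_pos = *ppos;
 *	ret = filp->f_op->aio_read(&kiocb, buf, len, kiocb.ki_pos);
 *	if (ret == -EIOCBQUEUED)
 *		ret = wait_on_sync_kiocb(&kiocb);
 *	*ppos = kiocb.ki_pos;
 */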

#define AIO_RING_MAGIC			0xa10a10a1
#define AIO_RING_COMPAT_FEATURES	1
#define AIO_RING_INCOMPAT_FEATURES	0
struct aio_ring {
	unsigned	id;	/* kernel internal index number */
	unsigned	nr;	/* number of io_events */
	unsigned	head;
	unsigned	tail;

	unsigned	magic;
	unsigned	compat_features;
	unsigned	incompat_features;
	unsigned	header_length;	/* size of aio_ring */

	struct io_event		io_events[0];
}; /* 128 bytes + ring size */

#define aio_ring_avail(info, ring)	(((ring)->head + (info)->nr - 1 - (ring)->tail) % (info)->nr)
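
/*
 * Worked example of the arithmetic above (values are illustrative): with
 * (info)->nr == 128, (ring)->head == 5 and (ring)->tail == 3 the ring
 * already holds (3 - 5 + 128) % 128 == 126 events, so aio_ring_avail()
 * yields (5 + 128 - 1 - 3) % 128 == 1 free slot.  One slot is always kept
 * unused so that head == tail means "empty" rather than "full".
 */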

#define AIO_RING_PAGES	8
struct aio_ring_info {
	unsigned long		mmap_base;
	unsigned long		mmap_size;

	struct page		**ring_pages;
	spinlock_t		ring_lock;
	long			nr_pages;

	unsigned		nr, tail;

	struct page		*internal_pages[AIO_RING_PAGES];
};

struct kioctx {
	atomic_t		users;
	int			dead;
	struct mm_struct	*mm;

	/* This needs improving */
	unsigned long		user_id;
	struct kioctx		*next;

	wait_queue_head_t	wait;

	spinlock_t		ctx_lock;

	int			reqs_active;
	struct list_head	active_reqs;	/* used for cancellation */
	struct list_head	run_list;	/* used for kicked reqs */

	/* sys_io_setup currently limits this to an unsigned int */
	unsigned		max_reqs;

	struct aio_ring_info	ring_info;

	struct work_struct	wq;
};
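
/*
 * Illustrative sketch, not part of this header: a rough outline of how the
 * completion path ties these fields together.  ring_slot() is hypothetical
 * and details such as mapping the ring pages are omitted; see fs/aio.c for
 * the real thing.
 *
 *	spin_lock_irqsave(&ctx->ctx_lock, flags);
 *	event = ring_slot(&ctx->ring_info, ctx->ring_info.tail);
 *	event->data = iocb->ki_user_data;
 *	event->res = res;
 *	ctx->ring_info.tail = (ctx->ring_info.tail + 1) % ctx->ring_info.nr;
 *	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
 *	if (waitqueue_active(&ctx->wait))
 *		wake_up(&ctx->wait);		// wake io_getevents() sleepers
 */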

/* prototypes */
extern unsigned aio_max_size;

extern ssize_t FASTCALL(wait_on_sync_kiocb(struct kiocb *iocb));
extern int FASTCALL(aio_put_req(struct kiocb *iocb));
extern void FASTCALL(kick_iocb(struct kiocb *iocb));
extern int FASTCALL(aio_complete(struct kiocb *iocb, long res, long res2));
extern void FASTCALL(__put_ioctx(struct kioctx *ctx));
struct mm_struct;
extern void FASTCALL(exit_aio(struct mm_struct *mm));

/* semi private, but used by the 32bit emulations: */
extern struct kioctx *lookup_ioctx(unsigned long ctx_id);
extern int FASTCALL(io_submit_one(struct kioctx *ctx,
			struct iocb __user *user_iocb, struct iocb *iocb));

#define get_ioctx(kioctx) do {						\
	BUG_ON(unlikely(atomic_read(&(kioctx)->users) <= 0));		\
	atomic_inc(&(kioctx)->users);					\
} while (0)
#define put_ioctx(kioctx) do {						\
	BUG_ON(unlikely(atomic_read(&(kioctx)->users) <= 0));		\
	if (unlikely(atomic_dec_and_test(&(kioctx)->users)))		\
		__put_ioctx(kioctx);					\
} while (0)
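
/*
 * Illustrative sketch, not part of this header: typical reference handling
 * around these macros.  lookup_ioctx() is expected to return with a
 * reference already held (via get_ioctx()); the caller drops it with
 * put_ioctx(), which frees the context on the last reference:
 *
 *	struct kioctx *ctx = lookup_ioctx(ctx_id);
 *
 *	if (ctx) {
 *		// ... operate on ctx ...
 *		put_ioctx(ctx);
 *	}
 */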

#define in_aio() !is_sync_wait(current->io_wait)
/* may be used for debugging */
#define warn_if_async()							\
do {									\
	if (in_aio()) {							\
		printk(KERN_ERR "%s(%s:%d) called in async context!\n",	\
			__FUNCTION__, __FILE__, __LINE__);		\
		dump_stack();						\
	}								\
} while (0)
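
/*
 * Illustrative sketch, not part of this header: warn_if_async() is meant
 * to be dropped into a function that must only ever run in process (sync)
 * context:
 *
 *	static void my_sync_only_helper(void)	// hypothetical
 *	{
 *		warn_if_async();	// complain and dump_stack() if called
 *					// from an aio retry context
 *		// ... code that may sleep unconditionally ...
 *	}
 */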

#define io_wait_to_kiocb(wait) container_of(wait, struct kiocb, ki_wait)
#define is_retried_kiocb(iocb) ((iocb)->ki_retried > 1)
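
/*
 * Illustrative sketch, not part of this header: io_wait_to_kiocb() lets a
 * wait queue callback get from the embedded ki_wait entry back to its
 * kiocb, roughly in the style of the aio core's wake function:
 *
 *	static int example_wake(wait_queue_t *wait, unsigned mode, int sync,
 *				void *key)
 *	{
 *		struct kiocb *iocb = io_wait_to_kiocb(wait);
 *
 *		list_del_init(&wait->task_list);
 *		kick_iocb(iocb);	// honour the -EIOCBRETRY promise
 *		return 1;
 *	}
 */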

static inline struct kiocb *list_kiocb(struct list_head *h)
{
	return list_entry(h, struct kiocb, ki_list);
}
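
/*
 * Illustrative sketch, not part of this header: list_kiocb() converts the
 * ki_list links that the core keeps on ctx->active_reqs back into kiocbs,
 * e.g. when walking outstanding requests under ctx_lock:
 *
 *	struct list_head *pos;
 *
 *	spin_lock_irq(&ctx->ctx_lock);
 *	list_for_each(pos, &ctx->active_reqs) {
 *		struct kiocb *iocb = list_kiocb(pos);
 *		// ... inspect or cancel iocb ...
 *	}
 *	spin_unlock_irq(&ctx->ctx_lock);
 */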

/* for sysctl: */
extern unsigned long aio_nr;
extern unsigned long aio_max_nr;

#endif /* __LINUX__AIO_H */