xref: /linux-6.15/include/linux/aio.h (revision 4bedea94)
#ifndef __LINUX__AIO_H
#define __LINUX__AIO_H

#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/aio_abi.h>

#include <asm/atomic.h>

#define AIO_MAXSEGS		4
#define AIO_KIOGRP_NR_ATOMIC	8

struct kioctx;

/* Notes on cancelling a kiocb:
 *	If a kiocb is cancelled, aio_complete may return 0 to indicate
 *	that cancel has not yet disposed of the kiocb.  All cancel
 *	operations *must* call aio_put_req to dispose of the kiocb
 *	to guard against races with the completion code.
 */
#define KIOCB_C_CANCELLED	0x01
#define KIOCB_C_COMPLETE	0x02
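/*
 * Sketch of a cancel method that honours the rule above (illustrative only;
 * my_cancel, the driver-side queue and the -EINTR result are hypothetical,
 * not part of this API):
 *
 *	static int my_cancel(struct kiocb *iocb, struct io_event *event)
 *	{
 *		// dequeue iocb from the driver's private list here
 *		event->res = -EINTR;	// hypothetical completion code
 *		aio_put_req(iocb);	// required, per the note above
 *		return 0;
 *	}
 */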

#define KIOCB_SYNC_KEY		(~0U)

/* ki_flags bits */
#define KIF_LOCKED		0
#define KIF_KICKED		1
#define KIF_CANCELLED		2

#define kiocbTryLock(iocb)	test_and_set_bit(KIF_LOCKED, &(iocb)->ki_flags)
#define kiocbTryKick(iocb)	test_and_set_bit(KIF_KICKED, &(iocb)->ki_flags)

#define kiocbSetLocked(iocb)	set_bit(KIF_LOCKED, &(iocb)->ki_flags)
#define kiocbSetKicked(iocb)	set_bit(KIF_KICKED, &(iocb)->ki_flags)
#define kiocbSetCancelled(iocb)	set_bit(KIF_CANCELLED, &(iocb)->ki_flags)

#define kiocbClearLocked(iocb)	clear_bit(KIF_LOCKED, &(iocb)->ki_flags)
#define kiocbClearKicked(iocb)	clear_bit(KIF_KICKED, &(iocb)->ki_flags)
#define kiocbClearCancelled(iocb)	clear_bit(KIF_CANCELLED, &(iocb)->ki_flags)

#define kiocbIsLocked(iocb)	test_bit(KIF_LOCKED, &(iocb)->ki_flags)
#define kiocbIsKicked(iocb)	test_bit(KIF_KICKED, &(iocb)->ki_flags)
#define kiocbIsCancelled(iocb)	test_bit(KIF_CANCELLED, &(iocb)->ki_flags)
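/*
 * Usage note: the Try variants wrap test_and_set_bit(), so they return the
 * previous bit value -- a non-zero result means the iocb was already locked
 * (or kicked) by someone else.  A typical (illustrative) caller:
 *
 *	if (!kiocbTryLock(iocb)) {
 *		// this path now owns the iocb
 *		...
 *		kiocbClearLocked(iocb);
 *	}
 */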

struct kiocb {
	struct list_head	ki_run_list;	/* link on the kioctx run_list
						 * while this iocb is kicked */
	long			ki_flags;	/* KIF_* bits above */
	int			ki_users;	/* reference count; dropped
						 * via aio_put_req() */
	unsigned		ki_key;		/* id of this request */

	struct file		*ki_filp;
	struct kioctx		*ki_ctx;	/* may be NULL for sync ops */
	int			(*ki_cancel)(struct kiocb *, struct io_event *);
	ssize_t			(*ki_retry)(struct kiocb *);
	void			(*ki_dtor)(struct kiocb *);

	struct list_head	ki_list;	/* the aio core uses this
						 * for cancellation */

	union {
		void __user		*user;
		struct task_struct	*tsk;
	} ki_obj;
	__u64			ki_user_data;	/* user's data for completion */
	loff_t			ki_pos;
	/* State that we remember to be able to restart/retry */
	unsigned short		ki_opcode;
	size_t			ki_nbytes;	/* copy of iocb->aio_nbytes */
	char __user		*ki_buf;	/* remaining iocb->aio_buf */
	size_t			ki_left;	/* remaining bytes */
	wait_queue_t		ki_wait;
	long			ki_retried;	/* just for testing */
	long			ki_kicked;	/* just for testing */
	long			ki_queued;	/* just for testing */

	void			*private;
};

#define is_sync_kiocb(iocb)	((iocb)->ki_key == KIOCB_SYNC_KEY)
#define init_sync_kiocb(x, filp)			\
	do {						\
		struct task_struct *tsk = current;	\
		(x)->ki_flags = 0;			\
		(x)->ki_users = 1;			\
		(x)->ki_key = KIOCB_SYNC_KEY;		\
		(x)->ki_filp = (filp);			\
		(x)->ki_ctx = &tsk->active_mm->default_kioctx;	\
		(x)->ki_cancel = NULL;			\
		(x)->ki_dtor = NULL;			\
		(x)->ki_obj.tsk = tsk;			\
		(x)->ki_user_data = 0;			\
		init_wait((&(x)->ki_wait));		\
	} while (0)
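/*
 * Illustrative sketch of how the synchronous helpers fit together (roughly
 * what a generic sync read path does; filp, buf, len and ppos are assumed
 * to come from the caller):
 *
 *	struct kiocb kiocb;
 *	ssize_t ret;
 *
 *	init_sync_kiocb(&kiocb, filp);
 *	kiocb.ki_pos = *ppos;
 *	ret = filp->f_op->aio_read(&kiocb, buf, len, kiocb.ki_pos);
 *	if (ret == -EIOCBQUEUED)
 *		ret = wait_on_sync_kiocb(&kiocb);
 *	*ppos = kiocb.ki_pos;
 */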

#define AIO_RING_MAGIC			0xa10a10a1
#define AIO_RING_COMPAT_FEATURES	1
#define AIO_RING_INCOMPAT_FEATURES	0
struct aio_ring {
	unsigned	id;	/* kernel internal index number */
	unsigned	nr;	/* number of io_events */
	unsigned	head;
	unsigned	tail;

	unsigned	magic;
	unsigned	compat_features;
	unsigned	incompat_features;
	unsigned	header_length;	/* size of aio_ring */

	struct io_event		io_events[0];
}; /* 128 bytes + ring size */

#define aio_ring_avail(info, ring)	(((ring)->head + (info)->nr - 1 - (ring)->tail) % (info)->nr)
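/*
 * aio_ring_avail() follows the usual circular-buffer convention of keeping
 * one slot unused, so a full ring (tail one slot behind head) can be told
 * apart from an empty one (head == tail).  For example, with nr == 128,
 * head == 10 and tail == 9 the ring is full and the macro yields 0; with
 * head == tail it yields 127.
 */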

#define AIO_RING_PAGES	8
struct aio_ring_info {
	unsigned long		mmap_base;
	unsigned long		mmap_size;

	struct page		**ring_pages;
	spinlock_t		ring_lock;
	long			nr_pages;

	unsigned		nr, tail;

	struct page		*internal_pages[AIO_RING_PAGES];
};

struct kioctx {
	atomic_t		users;
	int			dead;
	struct mm_struct	*mm;

	/* This needs improving */
	unsigned long		user_id;
	struct kioctx		*next;

	wait_queue_head_t	wait;

	spinlock_t		ctx_lock;

	int			reqs_active;
	struct list_head	active_reqs;	/* used for cancellation */
	struct list_head	run_list;	/* used for kicked reqs */

	unsigned		max_reqs;

	struct aio_ring_info	ring_info;

	struct work_struct	wq;
};

/* prototypes */
extern unsigned aio_max_size;

extern ssize_t FASTCALL(wait_on_sync_kiocb(struct kiocb *iocb));
extern int FASTCALL(aio_put_req(struct kiocb *iocb));
extern void FASTCALL(kick_iocb(struct kiocb *iocb));
extern int FASTCALL(aio_complete(struct kiocb *iocb, long res, long res2));
extern void FASTCALL(__put_ioctx(struct kioctx *ctx));
struct mm_struct;
extern void FASTCALL(exit_aio(struct mm_struct *mm));
/* semi private, but used by the 32bit emulations: */
extern struct kioctx *lookup_ioctx(unsigned long ctx_id);
extern int FASTCALL(io_submit_one(struct kioctx *ctx,
			struct iocb __user *user_iocb, struct iocb *iocb));

#define get_ioctx(kioctx)	do {						\
		if (unlikely(atomic_read(&(kioctx)->users) <= 0))		\
			BUG();							\
		atomic_inc(&(kioctx)->users);					\
	} while (0)
#define put_ioctx(kioctx)	do {						\
		if (unlikely(atomic_dec_and_test(&(kioctx)->users)))		\
			__put_ioctx(kioctx);					\
		else if (unlikely(atomic_read(&(kioctx)->users) < 0))		\
			BUG();							\
	} while (0)
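/*
 * Reference counting note: lookup_ioctx() is expected to return the kioctx
 * with an extra reference already taken, so callers pair it with put_ioctx()
 * once they are done (illustrative pattern):
 *
 *	struct kioctx *ctx = lookup_ioctx(ctx_id);
 *	if (ctx) {
 *		// submit or cancel requests against ctx
 *		put_ioctx(ctx);
 *	}
 */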

#define in_aio() (!is_sync_wait(current->io_wait))
/* may be used for debugging */
#define warn_if_async()							\
do {									\
	if (in_aio()) {							\
		printk(KERN_ERR "%s(%s:%d) called in async context!\n",	\
			__FUNCTION__, __FILE__, __LINE__);		\
		dump_stack();						\
	}								\
} while (0)

#define io_wait_to_kiocb(wait) container_of(wait, struct kiocb, ki_wait)
#define is_retried_kiocb(iocb) ((iocb)->ki_retried > 1)
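/*
 * ki_wait is the wait queue entry used by the retry-based aio path: while a
 * retry is running, current->io_wait points at it (hence in_aio() above),
 * and a wake-up callback can recover the owning request with
 * io_wait_to_kiocb(wait) and kick_iocb() it instead of waking a sleeping
 * task.
 */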
static inline struct kiocb *list_kiocb(struct list_head *h)
{
	return list_entry(h, struct kiocb, ki_list);
}

/* for sysctl: */
extern atomic_t aio_nr;
extern unsigned aio_max_nr;

#endif /* __LINUX__AIO_H */