#ifndef __LINUX__AIO_H
#define __LINUX__AIO_H

#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/aio_abi.h>
#include <linux/uio.h>
#include <linux/rcupdate.h>

#include <linux/atomic.h>

struct kioctx;

#define KIOCB_SYNC_KEY		(~0U)

/* ki_flags bits */
#define KIF_CANCELLED		2

#define kiocbSetCancelled(iocb)	set_bit(KIF_CANCELLED, &(iocb)->ki_flags)

#define kiocbClearCancelled(iocb)	clear_bit(KIF_CANCELLED, &(iocb)->ki_flags)

#define kiocbIsCancelled(iocb)	test_bit(KIF_CANCELLED, &(iocb)->ki_flags)

/* is there a better place to document function pointer methods? */
/**
 * ki_retry	-	iocb forward progress callback
 * @kiocb:	The kiocb struct to advance by performing an operation.
 *
 * This callback is called when the AIO core wants a given AIO operation
 * to make forward progress.  The kiocb argument describes the operation
 * that is to be performed.  As the operation proceeds, perhaps partially,
 * ki_retry is expected to update the kiocb with progress made.  Typically
 * ki_retry is set by the AIO core, and it in turn calls file_operations
 * helpers.
 *
 * ki_retry's return value determines when the AIO operation is completed
 * and an event is generated in the AIO event ring.  Except for the special
 * return values described below, the value that is returned from ki_retry
 * is transferred directly into the completion ring as the operation's
 * resulting status.  Once this has happened ki_retry *MUST NOT* reference
 * the kiocb pointer again.
 *
 * If ki_retry returns -EIOCBQUEUED it has made a promise that aio_complete()
 * will be called on the kiocb pointer in the future.  The AIO core will
 * not ask the method again -- ki_retry must ensure forward progress.
 * aio_complete() must be called once and only once in the future; multiple
 * calls may result in undefined behaviour.
 */
struct kiocb {
	unsigned long		ki_flags;
	int			ki_users;
	unsigned		ki_key;		/* id of this request */

	struct file		*ki_filp;
	struct kioctx		*ki_ctx;	/* may be NULL for sync ops */
	int			(*ki_cancel)(struct kiocb *, struct io_event *);
	ssize_t			(*ki_retry)(struct kiocb *);
	void			(*ki_dtor)(struct kiocb *);

	union {
		void __user		*user;
		struct task_struct	*tsk;
	} ki_obj;

	__u64			ki_user_data;	/* user's data for completion */
	loff_t			ki_pos;

	void			*private;
	/* State that we remember to be able to restart/retry */
	unsigned short		ki_opcode;
	size_t			ki_nbytes;	/* copy of iocb->aio_nbytes */
	char __user		*ki_buf;	/* remaining iocb->aio_buf */
	size_t			ki_left;	/* remaining bytes */
	struct iovec		ki_inline_vec;	/* inline vector */
	struct iovec		*ki_iovec;
	unsigned long		ki_nr_segs;
	unsigned long		ki_cur_seg;

	struct list_head	ki_list;	/* the aio core uses this
						 * for cancellation */
	struct list_head	ki_batch;	/* batch allocation */

	/*
	 * If the aio_resfd field of the userspace iocb is not zero,
	 * this is the underlying eventfd context to deliver events to.
	 */
	struct eventfd_ctx	*ki_eventfd;
};
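/*
 * Illustrative sketch of a ki_retry method honouring the contract
 * documented above; it is compiled out and is not part of this header.
 * The helpers example_try_io() and example_queue_async() are
 * hypothetical stand-ins for driver-specific code.
 */
#if 0
static ssize_t example_ki_retry(struct kiocb *iocb)
{
	ssize_t ret;

	/* Try to perform (part of) the transfer without blocking. */
	ret = example_try_io(iocb->ki_filp, iocb->ki_buf, iocb->ki_left,
			     iocb->ki_pos);
	if (ret != -EAGAIN)
		/*
		 * Done (or failed): this value goes straight into the
		 * completion ring, and the kiocb must not be referenced
		 * again once it has been reported.
		 */
		return ret;

	/*
	 * Hand the request to machinery that guarantees forward progress
	 * and will call aio_complete(iocb, res, res2) exactly once when
	 * the operation finishes.
	 */
	example_queue_async(iocb);
	return -EIOCBQUEUED;
}
#endif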
static inline bool is_sync_kiocb(struct kiocb *kiocb)
{
	return kiocb->ki_key == KIOCB_SYNC_KEY;
}

static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
{
	*kiocb = (struct kiocb) {
			.ki_users = 1,
			.ki_key = KIOCB_SYNC_KEY,
			.ki_filp = filp,
			.ki_obj.tsk = current,
		};
}

#define AIO_RING_MAGIC			0xa10a10a1
#define AIO_RING_COMPAT_FEATURES	1
#define AIO_RING_INCOMPAT_FEATURES	0
struct aio_ring {
	unsigned	id;	/* kernel internal index number */
	unsigned	nr;	/* number of io_events */
	unsigned	head;
	unsigned	tail;

	unsigned	magic;
	unsigned	compat_features;
	unsigned	incompat_features;
	unsigned	header_length;	/* size of aio_ring */


	struct io_event		io_events[0];
}; /* 128 bytes + ring size */

#define AIO_RING_PAGES	8
struct aio_ring_info {
	unsigned long		mmap_base;
	unsigned long		mmap_size;

	struct page		**ring_pages;
	spinlock_t		ring_lock;
	long			nr_pages;

	unsigned		nr, tail;

	struct page		*internal_pages[AIO_RING_PAGES];
};

/*
 * Free event slots in the ring.  One slot is always left unused so that
 * a full ring can be distinguished from an empty one (head == tail),
 * hence the "- 1": e.g. with nr == 128, head == 10 and tail == 5 there
 * are 123 events pending and (10 + 128 - 1 - 5) % 128 == 4 slots free.
 */
static inline unsigned aio_ring_avail(struct aio_ring_info *info,
					struct aio_ring *ring)
{
	return (ring->head + info->nr - 1 - ring->tail) % info->nr;
}

struct kioctx {
	atomic_t		users;
	int			dead;

	/* This needs improving */
	unsigned long		user_id;
	struct hlist_node	list;

	wait_queue_head_t	wait;

	spinlock_t		ctx_lock;

	int			reqs_active;
	struct list_head	active_reqs;	/* used for cancellation */

	/* sys_io_setup currently limits this to an unsigned int */
	unsigned		max_reqs;

	struct aio_ring_info	ring_info;

	struct rcu_head		rcu_head;
};

/* prototypes */
#ifdef CONFIG_AIO
extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb);
extern int aio_put_req(struct kiocb *iocb);
extern int aio_complete(struct kiocb *iocb, long res, long res2);
struct mm_struct;
extern void exit_aio(struct mm_struct *mm);
extern long do_io_submit(aio_context_t ctx_id, long nr,
			 struct iocb __user *__user *iocbpp, bool compat);
#else
static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
static inline int aio_put_req(struct kiocb *iocb) { return 0; }
static inline int aio_complete(struct kiocb *iocb, long res, long res2) { return 0; }
struct mm_struct;
static inline void exit_aio(struct mm_struct *mm) { }
static inline long do_io_submit(aio_context_t ctx_id, long nr,
				struct iocb __user * __user *iocbpp,
				bool compat) { return 0; }
#endif /* CONFIG_AIO */

static inline struct kiocb *list_kiocb(struct list_head *h)
{
	return list_entry(h, struct kiocb, ki_list);
}

/* for sysctl: */
extern unsigned long aio_nr;		/* current system wide number of aio requests */
extern unsigned long aio_max_nr;	/* system wide maximum number of aio requests */

#endif /* __LINUX__AIO_H */
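/*
 * Illustrative sketch of the synchronous-iocb pattern these helpers
 * support, modelled on do_sync_read() in fs/read_write.c; it is
 * compiled out and is not part of this header.  A kiocb initialised
 * with init_sync_kiocb() is recognised by is_sync_kiocb() via
 * KIOCB_SYNC_KEY, and if the aio method queues the request,
 * wait_on_sync_kiocb() sleeps until aio_complete() wakes the task
 * recorded in ki_obj.tsk.
 */
#if 0
static ssize_t example_sync_read(struct file *filp, char __user *buf,
				 size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_left = len;
	kiocb.ki_nbytes = len;

	ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
	if (ret == -EIOCBQUEUED)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}
#endif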