#ifndef __LINUX__AIO_H
#define __LINUX__AIO_H

#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/aio_abi.h>
#include <linux/uio.h>
#include <linux/rcupdate.h>

#include <linux/atomic.h>

struct kioctx;
struct kiocb;

/*
 * Reserved ki_key value marking a synchronous kiocb: init_sync_kiocb()
 * stores it and is_sync_kiocb() tests for it (see below).
 */
#define KIOCB_SYNC_KEY		(~0U)

/*
 * We use ki_cancel == KIOCB_CANCELLED to indicate that a kiocb has been either
 * cancelled or completed (this makes a certain amount of sense because
 * successful cancellation - io_cancel() - does deliver the completion to
 * userspace).
 *
 * And since most things don't implement kiocb cancellation and we'd really like
 * kiocb completion to be lockless when possible, we use ki_cancel to
 * synchronize cancellation and completion - we only set it to KIOCB_CANCELLED
 * with xchg() or cmpxchg(), see batch_complete_aio() and kiocb_cancel().
 */
#define KIOCB_CANCELLED		((void *) (~0ULL))

/*
 * Per-request cancellation hook, installed with kiocb_set_cancel_fn().
 * See the KIOCB_CANCELLED comment above for how installation/invocation
 * is synchronized against completion.
 * NOTE(review): the contract for the io_event argument (who fills it in,
 * meaning of the return value) is defined in fs/aio.c, not visible here.
 */
typedef int (kiocb_cancel_fn)(struct kiocb *, struct io_event *);

/* is there a better place to document function pointer methods? */
/**
 * ki_retry	-	iocb forward progress callback
 * @kiocb:	The kiocb struct to advance by performing an operation.
 *
 * This callback is called when the AIO core wants a given AIO operation
 * to make forward progress.  The kiocb argument describes the operation
 * that is to be performed.  As the operation proceeds, perhaps partially,
 * ki_retry is expected to update the kiocb with progress made.  Typically
 * ki_retry is set in the AIO core and it itself calls file_operations
 * helpers.
 *
 * ki_retry's return value determines when the AIO operation is completed
 * and an event is generated in the AIO event ring.  Except the special
 * return values described below, the value that is returned from ki_retry
 * is transferred directly into the completion ring as the operation's
 * resulting status.  Once this has happened ki_retry *MUST NOT* reference
 * the kiocb pointer again.
 *
 * If ki_retry returns -EIOCBQUEUED it has made a promise that aio_complete()
 * will be called on the kiocb pointer in the future.  The AIO core will
 * not ask the method again -- ki_retry must ensure forward progress.
 * aio_complete() must be called once and only once in the future, multiple
 * calls may result in undefined behaviour.
 */
struct kiocb {
	/*
	 * Reference count on this request; init_sync_kiocb() starts it
	 * at 1.  Presumably dropped by aio_put_req() — confirm in fs/aio.c.
	 */
	atomic_t		ki_users;
	unsigned		ki_key;		/* id of this request;
						 * KIOCB_SYNC_KEY for sync ops */

	struct file		*ki_filp;	/* file the op acts on */
	struct kioctx		*ki_ctx;	/* may be NULL for sync ops */
	kiocb_cancel_fn		*ki_cancel;	/* see KIOCB_CANCELLED above */
	ssize_t			(*ki_retry)(struct kiocb *);	/* documented above */
	void			(*ki_dtor)(struct kiocb *);	/* per-request destructor,
								 * if any */

	/*
	 * Sync ops store the submitting task here (init_sync_kiocb()
	 * sets ki_obj.tsk = current).  The 'user' member is presumably
	 * the userspace iocb pointer for async ops — verify in fs/aio.c.
	 */
	union {
		void __user		*user;
		struct task_struct	*tsk;
	} ki_obj;

	__u64			ki_user_data;	/* user's data for completion */
	loff_t			ki_pos;		/* current file position */

	void			*private;	/* scratch space; ownership/use
						 * not documented in this header */
	/* State that we remember to be able to restart/retry */
	unsigned short		ki_opcode;
	size_t			ki_nbytes;	/* copy of iocb->aio_nbytes */
	char __user		*ki_buf;	/* remaining iocb->aio_buf */
	size_t			ki_left;	/* remaining bytes */
	struct iovec		ki_inline_vec;	/* inline vector */
	struct iovec		*ki_iovec;	/* points at ki_inline_vec or an
						 * allocated array — see users */
	unsigned long		ki_nr_segs;
	unsigned long		ki_cur_seg;

	struct list_head	ki_list;	/* the aio core uses this
						 * for cancellation */
	struct list_head	ki_batch;	/* batch allocation */

	/*
	 * If the aio_resfd field of the userspace iocb is not zero,
	 * this is the underlying eventfd context to deliver events to.
	 */
	struct eventfd_ctx	*ki_eventfd;
};

/*
 * Is this a synchronous kiocb, i.e. one set up by init_sync_kiocb()
 * rather than submitted through io_submit()?
 */
static inline bool is_sync_kiocb(struct kiocb *kiocb)
{
	return kiocb->ki_key == KIOCB_SYNC_KEY;
}

/*
 * Set up an on-stack kiocb for a synchronous operation on @filp.
 * The compound-literal assignment zero-fills every field not named
 * below (in particular ki_ctx, which stays NULL for sync ops, and
 * ki_cancel/ki_list).  The single reference (ki_users == 1) belongs
 * to the caller.
 */
static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
{
	*kiocb = (struct kiocb) {
			.ki_users = ATOMIC_INIT(1),
			.ki_key = KIOCB_SYNC_KEY,
			.ki_filp = filp,
			.ki_obj.tsk = current,
		};
}

/* prototypes */
#ifdef CONFIG_AIO
extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb);
extern void aio_put_req(struct kiocb *iocb);
extern void aio_complete(struct kiocb *iocb, long res, long res2);
struct mm_struct;
extern void exit_aio(struct mm_struct *mm);
extern long do_io_submit(aio_context_t ctx_id, long nr,
			 struct iocb __user *__user *iocbpp, bool compat);
void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel);
#else
/* AIO compiled out: every operation degenerates to a no-op / success. */
static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
static inline void aio_put_req(struct kiocb *iocb) { }
static inline void aio_complete(struct kiocb *iocb, long res, long res2) { }
struct mm_struct;
static inline void exit_aio(struct mm_struct *mm) { }
static inline long do_io_submit(aio_context_t ctx_id, long nr,
				struct iocb __user * __user *iocbpp,
				bool compat) { return 0; }
static inline void kiocb_set_cancel_fn(struct kiocb *req,
				       kiocb_cancel_fn *cancel) { }
#endif /* CONFIG_AIO */

/* Map a ki_list linkage (see struct kiocb) back to its containing kiocb. */
static inline struct kiocb *list_kiocb(struct list_head *h)
{
	return list_entry(h, struct kiocb, ki_list);
}

/* for sysctl: */
extern unsigned long aio_nr;		/* current system wide number of aio requests */
extern unsigned long aio_max_nr;	/* system wide maximum number of aio requests */

#endif /* __LINUX__AIO_H */