Lines Matching refs:req (io_uring poll handling, io_uring/poll.c)

36 	struct io_kiocb *req;  member
73 static bool io_poll_get_ownership_slowpath(struct io_kiocb *req) in io_poll_get_ownership_slowpath() argument
82 v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs); in io_poll_get_ownership_slowpath()
85 return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK); in io_poll_get_ownership_slowpath()
94 static inline bool io_poll_get_ownership(struct io_kiocb *req) in io_poll_get_ownership() argument
96 if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS)) in io_poll_get_ownership()
97 return io_poll_get_ownership_slowpath(req); in io_poll_get_ownership()
98 return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK); in io_poll_get_ownership()
101 static void io_poll_mark_cancelled(struct io_kiocb *req) in io_poll_mark_cancelled() argument
103 atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs); in io_poll_mark_cancelled()
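Lines 73-103 above implement a combined reference count and ownership flag packed into the single atomic word req->poll_refs: the low bits count outstanding references, and whoever raises the count from zero owns the request until the count drops back. Below is a minimal userspace C11 model of that scheme; the bit values, the `request` type, and the function names are illustrative stand-ins for the kernel's IO_POLL_REF_MASK, IO_POLL_RETRY_FLAG, IO_POLL_CANCEL_FLAG, IO_POLL_REF_BIAS, and struct io_kiocb, not the kernel's actual definitions.

	#include <stdatomic.h>
	#include <stdbool.h>

	/* Illustrative bit layout; the kernel's real constants live in io_uring/poll.c. */
	#define POLL_REF_MASK	 0x0fffffffu	/* low bits: the reference count */
	#define POLL_RETRY_FLAG	 0x10000000u	/* slowpath asks the owner to re-check */
	#define POLL_CANCEL_FLAG 0x20000000u	/* the request is being cancelled */
	#define POLL_REF_BIAS	 128u		/* refs this high divert to the slowpath */

	struct request {			/* hypothetical stand-in for struct io_kiocb */
		atomic_uint poll_refs;
	};

	/*
	 * Slowpath: references are already heavily elevated, so rather than
	 * fight for ownership, leave a retry flag that tells the current
	 * owner to loop once more. Only when no references were held do we
	 * still attempt to take ownership ourselves.
	 */
	static bool get_ownership_slowpath(struct request *req)
	{
		unsigned int v = atomic_fetch_or(&req->poll_refs, POLL_RETRY_FLAG);

		if (v & POLL_REF_MASK)
			return false;
		return !(atomic_fetch_add(&req->poll_refs, 1) & POLL_REF_MASK);
	}

	/* Whoever raises the count from zero owns the request until it drops back. */
	static bool get_ownership(struct request *req)
	{
		if (atomic_load(&req->poll_refs) >= POLL_REF_BIAS)
			return get_ownership_slowpath(req);
		return !(atomic_fetch_add(&req->poll_refs, 1) & POLL_REF_MASK);
	}

	/* Cancellation only sets a flag; the current owner acts on it. */
	static void mark_cancelled(struct request *req)
	{
		atomic_fetch_or(&req->poll_refs, POLL_CANCEL_FLAG);
	}

Note the asymmetry: io_poll_mark_cancelled() never tears anything down itself; it relies on whichever task currently owns the request to notice the flag.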
106 static struct io_poll *io_poll_get_double(struct io_kiocb *req) in io_poll_get_double() argument
109 if (req->opcode == IORING_OP_POLL_ADD) in io_poll_get_double()
110 return req->async_data; in io_poll_get_double()
111 return req->apoll->double_poll; in io_poll_get_double()
114 static struct io_poll *io_poll_get_single(struct io_kiocb *req) in io_poll_get_single() argument
116 if (req->opcode == IORING_OP_POLL_ADD) in io_poll_get_single()
117 return io_kiocb_to_cmd(req, struct io_poll); in io_poll_get_single()
118 return &req->apoll->poll; in io_poll_get_single()
121 static void io_poll_req_insert(struct io_kiocb *req) in io_poll_req_insert() argument
123 struct io_hash_table *table = &req->ctx->cancel_table; in io_poll_req_insert()
124 u32 index = hash_long(req->cqe.user_data, table->hash_bits); in io_poll_req_insert()
126 lockdep_assert_held(&req->ctx->uring_lock); in io_poll_req_insert()
128 hlist_add_head(&req->hash_node, &table->hbs[index].list); in io_poll_req_insert()
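io_poll_req_insert() (line 121) buckets each armed request by its CQE user_data so cancellation can find it again later. A sketch of the same idea, assuming a hypothetical fixed-size table and a Fibonacci hash in place of the kernel's io_hash_table and hash_long():

	#include <stdint.h>
	#include <stddef.h>

	#define HASH_BITS 6			/* illustrative; the kernel sizes its table at ring setup */
	#define NBUCKETS  (1u << HASH_BITS)

	struct poll_req {			/* hypothetical stand-in for struct io_kiocb */
		uint64_t user_data;		/* copied from the SQE, echoed back in the CQE */
		struct poll_req *hash_next;	/* stand-in for the hlist_node linkage */
	};

	static struct poll_req *buckets[NBUCKETS];

	/* Fibonacci hashing, in the spirit of the kernel's hash_long(). */
	static unsigned int hash_u64(uint64_t key)
	{
		return (unsigned int)((key * 0x61c8864680b583ebull) >> (64 - HASH_BITS));
	}

	/* Insert at the bucket head; the kernel asserts uring_lock is held here. */
	static void poll_req_insert(struct poll_req *req)
	{
		unsigned int index = hash_u64(req->user_data);

		req->hash_next = buckets[index];
		buckets[index] = req;
	}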
153 static void io_poll_remove_entries(struct io_kiocb *req) in io_poll_remove_entries() argument
159 if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL))) in io_poll_remove_entries()
178 if (req->flags & REQ_F_SINGLE_POLL) in io_poll_remove_entries()
179 io_poll_remove_entry(io_poll_get_single(req)); in io_poll_remove_entries()
180 if (req->flags & REQ_F_DOUBLE_POLL) in io_poll_remove_entries()
181 io_poll_remove_entry(io_poll_get_double(req)); in io_poll_remove_entries()
193 static void __io_poll_execute(struct io_kiocb *req, int mask) in __io_poll_execute() argument
197 io_req_set_res(req, mask, 0); in __io_poll_execute()
198 req->io_task_work.func = io_poll_task_func; in __io_poll_execute()
200 trace_io_uring_task_add(req, mask); in __io_poll_execute()
202 if (!(req->flags & REQ_F_POLL_NO_LAZY)) in __io_poll_execute()
204 __io_req_task_work_add(req, flags); in __io_poll_execute()
207 static inline void io_poll_execute(struct io_kiocb *req, int res) in io_poll_execute() argument
209 if (io_poll_get_ownership(req)) in io_poll_execute()
210 __io_poll_execute(req, res); in io_poll_execute()
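io_poll_execute() (line 207) gates __io_poll_execute() behind ownership, so any number of concurrent wakeups collapse into a single scheduled execution. In the model above; queue_task_work() is a hypothetical stand-in for the result-setting and task-work queueing that __io_poll_execute() performs:

	/* Continues the poll_refs model above. */
	static void queue_task_work(struct request *req, int mask)
	{
		(void)req; (void)mask;	/* a real version would set the result and queue tw */
	}

	/* Only the ownership winner queues work; a losing waker merely leaves
	 * a reference behind, which the owner's drain loop (next sketch)
	 * will notice. */
	static void poll_execute(struct request *req, int mask)
	{
		if (get_ownership(req))
			queue_task_work(req, mask);
	}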
223 static int io_poll_check_events(struct io_kiocb *req, io_tw_token_t tw) in io_poll_check_events() argument
231 v = atomic_read(&req->poll_refs); in io_poll_check_events()
245 req->cqe.res = 0; in io_poll_check_events()
248 req->cqe.res = 0; in io_poll_check_events()
254 atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs); in io_poll_check_events()
260 if (!req->cqe.res) { in io_poll_check_events()
261 struct poll_table_struct pt = { ._key = req->apoll_events }; in io_poll_check_events()
262 req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events; in io_poll_check_events()
269 if (unlikely(!req->cqe.res)) { in io_poll_check_events()
271 if (!(req->apoll_events & EPOLLONESHOT)) in io_poll_check_events()
276 if (unlikely(req->cqe.res & EPOLLERR)) in io_poll_check_events()
277 req_set_fail(req); in io_poll_check_events()
278 if (req->apoll_events & EPOLLONESHOT) in io_poll_check_events()
282 if (!(req->flags & REQ_F_APOLL_MULTISHOT)) { in io_poll_check_events()
283 __poll_t mask = mangle_poll(req->cqe.res & in io_poll_check_events()
284 req->apoll_events); in io_poll_check_events()
286 if (!io_req_post_cqe(req, mask, IORING_CQE_F_MORE)) { in io_poll_check_events()
287 io_req_set_res(req, mask, 0); in io_poll_check_events()
291 int ret = io_poll_issue(req, tw); in io_poll_check_events()
302 req->cqe.res = 0; in io_poll_check_events()
309 } while (atomic_sub_return(v, &req->poll_refs) & IO_POLL_REF_MASK); in io_poll_check_events()
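The do/while at line 309 is the drain loop: the owner snapshots poll_refs, processes events, then subtracts the snapshot; a non-zero masked remainder means more wakeups arrived mid-processing, so the loop runs again. A hedged sketch of the same control flow, continuing the model above; handle_events() is a hypothetical stand-in for the vfs_poll()/CQE-posting body of io_poll_check_events():

	static void handle_events(struct request *req)
	{
		(void)req;
	}

	static void check_events(struct request *req)
	{
		unsigned int v;

		do {
			v = atomic_load(&req->poll_refs);
			if (v & POLL_CANCEL_FLAG)
				return;		/* the owner acts on cancellation */
			if (v & POLL_RETRY_FLAG) {
				/* the slowpath poked us: clear the flag and re-poll */
				atomic_fetch_and(&req->poll_refs, ~POLL_RETRY_FLAG);
				v &= ~POLL_RETRY_FLAG;	/* already cleared; don't subtract it */
			}
			handle_events(req);
			/*
			 * Drop every reference seen in the snapshot. If new
			 * wakeups arrived while we were busy, the masked result
			 * is non-zero and the loop runs again, mirroring the
			 * kernel's atomic_sub_return() condition.
			 */
		} while ((atomic_fetch_sub(&req->poll_refs, v) - v) & POLL_REF_MASK);
	}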
311 io_napi_add(req); in io_poll_check_events()
315 void io_poll_task_func(struct io_kiocb *req, io_tw_token_t tw) in io_poll_task_func() argument
319 ret = io_poll_check_events(req, tw); in io_poll_task_func()
321 io_kbuf_recycle(req, 0); in io_poll_task_func()
324 io_kbuf_recycle(req, 0); in io_poll_task_func()
325 __io_poll_execute(req, 0); in io_poll_task_func()
328 io_poll_remove_entries(req); in io_poll_task_func()
330 hash_del(&req->hash_node); in io_poll_task_func()
332 if (req->opcode == IORING_OP_POLL_ADD) { in io_poll_task_func()
336 poll = io_kiocb_to_cmd(req, struct io_poll); in io_poll_task_func()
337 req->cqe.res = mangle_poll(req->cqe.res & poll->events); in io_poll_task_func()
339 io_req_task_submit(req, tw); in io_poll_task_func()
342 req->cqe.res = ret; in io_poll_task_func()
343 req_set_fail(req); in io_poll_task_func()
346 io_req_set_res(req, req->cqe.res, 0); in io_poll_task_func()
347 io_req_task_complete(req, tw); in io_poll_task_func()
349 io_tw_lock(req->ctx, tw); in io_poll_task_func()
352 io_req_task_complete(req, tw); in io_poll_task_func()
354 io_req_task_submit(req, tw); in io_poll_task_func()
356 io_req_defer_failed(req, ret); in io_poll_task_func()
360 static void io_poll_cancel_req(struct io_kiocb *req) in io_poll_cancel_req() argument
362 io_poll_mark_cancelled(req); in io_poll_cancel_req()
364 io_poll_execute(req, 0); in io_poll_cancel_req()
369 static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll) in io_pollfree_wake() argument
371 io_poll_mark_cancelled(req); in io_pollfree_wake()
373 io_poll_execute(req, 0); in io_pollfree_wake()
397 struct io_kiocb *req = wqe_to_req(wait); in io_poll_wake() local
402 return io_pollfree_wake(req, poll); in io_poll_wake()
408 if (io_poll_get_ownership(req)) { in io_poll_wake()
422 req->flags &= ~REQ_F_DOUBLE_POLL; in io_poll_wake()
424 req->flags &= ~REQ_F_SINGLE_POLL; in io_poll_wake()
426 __io_poll_execute(req, mask); in io_poll_wake()
432 static bool io_poll_double_prepare(struct io_kiocb *req) in io_poll_double_prepare() argument
435 struct io_poll *poll = io_poll_get_single(req); in io_poll_double_prepare()
448 req->flags |= REQ_F_DOUBLE_POLL; in io_poll_double_prepare()
449 if (req->opcode == IORING_OP_POLL_ADD) in io_poll_double_prepare()
450 req->flags |= REQ_F_ASYNC_DATA; in io_poll_double_prepare()
461 struct io_kiocb *req = pt->req; in __io_queue_proc() local
462 unsigned long wqe_private = (unsigned long) req; in __io_queue_proc()
492 if (!io_poll_double_prepare(req)) { in __io_queue_proc()
500 req->flags |= REQ_F_SINGLE_POLL; in __io_queue_proc()
518 struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll); in io_poll_queue_proc()
521 (struct io_poll **) &pt->req->async_data); in io_poll_queue_proc()
524 static bool io_poll_can_finish_inline(struct io_kiocb *req, in io_poll_can_finish_inline() argument
527 return pt->owning || io_poll_get_ownership(req); in io_poll_can_finish_inline()
530 static void io_poll_add_hash(struct io_kiocb *req, unsigned int issue_flags) in io_poll_add_hash() argument
532 struct io_ring_ctx *ctx = req->ctx; in io_poll_add_hash()
535 io_poll_req_insert(req); in io_poll_add_hash()
545 static int __io_arm_poll_handler(struct io_kiocb *req, in __io_arm_poll_handler() argument
550 INIT_HLIST_NODE(&req->hash_node); in __io_arm_poll_handler()
552 poll->file = req->file; in __io_arm_poll_handler()
553 req->apoll_events = poll->events; in __io_arm_poll_handler()
556 ipt->req = req; in __io_arm_poll_handler()
571 atomic_set(&req->poll_refs, (int)ipt->owning); in __io_arm_poll_handler()
581 req->flags |= REQ_F_POLL_NO_LAZY; in __io_arm_poll_handler()
583 mask = vfs_poll(req->file, &ipt->pt) & poll->events; in __io_arm_poll_handler()
586 io_poll_remove_entries(req); in __io_arm_poll_handler()
588 if (!io_poll_can_finish_inline(req, ipt)) { in __io_arm_poll_handler()
589 io_poll_mark_cancelled(req); in __io_arm_poll_handler()
600 if (!io_poll_can_finish_inline(req, ipt)) { in __io_arm_poll_handler()
601 io_poll_add_hash(req, issue_flags); in __io_arm_poll_handler()
604 io_poll_remove_entries(req); in __io_arm_poll_handler()
610 io_poll_add_hash(req, issue_flags); in __io_arm_poll_handler()
613 io_poll_can_finish_inline(req, ipt)) { in __io_arm_poll_handler()
614 __io_poll_execute(req, mask); in __io_arm_poll_handler()
617 io_napi_add(req); in __io_arm_poll_handler()
624 if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1) in __io_arm_poll_handler()
625 __io_poll_execute(req, 0); in __io_arm_poll_handler()
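The cmpxchg at line 624 is how the arming path gives up the single reference it set at line 571: if poll_refs is no longer exactly 1, a wakeup raced with arming, and the arming task must queue the work itself. The same release in the userspace model:

	/*
	 * Continues the model above: release the arming path's own
	 * reference. If poll_refs is no longer exactly 1, a waker took
	 * extra references while we were arming, so process the events
	 * ourselves instead of silently dropping them.
	 */
	static void arm_release(struct request *req)
	{
		unsigned int expected = 1;

		if (!atomic_compare_exchange_strong(&req->poll_refs, &expected, 0))
			queue_task_work(req, 0);
	}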
634 struct async_poll *apoll = pt->req->apoll; in io_async_queue_proc()
647 static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req, in io_req_alloc_apoll() argument
650 struct io_ring_ctx *ctx = req->ctx; in io_req_alloc_apoll()
653 if (req->flags & REQ_F_POLLED) { in io_req_alloc_apoll()
654 apoll = req->apoll; in io_req_alloc_apoll()
666 req->apoll = apoll; in io_req_alloc_apoll()
672 int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags) in io_arm_poll_handler() argument
674 const struct io_issue_def *def = &io_issue_defs[req->opcode]; in io_arm_poll_handler()
682 if (!io_file_can_poll(req)) in io_arm_poll_handler()
684 if (!(req->flags & REQ_F_APOLL_MULTISHOT)) in io_arm_poll_handler()
691 if (req->flags & REQ_F_CLEAR_POLLIN) in io_arm_poll_handler()
699 apoll = io_req_alloc_apoll(req, issue_flags); in io_arm_poll_handler()
702 req->flags &= ~(REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL); in io_arm_poll_handler()
703 req->flags |= REQ_F_POLLED; in io_arm_poll_handler()
706 io_kbuf_recycle(req, issue_flags); in io_arm_poll_handler()
708 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags); in io_arm_poll_handler()
711 trace_io_uring_poll_arm(req, mask, apoll->poll.events); in io_arm_poll_handler()
723 struct io_kiocb *req; in io_poll_remove_all() local
732 hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) { in io_poll_remove_all()
733 if (io_match_task_safe(req, tctx, cancel_all)) { in io_poll_remove_all()
734 hlist_del_init(&req->hash_node); in io_poll_remove_all()
735 io_poll_cancel_req(req); in io_poll_remove_all()
746 struct io_kiocb *req; in io_poll_find() local
750 hlist_for_each_entry(req, &hb->list, hash_node) { in io_poll_find()
751 if (cd->data != req->cqe.user_data) in io_poll_find()
753 if (poll_only && req->opcode != IORING_OP_POLL_ADD) in io_poll_find()
756 if (io_cancel_match_sequence(req, cd->seq)) in io_poll_find()
759 return req; in io_poll_find()
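Lookup in io_poll_find() above walks the one bucket the user_data hashes to, just as insertion picked it. Continuing the table sketch from earlier:

	/* Walk the single candidate bucket, matching on user_data. */
	static struct poll_req *poll_req_find(uint64_t user_data)
	{
		struct poll_req *req;

		for (req = buckets[hash_u64(user_data)]; req; req = req->hash_next)
			if (req->user_data == user_data)
				return req;
		return NULL;
	}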
768 struct io_kiocb *req; in io_poll_file_find() local
774 hlist_for_each_entry(req, &hb->list, hash_node) { in io_poll_file_find()
775 if (io_cancel_req_match(req, cd)) in io_poll_file_find()
776 return req; in io_poll_file_find()
782 static int io_poll_disarm(struct io_kiocb *req) in io_poll_disarm() argument
784 if (!req) in io_poll_disarm()
786 if (!io_poll_get_ownership(req)) in io_poll_disarm()
788 io_poll_remove_entries(req); in io_poll_disarm()
789 hash_del(&req->hash_node); in io_poll_disarm()
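io_poll_disarm() (line 782) combines the pieces: bail if there is no request, take ownership, then tear down the wait-queue entries and the hash linkage. A sketch under the same model; the negative error returns follow kernel convention but are an assumption, not taken from the listing:

	#include <errno.h>	/* kernel-style negative error returns */

	/* remove_wait_entries() is a hypothetical stand-in for
	 * io_poll_remove_entries(). */
	static void remove_wait_entries(struct request *req)
	{
		(void)req;
	}

	static int poll_disarm(struct request *req)
	{
		if (!req)
			return -ENOENT;
		if (!get_ownership(req))
			return -EALREADY;	/* another owner will see the cancel flag */
		remove_wait_entries(req);
		/* a hash_del() analogue would unlink req from its bucket here */
		return 0;
	}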
795 struct io_kiocb *req; in __io_poll_cancel() local
799 req = io_poll_file_find(ctx, cd); in __io_poll_cancel()
801 req = io_poll_find(ctx, false, cd); in __io_poll_cancel()
803 if (req) { in __io_poll_cancel()
804 io_poll_cancel_req(req); in __io_poll_cancel()
838 int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_poll_remove_prep() argument
840 struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update); in io_poll_remove_prep()
868 int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_poll_add_prep() argument
870 struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll); in io_poll_add_prep()
878 if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP)) in io_poll_add_prep()
885 int io_poll_add(struct io_kiocb *req, unsigned int issue_flags) in io_poll_add() argument
887 struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll); in io_poll_add()
893 ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags); in io_poll_add()
895 io_req_set_res(req, ipt.result_mask, 0); in io_poll_add()
901 int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags) in io_poll_remove() argument
903 struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update); in io_poll_remove()
904 struct io_ring_ctx *ctx = req->ctx; in io_poll_remove()
946 req_set_fail(req); in io_poll_remove()
950 io_req_set_res(req, ret, 0); in io_poll_remove()