/linux-6.15/io_uring/
uring_cmd.c
     38  if (issue_flags & IO_URING_F_UNLOCKED)   in io_req_uring_cleanup()
     86  unsigned int issue_flags)   in io_uring_cmd_del_cancelable() argument
    110  unsigned int issue_flags)   in io_uring_cmd_mark_cancelable() argument
    160  unsigned issue_flags)   in io_uring_cmd_done() argument
    247  issue_flags |= IO_URING_F_SQE128;   in io_uring_cmd()
    249  issue_flags |= IO_URING_F_CQE32;   in io_uring_cmd()
    277  unsigned int issue_flags)   in io_uring_cmd_import_fixed() argument
    289  unsigned issue_flags)   in io_uring_cmd_import_fixed_vec() argument
    300  issue_flags);   in io_uring_cmd_import_fixed_vec()
    313  unsigned int issue_flags)   in io_uring_cmd_getsockopt() argument
    [all …]
|
net.h
     28  int io_shutdown(struct io_kiocb *req, unsigned int issue_flags);
     32  int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags);
     34  int io_send(struct io_kiocb *req, unsigned int issue_flags);
     37  int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags);
     38  int io_recv(struct io_kiocb *req, unsigned int issue_flags);
     43  int io_accept(struct io_kiocb *req, unsigned int issue_flags);
     46  int io_socket(struct io_kiocb *req, unsigned int issue_flags);
     49  int io_connect(struct io_kiocb *req, unsigned int issue_flags);
     51  int io_send_zc(struct io_kiocb *req, unsigned int issue_flags);
     57  int io_bind(struct io_kiocb *req, unsigned int issue_flags);
    [all …]
|
kbuf.h
     61  unsigned int issue_flags);
     63  unsigned int issue_flags);
     68  int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);
     71  int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);
     77  bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
    111  static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)   in io_kbuf_recycle() argument
    116  return io_kbuf_recycle_legacy(req, issue_flags);   in io_kbuf_recycle()
    123  unsigned issue_flags)   in io_put_kbuf() argument
    131  int nbufs, unsigned issue_flags)   in io_put_kbufs() argument
|
openclose.c
    114  int io_openat2(struct io_kiocb *req, unsigned int issue_flags)   in io_openat2() argument
    128  if (issue_flags & IO_URING_F_NONBLOCK) {   in io_openat2()
    153  (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)))   in io_openat2()
    158  if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)   in io_openat2()
    164  ret = io_fixed_fd_install(req, issue_flags, file,   in io_openat2()
    175  int io_openat(struct io_kiocb *req, unsigned int issue_flags)   in io_openat() argument
    177  return io_openat2(req, issue_flags);   in io_openat()
    193  io_ring_submit_lock(ctx, issue_flags);   in __io_close_fixed()
    195  io_ring_submit_unlock(ctx, issue_flags);   in __io_close_fixed()
    224  int io_close(struct io_kiocb *req, unsigned int issue_flags)   in io_close() argument
    [all …]
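The io_openat2() hits above show the most common shape an issue handler takes: it is first called inline with IO_URING_F_NONBLOCK set and returns -EAGAIN if it would have to block, after which the core reissues it from an io-wq worker without the flag (the io_openat() hits at lines 175/177 are just a wrapper forwarding issue_flags). A minimal sketch of that pattern, assuming the io_uring-internal helpers io_req_set_res() and IOU_OK from io_uring/io_uring.h; io_demo_open() and do_demo_open() are hypothetical names, not kernel symbols:

#include "io_uring.h"	/* io_uring-internal: io_req_set_res(), IOU_OK, ... */

int do_demo_open(struct io_kiocb *req, bool nonblock);	/* hypothetical worker */

static int io_demo_open(struct io_kiocb *req, unsigned int issue_flags)
{
	bool nonblock = issue_flags & IO_URING_F_NONBLOCK;
	int ret;

	/* must not sleep when nonblock is true */
	ret = do_demo_open(req, nonblock);
	if (ret == -EAGAIN && nonblock)
		return -EAGAIN;		/* punt: reissued later from io-wq without NONBLOCK */

	io_req_set_res(req, ret, 0);
	return IOU_OK;
}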
|
msg_ring.c
     42  unsigned int issue_flags)   in io_lock_external_ctx() argument
     49  if (!(issue_flags & IO_URING_F_UNLOCKED)) {   in io_lock_external_ctx()
    138  struct io_msg *msg, unsigned int issue_flags)   in __io_msg_ring_data() argument
    173  return __io_msg_ring_data(target_ctx, msg, issue_flags);   in io_msg_ring_data()
    183  io_ring_submit_lock(ctx, issue_flags);   in io_msg_grab_file()
    192  io_ring_submit_unlock(ctx, issue_flags);   in io_msg_grab_file()
    203  if (unlikely(io_lock_external_ctx(target_ctx, issue_flags)))   in io_msg_install_complete()
    270  int ret = io_msg_grab_file(req, issue_flags);   in io_msg_send_fd()
    277  return io_msg_install_complete(req, issue_flags);   in io_msg_send_fd()
    314  ret = io_msg_ring_data(req, issue_flags);   in io_msg_ring()
    [all …]
|
openclose.h
      3  int __io_close_fixed(struct io_ring_ctx *ctx, unsigned int issue_flags,
      7  int io_openat(struct io_kiocb *req, unsigned int issue_flags);
     11  int io_openat2(struct io_kiocb *req, unsigned int issue_flags);
     14  int io_close(struct io_kiocb *req, unsigned int issue_flags);
     17  int io_install_fixed_fd(struct io_kiocb *req, unsigned int issue_flags);
|
net.c
    436  unsigned int issue_flags)   in io_req_msg_cleanup() argument
    438  io_netmsg_recycle(req, issue_flags);   in io_req_msg_cleanup()
    482  unsigned issue_flags)   in io_send_finish() argument
    555  io_req_msg_cleanup(req, issue_flags);   in io_sendmsg()
    674  io_req_msg_cleanup(req, issue_flags);   in io_send()
    833  issue_flags);   in io_recv_finish()
    879  io_req_msg_cleanup(req, issue_flags);   in io_recv_finish()
   1047  io_kbuf_recycle(req, issue_flags);   in io_recvmsg()
   1187  io_kbuf_recycle(req, issue_flags);   in io_recv()
   1243  issue_flags, &zc->len);   in io_recvzc()
    [all …]
|
rw.c
    111  unsigned int issue_flags)   in __io_import_rw_buffer() argument
    133  unsigned int issue_flags)   in io_import_rw_buffer() argument
    193  io_rw_recycle(req, issue_flags);   in io_req_rw_cleanup()
    372  issue_flags);   in io_init_rw_fixed()
    396  uvec_segs, issue_flags);   in io_rw_import_reg_vec()
    644  unsigned int issue_flags)   in kiocb_done() argument
   1016  ret = __io_read(req, issue_flags);   in io_read()
   1037  ret = __io_read(req, issue_flags);   in io_read_mshot()
   1052  io_kbuf_recycle(req, issue_flags);   in io_read_mshot()
   1212  return io_read(req, issue_flags);   in io_read_fixed()
    [all …]
|
splice.c
     59  unsigned int issue_flags)   in io_splice_get_file() argument
     69  io_ring_submit_lock(ctx, issue_flags);   in io_splice_get_file()
     77  io_ring_submit_unlock(ctx, issue_flags);   in io_splice_get_file()
     81  int io_tee(struct io_kiocb *req, unsigned int issue_flags)   in io_tee() argument
     89  WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);   in io_tee()
     91  in = io_splice_get_file(req, issue_flags);   in io_tee()
    118  int io_splice(struct io_kiocb *req, unsigned int issue_flags)   in io_splice() argument
    127  WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);   in io_splice()
    129  in = io_splice_get_file(req, issue_flags);   in io_splice()
|
fs.h
      4  int io_renameat(struct io_kiocb *req, unsigned int issue_flags);
      8  int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags);
     12  int io_mkdirat(struct io_kiocb *req, unsigned int issue_flags);
     16  int io_symlinkat(struct io_kiocb *req, unsigned int issue_flags);
     19  int io_linkat(struct io_kiocb *req, unsigned int issue_flags);
|
futex.h
      7  int io_futex_wait(struct io_kiocb *req, unsigned int issue_flags);
      8  int io_futexv_wait(struct io_kiocb *req, unsigned int issue_flags);
      9  int io_futex_wake(struct io_kiocb *req, unsigned int issue_flags);
     13  unsigned int issue_flags);
     21  unsigned int issue_flags)   in io_futex_cancel() argument
|
cancel.c
    104  unsigned issue_flags)   in io_try_cancel() argument
    119  ret = io_poll_cancel(ctx, cd, issue_flags);   in io_try_cancel()
    123  ret = io_waitid_cancel(ctx, cd, issue_flags);   in io_try_cancel()
    127  ret = io_futex_cancel(ctx, cd, issue_flags);   in io_try_cancel()
    167  unsigned int issue_flags)   in __io_async_cancel() argument
    175  ret = io_try_cancel(tctx, cd, issue_flags);   in __io_async_cancel()
    184  io_ring_submit_lock(ctx, issue_flags);   in __io_async_cancel()
    194  io_ring_submit_unlock(ctx, issue_flags);   in __io_async_cancel()
    216  issue_flags);   in io_async_cancel()
    374  io_ring_submit_lock(ctx, issue_flags);   in io_cancel_remove()
    [all …]
|
rw.h
     41  int io_read(struct io_kiocb *req, unsigned int issue_flags);
     42  int io_write(struct io_kiocb *req, unsigned int issue_flags);
     43  int io_read_fixed(struct io_kiocb *req, unsigned int issue_flags);
     44  int io_write_fixed(struct io_kiocb *req, unsigned int issue_flags);
     49  int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags);
|
xattr.h
      6  int io_fsetxattr(struct io_kiocb *req, unsigned int issue_flags);
      9  int io_setxattr(struct io_kiocb *req, unsigned int issue_flags);
     12  int io_fgetxattr(struct io_kiocb *req, unsigned int issue_flags);
     15  int io_getxattr(struct io_kiocb *req, unsigned int issue_flags);
|
futex.c
    115  unsigned int issue_flags)   in io_futex_cancel() argument
    117  return io_cancel_remove(ctx, cd, issue_flags, &ctx->futex_list, __io_futex_cancel);   in io_futex_cancel()
    216  int io_futexv_wait(struct io_kiocb *req, unsigned int issue_flags)   in io_futexv_wait() argument
    223  io_ring_submit_lock(ctx, issue_flags);   in io_futexv_wait()
    231  io_ring_submit_unlock(ctx, issue_flags);   in io_futexv_wait()
    267  io_ring_submit_unlock(ctx, issue_flags);   in io_futexv_wait()
    271  int io_futex_wait(struct io_kiocb *req, unsigned int issue_flags)   in io_futex_wait() argument
    284  io_ring_submit_lock(ctx, issue_flags);   in io_futex_wait()
    301  io_ring_submit_unlock(ctx, issue_flags);   in io_futex_wait()
    308  io_ring_submit_unlock(ctx, issue_flags);   in io_futex_wait()
    [all …]
|
poll.c
    534  io_ring_submit_lock(ctx, issue_flags);   in io_poll_add_hash()
    536  io_ring_submit_unlock(ctx, issue_flags);   in io_poll_add_hash()
    548  unsigned issue_flags)   in __io_arm_poll_handler() argument
    601  io_poll_add_hash(req, issue_flags);   in __io_arm_poll_handler()
    610  io_poll_add_hash(req, issue_flags);   in __io_arm_poll_handler()
    648  unsigned issue_flags)   in io_req_alloc_apoll() argument
    706  io_kbuf_recycle(req, issue_flags);   in io_arm_poll_handler()
    811  unsigned issue_flags)   in io_poll_cancel() argument
    815  io_ring_submit_lock(ctx, issue_flags);   in io_poll_cancel()
    817  io_ring_submit_unlock(ctx, issue_flags);   in io_poll_cancel()
    [all …]
|
sync.c
     40  int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)   in io_sync_file_range() argument
     46  WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);   in io_sync_file_range()
     70  int io_fsync(struct io_kiocb *req, unsigned int issue_flags)   in io_fsync() argument
     77  WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);   in io_fsync()
     99  int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)   in io_fallocate() argument
    105  WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);   in io_fallocate()
|
poll.h
     36  int io_poll_add(struct io_kiocb *req, unsigned int issue_flags);
     39  int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags);
     43  unsigned issue_flags);
     44  int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags);
|
xattr.c
    103  int io_fgetxattr(struct io_kiocb *req, unsigned int issue_flags)   in io_fgetxattr() argument
    108  WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);   in io_fgetxattr()
    115  int io_getxattr(struct io_kiocb *req, unsigned int issue_flags)   in io_getxattr() argument
    120  WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);   in io_getxattr()
    184  int io_fsetxattr(struct io_kiocb *req, unsigned int issue_flags)   in io_fsetxattr() argument
    189  WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);   in io_fsetxattr()
    196  int io_setxattr(struct io_kiocb *req, unsigned int issue_flags)   in io_setxattr() argument
    201  WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);   in io_setxattr()
|
fs.c
     81  int io_renameat(struct io_kiocb *req, unsigned int issue_flags)   in io_renameat() argument
     86  WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);   in io_renameat()
    130  int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)   in io_unlinkat() argument
    135  WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);   in io_unlinkat()
    177  int io_mkdirat(struct io_kiocb *req, unsigned int issue_flags)   in io_mkdirat() argument
    182  WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);   in io_mkdirat()
    227  int io_symlinkat(struct io_kiocb *req, unsigned int issue_flags)   in io_symlinkat() argument
    232  WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);   in io_symlinkat()
    272  int io_linkat(struct io_kiocb *req, unsigned int issue_flags)   in io_linkat() argument
    277  WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);   in io_linkat()
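Every fs.c handler above (like those in splice.c, sync.c and xattr.c) asserts WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK): these operations have no nonblocking path, so in this tree their prep routines force the request straight to io-wq and the issue side may call the sleeping VFS helper unconditionally. A hedged sketch of that prep/issue pairing; io_demo_blocking*() and do_demo_vfs_work() are hypothetical, while REQ_F_FORCE_ASYNC, io_req_set_res() and IOU_OK are the io_uring-internal names assumed here:

#include "io_uring.h"	/* io_uring-internal helpers assumed */

int do_demo_vfs_work(struct io_kiocb *req);	/* hypothetical, may sleep */

static int io_demo_blocking_prep(struct io_kiocb *req,
				 const struct io_uring_sqe *sqe)
{
	/* never issued inline, so the handler below never sees NONBLOCK */
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

static int io_demo_blocking(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret;

	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	ret = do_demo_vfs_work(req);		/* safe: we run from io-wq */
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}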
|
sync.h
      4  int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags);
      7  int io_fsync(struct io_kiocb *req, unsigned int issue_flags);
      9  int io_fallocate(struct io_kiocb *req, unsigned int issue_flags);
|
kbuf.c
    107  io_ring_submit_lock(ctx, issue_flags);   in io_kbuf_recycle_legacy()
    115  io_ring_submit_unlock(ctx, issue_flags);   in io_kbuf_recycle_legacy()
    156  unsigned int issue_flags)   in io_ring_buffer_select() argument
    196  unsigned int issue_flags)   in io_buffer_select() argument
    298  unsigned int issue_flags)   in io_buffers_select() argument
    304  io_ring_submit_lock(ctx, issue_flags);   in io_buffers_select()
    326  io_ring_submit_unlock(ctx, issue_flags);   in io_buffers_select()
    475  io_ring_submit_lock(ctx, issue_flags);   in io_remove_buffers()
    485  io_ring_submit_unlock(ctx, issue_flags);   in io_remove_buffers()
    562  io_ring_submit_lock(ctx, issue_flags);   in io_provide_buffers()
    [all …]
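The kbuf.c hits cluster around io_ring_submit_lock()/io_ring_submit_unlock(). As the msg_ring.c line 49 hit suggests, IO_URING_F_UNLOCKED means the request is running from an io-wq worker that does not hold ctx->uring_lock, so these helpers take the mutex only in that case; inline submission already holds it. A short sketch of the idiom, with io_demo_touch_buffers() as a hypothetical handler and the internal helpers assumed from io_uring/io_uring.h:

#include "io_uring.h"	/* io_ring_submit_lock/unlock(), io_req_set_res(), IOU_OK */

static int io_demo_touch_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	int ret = 0;

	/* takes ctx->uring_lock only if this context does not already hold it */
	io_ring_submit_lock(ctx, issue_flags);

	/* ... inspect or modify ctx-owned buffer lists here ... */

	io_ring_submit_unlock(ctx, issue_flags);

	io_req_set_res(req, ret, 0);
	return IOU_OK;
}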
|
/linux-6.15/include/linux/io_uring/
cmd.h
     44  unsigned int issue_flags);
     49  unsigned issue_flags);
     59  unsigned issue_flags);
     70  unsigned int issue_flags);
     79  unsigned int issue_flags)   in io_uring_cmd_import_fixed() argument
     87  unsigned issue_flags)   in io_uring_cmd_import_fixed_vec() argument
     92  u64 ret2, unsigned issue_flags)   in io_uring_cmd_done() argument
    101  unsigned int issue_flags)   in io_uring_cmd_mark_cancelable() argument
    145  unsigned int issue_flags);
    147  unsigned int issue_flags);
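cmd.h is the driver-facing side of the same flags: a file_operations->uring_cmd() handler receives the issue_flags it was called with, and IO_URING_F_SQE128/IO_URING_F_CQE32 (set in io_uring_cmd(), see the uring_cmd.c hits above) describe the ring geometry. A minimal synchronous sketch; io_uring_sqe_cmd() and the flag names come from this header, while demo_cmd, demo_would_block() and demo_execute() are hypothetical, and treating the return value as the CQE result is the core convention assumed here:

#include <linux/io_uring/cmd.h>

struct demo_cmd;					/* hypothetical payload layout */
bool demo_would_block(const struct demo_cmd *dc);	/* hypothetical */
int demo_execute(const struct demo_cmd *dc);		/* hypothetical */

static int demo_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	const struct demo_cmd *dc;

	if (!(issue_flags & IO_URING_F_SQE128))
		return -EINVAL;			/* command payload needs a 128-byte SQE */

	dc = io_uring_sqe_cmd(ioucmd->sqe);	/* payload lives in sqe->cmd[] */

	if ((issue_flags & IO_URING_F_NONBLOCK) && demo_would_block(dc))
		return -EAGAIN;			/* core retries from io-wq without NONBLOCK */

	/* synchronous completion: the return value is posted as the CQE result */
	return demo_execute(dc);
}

Drivers that cannot finish inline instead return -EIOCBQUEUED and post the CQE later with io_uring_cmd_done(), which is the path the fuse and nvme hits below take.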
|
/linux-6.15/fs/fuse/
dev_uring.c
    483  unsigned int issue_flags)   in fuse_uring_cancel() argument
    703  unsigned int issue_flags)   in fuse_uring_send_next_to_ring() argument
    720  io_uring_cmd_done(cmd, 0, 0, issue_flags);   in fuse_uring_send_next_to_ring()
    795  unsigned int issue_flags)   in fuse_uring_commit() argument
    824  unsigned int issue_flags)   in fuse_uring_next_fuse_req() argument
    920  fuse_uring_commit(ent, req, issue_flags);   in fuse_uring_commit_fetch()
    962  unsigned int issue_flags)   in fuse_uring_do_register() argument
   1117  fuse_uring_cancel(cmd, issue_flags);   in fuse_uring_cmd()
   1122  if (!(issue_flags & IO_URING_F_SQE128))   in fuse_uring_cmd()
   1177  ssize_t ret, unsigned int issue_flags)   in fuse_uring_send() argument
    [all …]
|
/linux-6.15/drivers/nvme/host/
ioctl.c
    407  unsigned issue_flags)   in nvme_uring_task_cb() argument
    502  issue_flags);   in nvme_uring_cmd_io()
    509  if (issue_flags & IO_URING_F_NONBLOCK) {   in nvme_uring_cmd_io()
    513  if (issue_flags & IO_URING_F_IOPOLL)   in nvme_uring_cmd_io()
    651  unsigned int issue_flags)   in nvme_ns_uring_cmd() argument
    656  ret = nvme_uring_cmd_checks(issue_flags);   in nvme_ns_uring_cmd()
    679  return nvme_ns_uring_cmd(ns, ioucmd, issue_flags);   in nvme_ns_chr_uring_cmd()
    770  unsigned int issue_flags)   in nvme_ns_head_chr_uring_cmd() argument
    779  ret = nvme_ns_uring_cmd(ns, ioucmd, issue_flags);   in nvme_ns_head_chr_uring_cmd()
    791  if (issue_flags & IO_URING_F_IOPOLL)   in nvme_dev_uring_cmd()
    [all …]
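The nvme_uring_task_cb() hit is the deferred half of that contract: nvme queues the passthrough command, returns -EIOCBQUEUED, and completes it from task work, where the callback is handed issue_flags describing the context it actually runs in and must forward them to io_uring_cmd_done() (the line 509/513 hits are where IO_URING_F_NONBLOCK and IO_URING_F_IOPOLL are mapped onto a nowait and a polled block request). A rough sketch of that completion path, assuming the exported helpers io_uring_cmd_complete_in_task() and io_uring_cmd_done(); all demo_*() names are hypothetical:

#include <linux/io_uring/cmd.h>

int demo_cmd_status(struct io_uring_cmd *ioucmd);	/* hypothetical */
u64 demo_cmd_result(struct io_uring_cmd *ioucmd);	/* hypothetical */

static void demo_uring_task_cb(struct io_uring_cmd *ioucmd,
			       unsigned int issue_flags)
{
	/* use the issue_flags of *this* invocation, never stale ones from submit */
	io_uring_cmd_done(ioucmd, demo_cmd_status(ioucmd),
			  demo_cmd_result(ioucmd), issue_flags);
}

/* called from the driver's completion (e.g. interrupt) context */
static void demo_complete_from_irq(struct io_uring_cmd *ioucmd)
{
	io_uring_cmd_complete_in_task(ioucmd, demo_uring_task_cb);
}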
|