Lines matching references to "fc"
38 bool fuse_request_expired(struct fuse_conn *fc, struct list_head *list) in fuse_request_expired() argument
45 return time_is_before_jiffies(req->create_time + fc->timeout.req_timeout); in fuse_request_expired()
48 bool fuse_fpq_processing_expired(struct fuse_conn *fc, struct list_head *processing) in fuse_fpq_processing_expired() argument
53 if (fuse_request_expired(fc, &processing[i])) in fuse_fpq_processing_expired()
76 struct fuse_conn *fc = container_of(dwork, struct fuse_conn, in fuse_check_timeout() local
78 struct fuse_iqueue *fiq = &fc->iq; in fuse_check_timeout()
83 if (!atomic_read(&fc->num_waiting)) in fuse_check_timeout()
87 expired = fuse_request_expired(fc, &fiq->pending); in fuse_check_timeout()
92 spin_lock(&fc->bg_lock); in fuse_check_timeout()
93 expired = fuse_request_expired(fc, &fc->bg_queue); in fuse_check_timeout()
94 spin_unlock(&fc->bg_lock); in fuse_check_timeout()
98 spin_lock(&fc->lock); in fuse_check_timeout()
99 if (!fc->connected) { in fuse_check_timeout()
100 spin_unlock(&fc->lock); in fuse_check_timeout()
103 list_for_each_entry(fud, &fc->devices, entry) { in fuse_check_timeout()
106 if (fuse_request_expired(fc, &fpq->io) || in fuse_check_timeout()
107 fuse_fpq_processing_expired(fc, fpq->processing)) { in fuse_check_timeout()
109 spin_unlock(&fc->lock); in fuse_check_timeout()
115 spin_unlock(&fc->lock); in fuse_check_timeout()
117 if (fuse_uring_request_expired(fc)) in fuse_check_timeout()
121 queue_delayed_work(system_wq, &fc->timeout.work, in fuse_check_timeout()
126 fuse_abort_conn(fc); in fuse_check_timeout()
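
The fuse_check_timeout() hits above show the request-timeout mechanism: a delayed work item periodically asks whether the oldest request on each queue has been waiting longer than fc->timeout.req_timeout, requeues itself if nothing expired, and calls fuse_abort_conn() otherwise. A minimal userspace sketch of that expiry test, with hypothetical names and time(NULL) standing in for jiffies:

#include <stdbool.h>
#include <stddef.h>
#include <time.h>

struct req {
        time_t create_time;          /* stand-in for req->create_time (jiffies) */
        struct req *next;
};

struct queue {
        struct req *head;            /* requests are queued in arrival order */
        time_t req_timeout;          /* stand-in for fc->timeout.req_timeout */
};

/* Mirrors the shape of fuse_request_expired(): an empty queue never expires,
 * otherwise only the oldest entry needs to be checked against "now". */
static bool request_expired(const struct queue *q)
{
        if (q->head == NULL)
                return false;
        return time(NULL) > q->head->create_time + q->req_timeout;
}

In the listing the same check is run over the pending queue, the background queue, each device's io and processing queues, and (via fuse_uring_request_expired()) the io_uring queues before the work is requeued.
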
165 void fuse_set_initialized(struct fuse_conn *fc) in fuse_set_initialized() argument
169 fc->initialized = 1; in fuse_set_initialized()
172 static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background) in fuse_block_alloc() argument
174 return !fc->initialized || (for_background && fc->blocked) || in fuse_block_alloc()
175 (fc->io_uring && fc->connected && !fuse_uring_ready(fc)); in fuse_block_alloc()
178 static void fuse_drop_waiting(struct fuse_conn *fc) in fuse_drop_waiting() argument
185 if (atomic_dec_and_test(&fc->num_waiting) && in fuse_drop_waiting()
186 !READ_ONCE(fc->connected)) { in fuse_drop_waiting()
188 wake_up_all(&fc->blocked_waitq); in fuse_drop_waiting()
198 struct fuse_conn *fc = fm->fc; in fuse_get_req() local
205 atomic_inc(&fc->num_waiting); in fuse_get_req()
207 if (fuse_block_alloc(fc, for_background)) { in fuse_get_req()
209 if (wait_event_killable_exclusive(fc->blocked_waitq, in fuse_get_req()
210 !fuse_block_alloc(fc, for_background))) in fuse_get_req()
217 if (!fc->connected) in fuse_get_req()
221 if (fc->conn_error) in fuse_get_req()
228 wake_up(&fc->blocked_waitq); in fuse_get_req()
232 req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns); in fuse_get_req()
247 fsuid = no_idmap ? current_fsuid() : mapped_fsuid(idmap, fc->user_ns); in fuse_get_req()
248 fsgid = no_idmap ? current_fsgid() : mapped_fsgid(idmap, fc->user_ns); in fuse_get_req()
249 req->in.h.uid = from_kuid(fc->user_ns, fsuid); in fuse_get_req()
250 req->in.h.gid = from_kgid(fc->user_ns, fsgid); in fuse_get_req()
261 fuse_drop_waiting(fc); in fuse_get_req()
267 struct fuse_conn *fc = req->fm->fc; in fuse_put_request() local
275 spin_lock(&fc->bg_lock); in fuse_put_request()
276 if (!fc->blocked) in fuse_put_request()
277 wake_up(&fc->blocked_waitq); in fuse_put_request()
278 spin_unlock(&fc->bg_lock); in fuse_put_request()
283 fuse_drop_waiting(fc); in fuse_put_request()
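
fuse_get_req() bumps fc->num_waiting and then sleeps on fc->blocked_waitq for as long as fuse_block_alloc() says allocation must wait: the connection has not completed INIT yet, or this is a background request while fc->blocked is set. A rough pthread analogue of that gate (hypothetical names; the io_uring readiness clause is ignored):

#include <pthread.h>
#include <stdbool.h>

struct conn {
        pthread_mutex_t lock;
        pthread_cond_t  blocked_waitq;   /* stand-in for fc->blocked_waitq */
        bool initialized;                /* set once the INIT reply arrives */
        bool blocked;                    /* background queue is saturated */
        int  num_waiting;                /* stand-in for fc->num_waiting */
};

static bool block_alloc(const struct conn *c, bool for_background)
{
        return !c->initialized || (for_background && c->blocked);
}

/* Analogue of the wait_event_killable_exclusive() loop in fuse_get_req(). */
static void wait_for_alloc(struct conn *c, bool for_background)
{
        pthread_mutex_lock(&c->lock);
        c->num_waiting++;
        while (block_alloc(c, for_background))
                pthread_cond_wait(&c->blocked_waitq, &c->lock);
        pthread_mutex_unlock(&c->lock);
}
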
403 void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget, in fuse_queue_forget() argument
406 struct fuse_iqueue *fiq = &fc->iq; in fuse_queue_forget()
414 static void flush_bg_queue(struct fuse_conn *fc) in flush_bg_queue() argument
416 struct fuse_iqueue *fiq = &fc->iq; in flush_bg_queue()
418 while (fc->active_background < fc->max_background && in flush_bg_queue()
419 !list_empty(&fc->bg_queue)) { in flush_bg_queue()
422 req = list_first_entry(&fc->bg_queue, struct fuse_req, list); in flush_bg_queue()
424 fc->active_background++; in flush_bg_queue()
440 struct fuse_conn *fc = fm->fc; in fuse_request_end() local
441 struct fuse_iqueue *fiq = &fc->iq; in fuse_request_end()
460 spin_lock(&fc->bg_lock); in fuse_request_end()
462 if (fc->num_background == fc->max_background) { in fuse_request_end()
463 fc->blocked = 0; in fuse_request_end()
464 wake_up(&fc->blocked_waitq); in fuse_request_end()
465 } else if (!fc->blocked) { in fuse_request_end()
472 if (waitqueue_active(&fc->blocked_waitq)) in fuse_request_end()
473 wake_up(&fc->blocked_waitq); in fuse_request_end()
476 fc->num_background--; in fuse_request_end()
477 fc->active_background--; in fuse_request_end()
478 flush_bg_queue(fc); in fuse_request_end()
479 spin_unlock(&fc->bg_lock); in fuse_request_end()
494 struct fuse_iqueue *fiq = &req->fm->fc->iq; in queue_interrupt()
525 struct fuse_conn *fc = req->fm->fc; in request_wait_answer() local
526 struct fuse_iqueue *fiq = &fc->iq; in request_wait_answer()
529 if (!fc->no_interrupt) { in request_wait_answer()
569 struct fuse_iqueue *fiq = &req->fm->fc->iq; in __fuse_request_send()
583 static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args) in fuse_adjust_compat() argument
585 if (fc->minor < 4 && args->opcode == FUSE_STATFS) in fuse_adjust_compat()
588 if (fc->minor < 9) { in fuse_adjust_compat()
604 if (fc->minor < 12) { in fuse_adjust_compat()
618 struct fuse_conn *fc = req->fm->fc; in fuse_force_creds() local
621 req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid()); in fuse_force_creds()
622 req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid()); in fuse_force_creds()
628 req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns); in fuse_force_creds()
646 struct fuse_conn *fc = fm->fc; in __fuse_simple_request() local
651 atomic_inc(&fc->num_waiting); in __fuse_simple_request()
667 fuse_adjust_compat(fc, args); in __fuse_simple_request()
684 static bool fuse_request_queue_background_uring(struct fuse_conn *fc, in fuse_request_queue_background_uring() argument
687 struct fuse_iqueue *fiq = &fc->iq; in fuse_request_queue_background_uring()
704 struct fuse_conn *fc = fm->fc; in fuse_request_queue_background() local
710 atomic_inc(&fc->num_waiting); in fuse_request_queue_background()
715 if (fuse_uring_ready(fc)) in fuse_request_queue_background()
716 return fuse_request_queue_background_uring(fc, req); in fuse_request_queue_background()
719 spin_lock(&fc->bg_lock); in fuse_request_queue_background()
720 if (likely(fc->connected)) { in fuse_request_queue_background()
721 fc->num_background++; in fuse_request_queue_background()
722 if (fc->num_background == fc->max_background) in fuse_request_queue_background()
723 fc->blocked = 1; in fuse_request_queue_background()
724 list_add_tail(&req->list, &fc->bg_queue); in fuse_request_queue_background()
725 flush_bg_queue(fc); in fuse_request_queue_background()
728 spin_unlock(&fc->bg_lock); in fuse_request_queue_background()
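
The fuse_request_queue_background(), fuse_request_end() and flush_bg_queue() hits together implement background throttling: at most fc->max_background requests are active at once, new submissions set fc->blocked when the limit is reached, and completing a request clears the block and promotes the next queued entry. A self-contained toy model of those counters (hypothetical names, no locking):

#include <stdio.h>

#define MAX_BACKGROUND 4             /* stand-in for fc->max_background */

struct bg_state {
        unsigned num_background;     /* all background requests in flight */
        unsigned active_background;  /* already moved to the input queue */
        unsigned queued;             /* still sitting on bg_queue */
        int blocked;                 /* stand-in for fc->blocked */
};

/* flush_bg_queue(): promote queued requests while under the active limit. */
static void flush_bg(struct bg_state *s)
{
        while (s->active_background < MAX_BACKGROUND && s->queued > 0) {
                s->queued--;
                s->active_background++;
        }
}

/* fuse_request_queue_background(): count, maybe block new submitters, flush. */
static void queue_background(struct bg_state *s)
{
        s->num_background++;
        if (s->num_background == MAX_BACKGROUND)
                s->blocked = 1;
        s->queued++;
        flush_bg(s);
}

/* fuse_request_end(): clear blocked at the limit, drop counters, flush again. */
static void request_end(struct bg_state *s)
{
        if (s->num_background == MAX_BACKGROUND)
                s->blocked = 0;      /* the kernel also wakes fc->blocked_waitq */
        s->num_background--;
        s->active_background--;
        flush_bg(s);
}

int main(void)
{
        struct bg_state s = { 0 };
        int i;

        for (i = 0; i < MAX_BACKGROUND; i++)
                queue_background(&s);
        printf("active=%u blocked=%d\n", s.active_background, s.blocked);
        request_end(&s);
        printf("active=%u blocked=%d\n", s.active_background, s.blocked);
        return 0;
}

Queuing MAX_BACKGROUND requests prints active=4 blocked=1; completing one prints active=3 blocked=0, mirroring how fc->blocked toggles around the max_background limit.
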
766 struct fuse_iqueue *fiq = &fm->fc->iq; in fuse_simple_notify_reply()
1360 static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq, in fuse_read_forget() argument
1365 if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL) in fuse_read_forget()
1384 struct fuse_conn *fc = fud->fc; in fuse_dev_do_read() local
1385 struct fuse_iqueue *fiq = &fc->iq; in fuse_dev_do_read()
1407 fc->max_write)) in fuse_dev_do_read()
1426 err = fc->aborted ? -ECONNABORTED : -ENODEV; in fuse_dev_do_read()
1438 return fuse_read_forget(fc, fiq, cs, nbytes); in fuse_dev_do_read()
1482 err = fc->aborted ? -ECONNABORTED : -ENODEV; in fuse_dev_do_read()
1597 static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size, in fuse_notify_poll() argument
1611 return fuse_notify_poll_wakeup(fc, &outarg); in fuse_notify_poll()
1618 static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size, in fuse_notify_inval_inode() argument
1632 down_read(&fc->killsb); in fuse_notify_inval_inode()
1633 err = fuse_reverse_inval_inode(fc, outarg.ino, in fuse_notify_inval_inode()
1635 up_read(&fc->killsb); in fuse_notify_inval_inode()
1643 static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size, in fuse_notify_inval_entry() argument
1660 if (outarg.namelen > fc->name_max) in fuse_notify_inval_entry()
1680 down_read(&fc->killsb); in fuse_notify_inval_entry()
1681 err = fuse_reverse_inval_entry(fc, outarg.parent, 0, &name, outarg.flags); in fuse_notify_inval_entry()
1682 up_read(&fc->killsb); in fuse_notify_inval_entry()
1692 static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size, in fuse_notify_delete() argument
1709 if (outarg.namelen > fc->name_max) in fuse_notify_delete()
1729 down_read(&fc->killsb); in fuse_notify_delete()
1730 err = fuse_reverse_inval_entry(fc, outarg.parent, outarg.child, &name, 0); in fuse_notify_delete()
1731 up_read(&fc->killsb); in fuse_notify_delete()
1741 static int fuse_notify_store(struct fuse_conn *fc, unsigned int size, in fuse_notify_store() argument
1769 down_read(&fc->killsb); in fuse_notify_store()
1772 inode = fuse_ilookup(fc, nodeid, NULL); in fuse_notify_store()
1821 up_read(&fc->killsb); in fuse_notify_store()
1853 struct fuse_conn *fc = fm->fc; in fuse_retrieve() local
1862 num = min(outarg->size, fc->max_write); in fuse_retrieve()
1869 num_pages = min(num_pages, fc->max_pages); in fuse_retrieve()
1924 static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size, in fuse_notify_retrieve() argument
1943 down_read(&fc->killsb); in fuse_notify_retrieve()
1947 inode = fuse_ilookup(fc, nodeid, &fm); in fuse_notify_retrieve()
1952 up_read(&fc->killsb); in fuse_notify_retrieve()
1974 static void fuse_resend(struct fuse_conn *fc) in fuse_resend() argument
1978 struct fuse_iqueue *fiq = &fc->iq; in fuse_resend()
1982 spin_lock(&fc->lock); in fuse_resend()
1983 if (!fc->connected) { in fuse_resend()
1984 spin_unlock(&fc->lock); in fuse_resend()
1988 list_for_each_entry(fud, &fc->devices, entry) { in fuse_resend()
1996 spin_unlock(&fc->lock); in fuse_resend()
2018 static int fuse_notify_resend(struct fuse_conn *fc) in fuse_notify_resend() argument
2020 fuse_resend(fc); in fuse_notify_resend()
2024 static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, in fuse_notify() argument
2032 return fuse_notify_poll(fc, size, cs); in fuse_notify()
2035 return fuse_notify_inval_inode(fc, size, cs); in fuse_notify()
2038 return fuse_notify_inval_entry(fc, size, cs); in fuse_notify()
2041 return fuse_notify_store(fc, size, cs); in fuse_notify()
2044 return fuse_notify_retrieve(fc, size, cs); in fuse_notify()
2047 return fuse_notify_delete(fc, size, cs); in fuse_notify()
2050 return fuse_notify_resend(fc); in fuse_notify()
2110 struct fuse_conn *fc = fud->fc; in fuse_dev_do_write() local
2132 err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs); in fuse_dev_do_write()
2160 fc->no_interrupt = 1; in fuse_dev_do_write()
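
The fuse_dev_do_write() and fuse_notify() hits show how the daemon's writes are demultiplexed: a header whose unique field is zero is not a reply but a notification, and its error field is reused to carry the notification code that fuse_notify() switches on. A simplified sketch of that dispatch, with hypothetical names and codes standing in for the FUSE_NOTIFY_* values:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct out_header {
        uint32_t len;
        int32_t  error;              /* notification code when unique == 0 */
        uint64_t unique;
};

enum notify_code {                   /* stand-ins for the FUSE_NOTIFY_* codes */
        NOTIFY_POLL = 1, NOTIFY_INVAL_INODE, NOTIFY_INVAL_ENTRY,
        NOTIFY_STORE, NOTIFY_RETRIEVE, NOTIFY_DELETE, NOTIFY_RESEND,
};

static const char *notify_name(int code)
{
        switch (code) {
        case NOTIFY_POLL:        return "poll wakeup";
        case NOTIFY_INVAL_INODE: return "invalidate inode";
        case NOTIFY_INVAL_ENTRY: return "invalidate entry";
        case NOTIFY_STORE:       return "store data";
        case NOTIFY_RETRIEVE:    return "retrieve data";
        case NOTIFY_DELETE:      return "delete entry";
        case NOTIFY_RESEND:      return "resend pending requests";
        default:                 return NULL;
        }
}

/* Mirrors the top of fuse_dev_do_write(): unique == 0 means "notification". */
static int do_write(const struct out_header *oh)
{
        if (oh->unique == 0) {
                const char *what = notify_name(oh->error);

                if (what == NULL)
                        return -EINVAL;
                printf("notification: %s\n", what);
                return 0;
        }
        /* otherwise oh->unique identifies the request this reply completes */
        return 0;
}
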
2321 fiq = &fud->fc->iq; in fuse_dev_poll()
2347 static void end_polls(struct fuse_conn *fc) in end_polls() argument
2351 p = rb_first(&fc->polled_files); in end_polls()
2380 void fuse_abort_conn(struct fuse_conn *fc) in fuse_abort_conn() argument
2382 struct fuse_iqueue *fiq = &fc->iq; in fuse_abort_conn()
2384 spin_lock(&fc->lock); in fuse_abort_conn()
2385 if (fc->connected) { in fuse_abort_conn()
2391 if (fc->timeout.req_timeout) in fuse_abort_conn()
2392 cancel_delayed_work(&fc->timeout.work); in fuse_abort_conn()
2395 spin_lock(&fc->bg_lock); in fuse_abort_conn()
2396 fc->connected = 0; in fuse_abort_conn()
2397 spin_unlock(&fc->bg_lock); in fuse_abort_conn()
2399 fuse_set_initialized(fc); in fuse_abort_conn()
2400 list_for_each_entry(fud, &fc->devices, entry) { in fuse_abort_conn()
2421 spin_lock(&fc->bg_lock); in fuse_abort_conn()
2422 fc->blocked = 0; in fuse_abort_conn()
2423 fc->max_background = UINT_MAX; in fuse_abort_conn()
2424 flush_bg_queue(fc); in fuse_abort_conn()
2425 spin_unlock(&fc->bg_lock); in fuse_abort_conn()
2437 end_polls(fc); in fuse_abort_conn()
2438 wake_up_all(&fc->blocked_waitq); in fuse_abort_conn()
2439 spin_unlock(&fc->lock); in fuse_abort_conn()
2447 fuse_uring_abort(fc); in fuse_abort_conn()
2449 spin_unlock(&fc->lock); in fuse_abort_conn()
2454 void fuse_wait_aborted(struct fuse_conn *fc) in fuse_wait_aborted() argument
2458 wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0); in fuse_wait_aborted()
2460 fuse_uring_wait_stopped_queues(fc); in fuse_wait_aborted()
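
fuse_drop_waiting() and fuse_wait_aborted() form the teardown handshake visible above: once the connection is no longer connected, the last in-flight request to drop fc->num_waiting wakes fc->blocked_waitq, on which the aborting thread waits until the count reaches zero. A pthread sketch of that pairing (hypothetical names):

#include <pthread.h>
#include <stdbool.h>

struct teardown {
        pthread_mutex_t lock;
        pthread_cond_t  blocked_waitq;   /* stand-in for fc->blocked_waitq */
        int  num_waiting;                /* stand-in for fc->num_waiting */
        bool connected;
};

/* Called as each in-flight request finishes (fuse_drop_waiting()). */
static void drop_waiting(struct teardown *t)
{
        pthread_mutex_lock(&t->lock);
        if (--t->num_waiting == 0 && !t->connected)
                pthread_cond_broadcast(&t->blocked_waitq); /* wake_up_all() */
        pthread_mutex_unlock(&t->lock);
}

/* Called by the tearing-down thread (fuse_wait_aborted()). */
static void wait_aborted(struct teardown *t)
{
        pthread_mutex_lock(&t->lock);
        while (t->num_waiting != 0)
                pthread_cond_wait(&t->blocked_waitq, &t->lock);
        pthread_mutex_unlock(&t->lock);
}
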
2468 struct fuse_conn *fc = fud->fc; in fuse_dev_release() local
2482 if (atomic_dec_and_test(&fc->dev_count)) { in fuse_dev_release()
2483 WARN_ON(fc->iq.fasync != NULL); in fuse_dev_release()
2484 fuse_abort_conn(fc); in fuse_dev_release()
2500 return fasync_helper(fd, file, on, &fud->fc->iq.fasync); in fuse_dev_fasync()
2503 static int fuse_device_clone(struct fuse_conn *fc, struct file *new) in fuse_device_clone() argument
2510 fud = fuse_dev_alloc_install(fc); in fuse_device_clone()
2515 atomic_inc(&fc->dev_count); in fuse_device_clone()
2543 res = fuse_device_clone(fud->fc, file); in fuse_dev_ioctl_clone()
2565 return fuse_backing_open(fud->fc, &map); in fuse_dev_ioctl_backing_open()
2582 return fuse_backing_close(fud->fc, backing_id); in fuse_dev_ioctl_backing_close()