Searched refs:icsk_accept_queue (Results 1 – 20 of 20) sorted by relevance
81   struct request_sock_queue icsk_accept_queue;  member
278  reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue);  in inet_csk_reqsk_queue_added()
283  return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue);  in inet_csk_reqsk_queue_len()
317  return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ?  in inet_csk_listen_poll()
364  spin_lock_init(&icsk->icsk_accept_queue.rskq_lock);  in inet_init_csk_locks()
365  spin_lock_init(&icsk->icsk_accept_queue.fastopenq.lock);  in inet_init_csk_locks()
2019 ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx); in tcp_fastopen_get_ctx()
638  if (reqsk_queue_empty(&icsk->icsk_accept_queue))  in inet_csk_wait_for_connect()
643  if (!reqsk_queue_empty(&icsk->icsk_accept_queue))  in inet_csk_wait_for_connect()
665  struct request_sock_queue *queue = &icsk->icsk_accept_queue;  in inet_csk_accept()
1050 reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);  in __inet_csk_reqsk_queue_drop()
1098 reqsk_queue_migrated(&inet_csk(nsk)->icsk_accept_queue, req);  in reqsk_timer_handler()
1125 queue = &icsk->icsk_accept_queue;  in reqsk_timer_handler()
1282 memset(&newicsk->icsk_accept_queue, 0,  in inet_csk_clone_lock()
1283 sizeof(newicsk->icsk_accept_queue));  in inet_csk_clone_lock()
1358 reqsk_queue_alloc(&icsk->icsk_accept_queue);  in inet_csk_listen_start()
1413 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;  in inet_csk_reqsk_queue_add()
[all …]
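These are the core accept-queue paths: inet_csk_listen_start() allocates the queue with reqsk_queue_alloc(), inet_csk_reqsk_queue_add() appends fully established children, and inet_csk_accept() (with inet_csk_wait_for_connect() for blocking sockets) dequeues them. From userspace the queue is only visible through listen()/accept(); a minimal sketch, assuming a hypothetical port and with error handling trimmed:

/* Connections that complete the handshake wait on the listener's
 * icsk_accept_queue until accept() dequeues them; the listen()
 * backlog bounds that queue. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int lfd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port   = htons(8080),		/* hypothetical port */
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};

	bind(lfd, (struct sockaddr *)&addr, sizeof(addr));
	listen(lfd, 128);				/* backlog caps the accept queue */

	for (;;) {
		int cfd = accept(lfd, NULL, NULL);	/* pops one established connection */

		if (cfd < 0)
			break;
		close(cfd);
	}
	close(lfd);
	return 0;
}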
43   inet_csk(sk)->icsk_accept_queue.fastopenq.ctx, 1);  in tcp_fastopen_destroy_cipher()
82   q = &inet_csk(sk)->icsk_accept_queue.fastopenq;  in tcp_fastopen_reset_cipher()
103  ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);  in tcp_fastopen_get_cipher()
244  struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;  in tcp_fastopen_create_child()
312  fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;  in tcp_fastopen_queue_check()
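tcp_fastopen_create_child() and tcp_fastopen_queue_check() run on the server when a SYN carrying data arrives from a Fast Open client. On the client side, the classic way to produce such a SYN is sendto() with MSG_FASTOPEN on an unconnected TCP socket; a hedged sketch, assuming a hypothetical server address and no error handling:

/* Client-side TCP Fast Open (Linux 3.6+): connect() and send data in one
 * step. Whether the data actually rides in the SYN depends on a cached
 * Fast Open cookie and on the server being configured for Fast Open. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in srv = {
		.sin_family = AF_INET,
		.sin_port   = htons(8080),	/* hypothetical port */
	};
	const char req[] = "hello";

	inet_pton(AF_INET, "192.0.2.1", &srv.sin_addr);	/* documentation address */

	/* Falls back to a normal handshake when no cookie is available yet. */
	sendto(fd, req, sizeof(req) - 1, MSG_FASTOPEN,
	       (struct sockaddr *)&srv, sizeof(srv));
	close(fd);
	return 0;
}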
858  if (req->num_timeout < READ_ONCE(inet_csk(sk)->icsk_accept_queue.rskq_defer_accept) &&  in tcp_check_req()
881  reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);  in tcp_check_req()
3827 WRITE_ONCE(icsk->icsk_accept_queue.rskq_defer_accept,  in do_tcp_setsockopt()
4395 val = READ_ONCE(icsk->icsk_accept_queue.rskq_defer_accept);  in do_tcp_getsockopt()
4538 val = READ_ONCE(icsk->icsk_accept_queue.fastopenq.max_qlen);  in do_tcp_getsockopt()
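The setsockopt()/getsockopt() hits show that rskq_defer_accept backs the TCP_DEFER_ACCEPT socket option: the caller passes seconds, the kernel rounds them to retransmission attempts, so the value read back can differ from the value written. A small userspace sketch; set_defer_accept() is a hypothetical helper name, TCP_DEFER_ACCEPT itself is the real option:

/* Keep a handshaked connection off the accept queue until data arrives
 * or the timeout expires. */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>

static void set_defer_accept(int lfd, int secs)
{
	socklen_t len = sizeof(secs);

	if (setsockopt(lfd, IPPROTO_TCP, TCP_DEFER_ACCEPT, &secs, sizeof(secs)) < 0)
		perror("setsockopt(TCP_DEFER_ACCEPT)");

	if (getsockopt(lfd, IPPROTO_TCP, TCP_DEFER_ACCEPT, &secs, &len) == 0)
		printf("effective defer-accept: %d s\n", secs);
}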
213 !inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen) { in __inet_listen_sk()
6608 READ_ONCE(icsk->icsk_accept_queue.rskq_defer_accept) ||  in tcp_rcv_synsent_state_process()
7130 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;  in tcp_syn_flood_action()
2912 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
94 fastopenq = &inet_csk(lsk)->icsk_accept_queue.fastopenq; in reqsk_fastopen_remove()
12 struct request_sock_queue icsk_accept_queue
89 #define ACCEPT_QUEUE(sk) (&inet_csk(sk)->icsk_accept_queue.rskq_accept_head)
615  reqsk_queue_removed(&inet_csk(parent)->icsk_accept_queue, req);  in cleanup_syn_rcv_conn()
1549 reqsk_queue_removed(&inet_csk(lsk)->icsk_accept_queue, oreq);  in add_pass_open_to_parent()
91 fastopenq = &icsk->icsk_accept_queue.fastopenq; in dump_tcp_sock()
91 fastopenq = &icsk->icsk_accept_queue.fastopenq; in dump_tcp6_sock()
573 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; in fastopen_queue_tune()
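fastopen_queue_tune() is where the TCP_FASTOPEN socket option value ends up: it becomes fastopenq.max_qlen, the cap that tcp_fastopen_queue_check() (above) enforces on connections created from data-carrying SYNs. A sketch of enabling it on a listener; listen_with_tfo() and the queue length are hypothetical, and depending on configuration the net.ipv4.tcp_fastopen sysctl may also need its server bit set:

/* Enable server-side TCP Fast Open; the qlen value is stored as
 * icsk_accept_queue.fastopenq.max_qlen. */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int listen_with_tfo(int lfd, int backlog)
{
	int qlen = 64;	/* hypothetical cap on pending Fast Open connections */

	if (setsockopt(lfd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen)) < 0)
		return -1;
	return listen(lfd, backlog);
}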
480 icsk->icsk_accept_queue.rskq_defer_accept) { in dccp_rcv_request_sent_state_process()
1500 if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))  in subflow_data_ready()
1876 struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;  in mptcp_subflow_queue_clean()
795 queue = &icsk->icsk_accept_queue; in pvcalls_back_poll()
2183 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
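Several of the hits above (the diag helpers and the /proc dumpers) only read the queue to report state. From userspace a listener's accept-queue fill level is also reachable through TCP_INFO; a hedged sketch, assuming the long-standing convention that, for sockets in LISTEN state, tcpi_unacked holds the current accept-queue length and tcpi_sacked its limit (the pair ss -lt shows as Recv-Q/Send-Q):

/* Read a listening socket's accept-queue occupancy via TCP_INFO. */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>

static void print_accept_queue(int lfd)
{
	struct tcp_info info;
	socklen_t len = sizeof(info);

	if (getsockopt(lfd, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
		printf("accept queue: %u used / %u max\n",
		       info.tcpi_unacked, info.tcpi_sacked);
}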