xref: /xnu-11215/bsd/netinet/tcp_timer.c (revision 4f1223e8)
1 /*
2  * Copyright (c) 2000-2024 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
30  *	The Regents of the University of California.  All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions
34  * are met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce the above copyright
38  *    notice, this list of conditions and the following disclaimer in the
39  *    documentation and/or other materials provided with the distribution.
40  * 3. All advertising materials mentioning features or use of this software
41  *    must display the following acknowledgement:
42  *	This product includes software developed by the University of
43  *	California, Berkeley and its contributors.
44  * 4. Neither the name of the University nor the names of its contributors
45  *    may be used to endorse or promote products derived from this software
46  *    without specific prior written permission.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58  * SUCH DAMAGE.
59  *
60  *	@(#)tcp_timer.c	8.2 (Berkeley) 5/24/95
61  * $FreeBSD: src/sys/netinet/tcp_timer.c,v 1.34.2.11 2001/08/22 00:59:12 silby Exp $
62  */
63 
64 #include "tcp_includes.h"
65 
66 #include <sys/param.h>
67 #include <sys/systm.h>
68 #include <sys/kernel.h>
69 #include <sys/mbuf.h>
70 #include <sys/sysctl.h>
71 #include <sys/socket.h>
72 #include <sys/socketvar.h>
73 #include <sys/protosw.h>
74 #include <sys/domain.h>
75 #include <sys/mcache.h>
76 #include <sys/queue.h>
77 #include <kern/locks.h>
78 #include <kern/cpu_number.h>    /* before tcp_seq.h, for tcp_random18() */
79 #include <mach/boolean.h>
80 
81 #include <net/route.h>
82 #include <net/if_var.h>
83 #include <net/ntstat.h>
84 
85 #include <netinet/in.h>
86 #include <netinet/in_systm.h>
87 #include <netinet/in_pcb.h>
88 #include <netinet/in_var.h>
89 #include <netinet6/in6_pcb.h>
90 #include <netinet/ip_var.h>
91 #include <netinet/tcp.h>
92 #include <netinet/tcp_cache.h>
93 #include <netinet/tcp_fsm.h>
94 #include <netinet/tcp_seq.h>
95 #include <netinet/tcp_timer.h>
96 #include <netinet/tcp_var.h>
97 #include <netinet/tcp_cc.h>
98 #include <netinet6/tcp6_var.h>
99 #include <netinet/tcpip.h>
100 #include <netinet/tcp_log.h>
101 
102 #include <sys/kdebug.h>
103 #include <mach/sdt.h>
104 #include <netinet/mptcp_var.h>
105 #include <net/content_filter.h>
106 #include <net/sockaddr_utils.h>
107 
108 /* Max number of times a stretch ack can be delayed on a connection */
109 #define TCP_STRETCHACK_DELAY_THRESHOLD  5
110 
111 /*
112  * If the host processor has been sleeping for too long, this is the threshold
113  * used to avoid sending stale retransmissions.
114  */
115 #define TCP_SLEEP_TOO_LONG      (10 * 60 * 1000) /* 10 minutes in ms */
116 
117 /* tcp timer list */
118 struct tcptimerlist tcp_timer_list;
119 
120 /* List of pcbs in timewait state, protected by tcbinfo's ipi_lock */
121 struct tcptailq tcp_tw_tailq;
122 
123 
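/*
 * Sysctl handler that converts between the value exported to userspace
 * (milliseconds) and the value stored in the kernel (TCP_RETRANSHZ ticks).
 * On read, the stored tick count is scaled to milliseconds; on write, the
 * supplied millisecond value is scaled back to ticks, rejecting values
 * that would truncate to 0 ticks or overflow an int.
 */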
124 static int
125 sysctl_msec_to_ticks SYSCTL_HANDLER_ARGS
126 {
127 #pragma unused(arg2)
128 	int error, temp;
129 	long s, tt;
130 
131 	tt = *(int *)arg1;
132 	s = tt * 1000 / TCP_RETRANSHZ;
133 	if (tt < 0 || s > INT_MAX) {
134 		return EINVAL;
135 	}
136 	temp = (int)s;
137 
138 	error = sysctl_handle_int(oidp, &temp, 0, req);
139 	if (error || !req->newptr) {
140 		return error;
141 	}
142 
143 	tt = (long)temp * TCP_RETRANSHZ / 1000;
144 	if (tt < 1 || tt > INT_MAX) {
145 		return EINVAL;
146 	}
147 
148 	*(int *)arg1 = (int)tt;
149 	SYSCTL_SKMEM_UPDATE_AT_OFFSET(arg2, *(int*)arg1);
150 	return 0;
151 }
152 
153 #if SYSCTL_SKMEM
154 int     tcp_keepinit = TCPTV_KEEP_INIT;
155 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit,
156     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
157     &tcp_keepinit, offsetof(skmem_sysctl, tcp.keepinit),
158     sysctl_msec_to_ticks, "I", "");
159 
160 int     tcp_keepidle = TCPTV_KEEP_IDLE;
161 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle,
162     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
163     &tcp_keepidle, offsetof(skmem_sysctl, tcp.keepidle),
164     sysctl_msec_to_ticks, "I", "");
165 
166 int     tcp_keepintvl = TCPTV_KEEPINTVL;
167 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl,
168     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
169     &tcp_keepintvl, offsetof(skmem_sysctl, tcp.keepintvl),
170     sysctl_msec_to_ticks, "I", "");
171 
172 SYSCTL_SKMEM_TCP_INT(OID_AUTO, keepcnt,
173     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
174     int, tcp_keepcnt, TCPTV_KEEPCNT, "number of times to repeat keepalive");
175 
176 int     tcp_msl = TCPTV_MSL;
177 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl,
178     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
179     &tcp_msl, offsetof(skmem_sysctl, tcp.msl),
180     sysctl_msec_to_ticks, "I", "Maximum segment lifetime");
181 #else /* SYSCTL_SKMEM */
182 int     tcp_keepinit;
183 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit,
184     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
185     &tcp_keepinit, 0, sysctl_msec_to_ticks, "I", "");
186 
187 int     tcp_keepidle;
188 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle,
189     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
190     &tcp_keepidle, 0, sysctl_msec_to_ticks, "I", "");
191 
192 int     tcp_keepintvl;
193 SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl,
194     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
195     &tcp_keepintvl, 0, sysctl_msec_to_ticks, "I", "");
196 
197 int     tcp_keepcnt;
198 SYSCTL_INT(_net_inet_tcp, OID_AUTO, keepcnt,
199     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
200     &tcp_keepcnt, 0, "number of times to repeat keepalive");
201 
202 int     tcp_msl;
203 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl,
204     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
205     &tcp_msl, 0, sysctl_msec_to_ticks, "I", "Maximum segment lifetime");
206 #endif /* SYSCTL_SKMEM */
207 
208 /*
209  * Avoid DoS with connections half-closed in FIN_WAIT_2
210  */
211 int     tcp_fin_timeout = TCPTV_FINWAIT2;
212 
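/*
 * Sysctl handler for net.inet.tcp.fin_timeout: accepts a value in
 * milliseconds (or -1 to restore the TCPTV_FINWAIT2 default) and stores
 * the equivalent number of TCP_RETRANSHZ ticks in tcp_fin_timeout.
 */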
213 static int
214 sysctl_tcp_fin_timeout SYSCTL_HANDLER_ARGS
215 {
216 #pragma unused(arg2)
217 	int error;
218 	int value = tcp_fin_timeout;
219 
220 	error = sysctl_handle_int(oidp, &value, 0, req);
221 	if (error != 0 || req->newptr == USER_ADDR_NULL) {
222 		return error;
223 	}
224 
225 	if (value == -1) {
226 		/* Reset to default value */
227 		value = TCPTV_FINWAIT2;
228 	} else {
229 		/* Convert from milliseconds */
230 		long big_value = value * TCP_RETRANSHZ / 1000;
231 
232 		if (big_value < 0 || big_value > INT_MAX) {
233 			return EINVAL;
234 		}
235 		value = (int)big_value;
236 	}
237 	tcp_fin_timeout = value;
238 	SYSCTL_SKMEM_UPDATE_AT_OFFSET(arg2, value);
239 	return 0;
240 }
241 
242 #if SYSCTL_SKMEM
243 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, fin_timeout,
244     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
245     &tcp_fin_timeout, offsetof(skmem_sysctl, tcp.fin_timeout),
246     sysctl_tcp_fin_timeout, "I", "");
247 #else /* SYSCTL_SKMEM */
248 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, fin_timeout,
249     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
250     &tcp_fin_timeout, 0,
251     sysctl_tcp_fin_timeout, "I", "");
252 #endif /* SYSCTL_SKMEM */
253 
254 /*
255  * Avoid DoS via TCP Robustness in Persist Condition
256  * (see http://www.ietf.org/id/draft-ananth-tcpm-persist-02.txt)
257  * by allowing a system wide maximum persistence timeout value when in
258  * Zero Window Probe mode.
259  *
260  * Expressed in milliseconds to be consistent with other timeout-related
261  * values; the TCP socket option is in seconds.
262  */
263 #if SYSCTL_SKMEM
264 u_int32_t tcp_max_persist_timeout = 0;
265 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, max_persist_timeout,
266     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
267     &tcp_max_persist_timeout, offsetof(skmem_sysctl, tcp.max_persist_timeout),
268     sysctl_msec_to_ticks, "I", "Maximum persistence timeout for ZWP");
269 #else /* SYSCTL_SKMEM */
270 u_int32_t tcp_max_persist_timeout = 0;
271 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, max_persist_timeout,
272     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
273     &tcp_max_persist_timeout, 0, sysctl_msec_to_ticks, "I",
274     "Maximum persistence timeout for ZWP");
275 #endif /* SYSCTL_SKMEM */
276 
277 SYSCTL_SKMEM_TCP_INT(OID_AUTO, always_keepalive,
278     CTLFLAG_RW | CTLFLAG_LOCKED, static int, always_keepalive, 0,
279     "Assume SO_KEEPALIVE on all TCP connections");
280 
281 /*
282  * This parameter determines how long the timer list will stay in fast or
283  * quick mode even when all connections are idle. In this state, the
284  * timer will run more frequently, anticipating new data.
285  */
286 SYSCTL_SKMEM_TCP_INT(OID_AUTO, timer_fastmode_idlemax,
287     CTLFLAG_RW | CTLFLAG_LOCKED, int, timer_fastmode_idlemax,
288     TCP_FASTMODE_IDLERUN_MAX, "Maximum idle generations in fast mode");
289 
290 /*
291  * See tcp_syn_backoff[] for interval values between SYN retransmits;
292  * the value set below defines the number of retransmits before we
293  * disable the timestamp and window scaling options during subsequent
294  * SYN retransmits.  Setting it to 0 disables the dropping of those
295  * two options.
296  */
297 SYSCTL_SKMEM_TCP_INT(OID_AUTO, broken_peer_syn_rexmit_thres,
298     CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_broken_peer_syn_rxmit_thres,
299     10, "Number of retransmitted SYNs before disabling RFC 1323 "
300     "options on local connections");
301 
302 static int tcp_timer_advanced = 0;
303 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_timer_advanced,
304     CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_timer_advanced, 0,
305     "Number of times one of the timers was advanced");
306 
307 static int tcp_resched_timerlist = 0;
308 SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_resched_timerlist,
309     CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_resched_timerlist, 0,
310     "Number of times timer list was rescheduled as part of processing a packet");
311 
312 SYSCTL_SKMEM_TCP_INT(OID_AUTO, pmtud_blackhole_detection,
313     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_pmtud_black_hole_detect, 1,
314     "Path MTU Discovery Black Hole Detection");
315 
316 SYSCTL_SKMEM_TCP_INT(OID_AUTO, pmtud_blackhole_mss,
317     CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_pmtud_black_hole_mss, 1200,
318     "Path MTU Discovery Black Hole Detection lowered MSS");
319 
320 #if (DEBUG || DEVELOPMENT)
321 int tcp_probe_if_fix_port = 0;
322 SYSCTL_INT(_net_inet_tcp, OID_AUTO, probe_if_fix_port,
323     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
324     &tcp_probe_if_fix_port, 0, "");
325 #endif /* (DEBUG || DEVELOPMENT) */
326 
327 static u_int32_t tcp_mss_rec_medium = 1200;
328 static u_int32_t tcp_mss_rec_low = 512;
329 
330 #define TCP_REPORT_STATS_INTERVAL       43200 /* 12 hours, in seconds */
331 int tcp_report_stats_interval = TCP_REPORT_STATS_INTERVAL;
332 
333 /* performed garbage collection of "used" sockets */
334 static boolean_t tcp_gc_done = FALSE;
335 
336 /* max idle probes */
337 int     tcp_maxpersistidle = TCPTV_KEEP_IDLE;
338 
339 /*
340  * The TCP delack timer is set to 100 ms. Since the processing of the timer
341  * list in fast mode will happen no faster than every 100 ms, the delayed ack
342  * timer will fire somewhere between 100 and 200 ms.
343  */
344 int     tcp_delack = TCP_RETRANSHZ / 10;
345 
346 #if MPTCP
347 /*
348  * MP_JOIN retransmission of 3rd ACK will be every 500 msecs without backoff
349  */
350 int     tcp_jack_rxmt = TCP_RETRANSHZ / 2;
351 #endif /* MPTCP */
352 
353 static boolean_t tcp_itimer_done = FALSE;
354 
355 static void tcp_remove_timer(struct tcpcb *tp);
356 static void tcp_sched_timerlist(uint32_t offset);
357 static u_int32_t tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *mode,
358     u_int16_t probe_if_index);
359 static inline void tcp_set_lotimer_index(struct tcpcb *);
360 __private_extern__ void tcp_remove_from_time_wait(struct inpcb *inp);
361 static inline void tcp_update_mss_core(struct tcpcb *tp, struct ifnet *ifp);
362 __private_extern__ void tcp_report_stats(void);
363 
364 static  u_int64_t tcp_last_report_time;
365 
366 /*
367  * Structure to store previously reported stats so that we can send
368  * incremental changes in each report interval.
369  */
370 struct tcp_last_report_stats {
371 	u_int32_t       tcps_connattempt;
372 	u_int32_t       tcps_accepts;
373 	u_int32_t       tcps_ecn_client_setup;
374 	u_int32_t       tcps_ecn_server_setup;
375 	u_int32_t       tcps_ecn_client_success;
376 	u_int32_t       tcps_ecn_server_success;
377 	u_int32_t       tcps_ecn_not_supported;
378 	u_int32_t       tcps_ecn_lost_syn;
379 	u_int32_t       tcps_ecn_lost_synack;
380 	u_int32_t       tcps_ecn_recv_ce;
381 	u_int32_t       tcps_ecn_recv_ece;
382 	u_int32_t       tcps_ecn_sent_ece;
383 	u_int32_t       tcps_ecn_conn_recv_ce;
384 	u_int32_t       tcps_ecn_conn_recv_ece;
385 	u_int32_t       tcps_ecn_conn_plnoce;
386 	u_int32_t       tcps_ecn_conn_pl_ce;
387 	u_int32_t       tcps_ecn_conn_nopl_ce;
388 	u_int32_t       tcps_ecn_fallback_synloss;
389 	u_int32_t       tcps_ecn_fallback_reorder;
390 	u_int32_t       tcps_ecn_fallback_ce;
391 
392 	/* TFO-related statistics */
393 	u_int32_t       tcps_tfo_syn_data_rcv;
394 	u_int32_t       tcps_tfo_cookie_req_rcv;
395 	u_int32_t       tcps_tfo_cookie_sent;
396 	u_int32_t       tcps_tfo_cookie_invalid;
397 	u_int32_t       tcps_tfo_cookie_req;
398 	u_int32_t       tcps_tfo_cookie_rcv;
399 	u_int32_t       tcps_tfo_syn_data_sent;
400 	u_int32_t       tcps_tfo_syn_data_acked;
401 	u_int32_t       tcps_tfo_syn_loss;
402 	u_int32_t       tcps_tfo_blackhole;
403 	u_int32_t       tcps_tfo_cookie_wrong;
404 	u_int32_t       tcps_tfo_no_cookie_rcv;
405 	u_int32_t       tcps_tfo_heuristics_disable;
406 	u_int32_t       tcps_tfo_sndblackhole;
407 
408 	/* MPTCP-related statistics */
409 	u_int32_t       tcps_mptcp_handover_attempt;
410 	u_int32_t       tcps_mptcp_interactive_attempt;
411 	u_int32_t       tcps_mptcp_aggregate_attempt;
412 	u_int32_t       tcps_mptcp_fp_handover_attempt;
413 	u_int32_t       tcps_mptcp_fp_interactive_attempt;
414 	u_int32_t       tcps_mptcp_fp_aggregate_attempt;
415 	u_int32_t       tcps_mptcp_heuristic_fallback;
416 	u_int32_t       tcps_mptcp_fp_heuristic_fallback;
417 	u_int32_t       tcps_mptcp_handover_success_wifi;
418 	u_int32_t       tcps_mptcp_handover_success_cell;
419 	u_int32_t       tcps_mptcp_interactive_success;
420 	u_int32_t       tcps_mptcp_aggregate_success;
421 	u_int32_t       tcps_mptcp_fp_handover_success_wifi;
422 	u_int32_t       tcps_mptcp_fp_handover_success_cell;
423 	u_int32_t       tcps_mptcp_fp_interactive_success;
424 	u_int32_t       tcps_mptcp_fp_aggregate_success;
425 	u_int32_t       tcps_mptcp_handover_cell_from_wifi;
426 	u_int32_t       tcps_mptcp_handover_wifi_from_cell;
427 	u_int32_t       tcps_mptcp_interactive_cell_from_wifi;
428 	u_int64_t       tcps_mptcp_handover_cell_bytes;
429 	u_int64_t       tcps_mptcp_interactive_cell_bytes;
430 	u_int64_t       tcps_mptcp_aggregate_cell_bytes;
431 	u_int64_t       tcps_mptcp_handover_all_bytes;
432 	u_int64_t       tcps_mptcp_interactive_all_bytes;
433 	u_int64_t       tcps_mptcp_aggregate_all_bytes;
434 	u_int32_t       tcps_mptcp_back_to_wifi;
435 	u_int32_t       tcps_mptcp_wifi_proxy;
436 	u_int32_t       tcps_mptcp_cell_proxy;
437 	u_int32_t       tcps_mptcp_triggered_cell;
438 };
439 
440 
441 /* Returns true if the timer is on the timer list */
442 #define TIMER_IS_ON_LIST(tp) ((tp)->t_flags & TF_TIMER_ONLIST)
443 
444 /* Run the TCP timer list at least once every hour */
445 #define TCP_TIMERLIST_MAX_OFFSET (60 * 60 * TCP_RETRANSHZ)
446 
447 
448 static void add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay);
449 static boolean_t tcp_garbage_collect(struct inpcb *, int);
450 
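/*
 * Recover the owning tcpcb from a timer list entry by backing out the
 * offset of tentry.le.le_next within struct tcpcb (container_of-style).
 */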
451 #define TIMERENTRY_TO_TP(te) (__unsafe_forge_single(struct tcpcb *, ((uintptr_t)te - offsetof(struct tcpcb, tentry.le.le_next))))
452 
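/*
 * Consistency checks on timer list linkage: panic if a neighbouring
 * element's back pointer does not point at this element.
 */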
453 #define VERIFY_NEXT_LINK(elm, field) do {       \
454 	if (LIST_NEXT((elm),field) != NULL &&   \
455 	    LIST_NEXT((elm),field)->field.le_prev !=    \
456 	        &((elm)->field.le_next))        \
457 	        panic("Bad link elm %p next->prev != elm", (elm));      \
458 } while(0)
459 
460 #define VERIFY_PREV_LINK(elm, field) do {       \
461 	if (*(elm)->field.le_prev != (elm))     \
462 	        panic("Bad link elm %p prev->next != elm", (elm));      \
463 } while(0)
464 
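/*
 * Fold the granularity required by timer index 'i' into the timer list
 * run mode: a 10ms, 100ms or 500ms processing cadence.
 */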
465 #define TCP_SET_TIMER_MODE(mode, i) do { \
466 	if (IS_TIMER_HZ_10MS(i)) \
467 	        (mode) |= TCP_TIMERLIST_10MS_MODE; \
468 	else if (IS_TIMER_HZ_100MS(i)) \
469 	        (mode) |= TCP_TIMERLIST_100MS_MODE; \
470 	else \
471 	        (mode) |= TCP_TIMERLIST_500MS_MODE; \
472 } while(0)
473 
474 #if (DEVELOPMENT || DEBUG)
475 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, mss_rec_medium,
476     CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_mss_rec_medium, 0,
477     "Medium MSS based on recommendation in link status report");
478 SYSCTL_UINT(_net_inet_tcp, OID_AUTO, mss_rec_low,
479     CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_mss_rec_low, 0,
480     "Low MSS based on recommendation in link status report");
481 
482 static int32_t tcp_change_mss_recommended = 0;
483 static int
484 sysctl_change_mss_recommended SYSCTL_HANDLER_ARGS
485 {
486 #pragma unused(oidp, arg1, arg2)
487 	int i, err = 0, changed = 0;
488 	struct ifnet *ifp;
489 	struct if_link_status ifsr;
490 	struct if_cellular_status_v1 *new_cell_sr;
491 	err = sysctl_io_number(req, tcp_change_mss_recommended,
492 	    sizeof(int32_t), &i, &changed);
493 	if (changed) {
494 		if (i < 0 || i > UINT16_MAX) {
495 			return EINVAL;
496 		}
497 		ifnet_head_lock_shared();
498 		TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
499 			if (IFNET_IS_CELLULAR(ifp)) {
500 				bzero(&ifsr, sizeof(ifsr));
501 				new_cell_sr = &ifsr.ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
502 				ifsr.ifsr_version = IF_CELLULAR_STATUS_REPORT_CURRENT_VERSION;
503 				ifsr.ifsr_len = sizeof(*new_cell_sr);
504 
505 				/* Set MSS recommended */
506 				new_cell_sr->valid_bitmask |= IF_CELL_UL_MSS_RECOMMENDED_VALID;
507 				new_cell_sr->mss_recommended = (uint16_t)i;
508 				err = ifnet_link_status_report(ifp, new_cell_sr, sizeof(new_cell_sr));
509 				if (err == 0) {
510 					tcp_change_mss_recommended = i;
511 				} else {
512 					break;
513 				}
514 			}
515 		}
516 		ifnet_head_done();
517 	}
518 	return err;
519 }
520 
521 SYSCTL_PROC(_net_inet_tcp, OID_AUTO, change_mss_recommended,
522     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_change_mss_recommended,
523     0, sysctl_change_mss_recommended, "IU", "Change MSS recommended");
524 
525 SYSCTL_INT(_net_inet_tcp, OID_AUTO, report_stats_interval,
526     CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_report_stats_interval, 0,
527     "Report stats interval");
528 #endif /* (DEVELOPMENT || DEBUG) */
529 
530 /*
531  * Compare two timers. If there is a reset of the sign bit, it is safe
532  * to assume that the timer has wrapped around. By doing a signed
533  * comparison, we take care of wrap-around such that the value with the
534  * sign bit reset is actually ahead of the other.
535  */
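/*
 * Example: with t1 + toff1 == 0x00000005 and t2 + toff2 == 0xFFFFFFF0,
 * the 32-bit subtraction wraps to 0x15, and the signed result of +21
 * correctly reports the first timer as 21 ticks ahead of the second.
 */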
536 inline int32_t
537 timer_diff(uint32_t t1, uint32_t toff1, uint32_t t2, uint32_t toff2)
538 {
539 	return (int32_t)((t1 + toff1) - (t2 + toff2));
540 }
541 
542 /*
543  * Add to tcp timewait list, delay is given in milliseconds.
544  */
545 static void
546 add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay)
547 {
548 	struct inpcbinfo *pcbinfo = &tcbinfo;
549 	struct inpcb *inp = tp->t_inpcb;
550 	uint32_t timer;
551 
552 	/* pcb list should be locked when we get here */
553 	LCK_RW_ASSERT(&pcbinfo->ipi_lock, LCK_RW_ASSERT_EXCLUSIVE);
554 
555 	/* We may get here multiple times, so check */
556 	if (!(inp->inp_flags2 & INP2_TIMEWAIT)) {
557 		pcbinfo->ipi_twcount++;
558 		inp->inp_flags2 |= INP2_TIMEWAIT;
559 
560 		/* Remove from global inp list */
561 		LIST_REMOVE(inp, inp_list);
562 	} else {
563 		TAILQ_REMOVE(&tcp_tw_tailq, tp, t_twentry);
564 	}
565 
566 	/* Compute the time at which this socket can be closed */
567 	timer = tcp_now + delay;
568 
569 	/* We will use the TCPT_2MSL timer for tracking this delay */
570 
571 	if (TIMER_IS_ON_LIST(tp)) {
572 		tcp_remove_timer(tp);
573 	}
574 	tp->t_timer[TCPT_2MSL] = timer;
575 
576 	TAILQ_INSERT_TAIL(&tcp_tw_tailq, tp, t_twentry);
577 }
578 
579 void
580 add_to_time_wait(struct tcpcb *tp, uint32_t delay)
581 {
582 	if (tp->t_inpcb->inp_socket->so_options & SO_NOWAKEFROMSLEEP) {
583 		socket_post_kev_msg_closed(tp->t_inpcb->inp_socket);
584 	}
585 
586 	tcp_del_fsw_flow(tp);
587 
588 	/* 19182803: Notify nstat that connection is closing before waiting. */
589 	nstat_pcb_detach(tp->t_inpcb);
590 
591 #if CONTENT_FILTER
592 	if ((tp->t_inpcb->inp_socket->so_flags & SOF_CONTENT_FILTER) != 0) {
593 		/* If filter present, allow filter to finish processing all queued up data before adding to time wait queue */
594 		(void) cfil_sock_tcp_add_time_wait(tp->t_inpcb->inp_socket);
595 	} else
596 #endif /* CONTENT_FILTER */
597 	{
598 		add_to_time_wait_now(tp, delay);
599 	}
600 }
601 
602 void
603 add_to_time_wait_now(struct tcpcb *tp, uint32_t delay)
604 {
605 	struct inpcbinfo *pcbinfo = &tcbinfo;
606 
607 	if (!lck_rw_try_lock_exclusive(&pcbinfo->ipi_lock)) {
608 		socket_unlock(tp->t_inpcb->inp_socket, 0);
609 		lck_rw_lock_exclusive(&pcbinfo->ipi_lock);
610 		socket_lock(tp->t_inpcb->inp_socket, 0);
611 	}
612 	add_to_time_wait_locked(tp, delay);
613 	lck_rw_done(&pcbinfo->ipi_lock);
614 
615 	inpcb_gc_sched(pcbinfo, INPCB_TIMER_LAZY);
616 }
617 
618 /* If this is on time wait queue, remove it. */
619 void
620 tcp_remove_from_time_wait(struct inpcb *inp)
621 {
622 	struct tcpcb *tp = intotcpcb(inp);
623 	if (inp->inp_flags2 & INP2_TIMEWAIT) {
624 		TAILQ_REMOVE(&tcp_tw_tailq, tp, t_twentry);
625 	}
626 }
627 
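/*
 * Try to tear down one inpcb/tcpcb; istimewait is non-zero when the pcb
 * comes from the time-wait queue.  Returns TRUE if the pcb could not be
 * reclaimed yet and the GC timer should fire again soon, FALSE if it was
 * disposed of or needs no immediate attention.  Called from tcp_gc()
 * with the pcbinfo list lock held exclusively.
 */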
628 static boolean_t
629 tcp_garbage_collect(struct inpcb *inp, int istimewait)
630 {
631 	boolean_t active = FALSE;
632 	struct socket *so, *mp_so = NULL;
633 	struct tcpcb *tp;
634 
635 	so = inp->inp_socket;
636 	tp = intotcpcb(inp);
637 
638 	if (so->so_flags & SOF_MP_SUBFLOW) {
639 		mp_so = mptetoso(tptomptp(tp)->mpt_mpte);
640 		if (!socket_try_lock(mp_so)) {
641 			mp_so = NULL;
642 			active = TRUE;
643 			goto out;
644 		}
645 		if (mpsotomppcb(mp_so)->mpp_inside > 0) {
646 			os_log(mptcp_log_handle, "%s - %lx: Still inside %d usecount %d\n", __func__,
647 			    (unsigned long)VM_KERNEL_ADDRPERM(mpsotompte(mp_so)),
648 			    mpsotomppcb(mp_so)->mpp_inside,
649 			    mp_so->so_usecount);
650 			socket_unlock(mp_so, 0);
651 			mp_so = NULL;
652 			active = TRUE;
653 			goto out;
654 		}
655 		/* We call socket_unlock with refcount further below */
656 		mp_so->so_usecount++;
657 		tptomptp(tp)->mpt_mpte->mpte_mppcb->mpp_inside++;
658 	}
659 
660 	/*
661 	 * Skip if still in use or busy; it would have been more efficient
662 	 * if we were to test so_usecount against 0, but this isn't possible
663 	 * due to the current implementation of tcp_dropdropablreq() where
664 	 * overflow sockets that are eligible for garbage collection have
665 	 * their usecounts set to 1.
666 	 */
667 	if (!lck_mtx_try_lock_spin(&inp->inpcb_mtx)) {
668 		active = TRUE;
669 		goto out;
670 	}
671 
672 	/* Check again under the lock */
673 	if (so->so_usecount > 1) {
674 		if (inp->inp_wantcnt == WNT_STOPUSING) {
675 			active = TRUE;
676 		}
677 		lck_mtx_unlock(&inp->inpcb_mtx);
678 		goto out;
679 	}
680 
681 	if (istimewait && TSTMP_GEQ(tcp_now, tp->t_timer[TCPT_2MSL]) &&
682 	    tp->t_state != TCPS_CLOSED) {
683 		/* Become a regular mutex */
684 		lck_mtx_convert_spin(&inp->inpcb_mtx);
685 		tcp_close(tp);
686 	}
687 
688 	/*
689 	 * Overflowed socket dropped from the listening queue?  Do this
690 	 * only if we are called to clean up the time wait slots, since
691 	 * tcp_dropdropablreq() considers a socket to have been fully
692 	 * dropped after add_to_time_wait() is finished.
693 	 * Also handle the case of connections getting closed by the peer
694 	 * while in the queue as seen with rdar://6422317
695 	 *
696 	 */
697 	if (so->so_usecount == 1 &&
698 	    ((istimewait && (so->so_flags & SOF_OVERFLOW)) ||
699 	    ((tp != NULL) && (tp->t_state == TCPS_CLOSED) &&
700 	    (so->so_head != NULL) &&
701 	    ((so->so_state & (SS_INCOMP | SS_CANTSENDMORE | SS_CANTRCVMORE)) ==
702 	    (SS_INCOMP | SS_CANTSENDMORE | SS_CANTRCVMORE))))) {
703 		if (inp->inp_state != INPCB_STATE_DEAD) {
704 			/* Become a regular mutex */
705 			lck_mtx_convert_spin(&inp->inpcb_mtx);
706 			if (SOCK_CHECK_DOM(so, PF_INET6)) {
707 				in6_pcbdetach(inp);
708 			} else {
709 				in_pcbdetach(inp);
710 			}
711 		}
712 		VERIFY(so->so_usecount > 0);
713 		so->so_usecount--;
714 		if (inp->inp_wantcnt == WNT_STOPUSING) {
715 			active = TRUE;
716 		}
717 		lck_mtx_unlock(&inp->inpcb_mtx);
718 		goto out;
719 	} else if (inp->inp_wantcnt != WNT_STOPUSING) {
720 		lck_mtx_unlock(&inp->inpcb_mtx);
721 		active = FALSE;
722 		goto out;
723 	}
724 
725 	/*
726 	 * We get here because the PCB is no longer searchable
727 	 * (WNT_STOPUSING); detach (if needed) and dispose if it is dead
728 	 * (usecount is 0).  This covers all cases, including overflow
729 	 * sockets and those that are considered as "embryonic",
730 	 * i.e. created by sonewconn() in TCP input path, and have
731 	 * not yet been committed.  For the former, we reduce the usecount
732 	 * to 0 as done by the code above.  For the latter, the usecount
733 	 * would have been reduced to 0 as part of calling soabort() when the
734 	 * socket is dropped at the end of tcp_input().
735 	 */
736 	if (so->so_usecount == 0) {
737 		DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp,
738 		    struct tcpcb *, tp, int32_t, TCPS_CLOSED);
739 		/* Become a regular mutex */
740 		lck_mtx_convert_spin(&inp->inpcb_mtx);
741 
742 		/*
743 		 * If this tp still happens to be on the timer list,
744 		 * take it out
745 		 */
746 		if (TIMER_IS_ON_LIST(tp)) {
747 			tcp_remove_timer(tp);
748 		}
749 
750 		if (inp->inp_state != INPCB_STATE_DEAD) {
751 			if (SOCK_CHECK_DOM(so, PF_INET6)) {
752 				in6_pcbdetach(inp);
753 			} else {
754 				in_pcbdetach(inp);
755 			}
756 		}
757 
758 		if (mp_so) {
759 			mptcp_subflow_del(tptomptp(tp)->mpt_mpte, tp->t_mpsub);
760 
761 			/* so is now unlinked from mp_so - let's drop the lock */
762 			socket_unlock(mp_so, 1);
763 			mp_so = NULL;
764 		}
765 
766 		in_pcbdispose(inp);
767 		active = FALSE;
768 		goto out;
769 	}
770 
771 	lck_mtx_unlock(&inp->inpcb_mtx);
772 	active = TRUE;
773 
774 out:
775 	if (mp_so) {
776 		socket_unlock(mp_so, 1);
777 	}
778 
779 	return active;
780 }
781 
782 /*
783  * TCP garbage collector callback (inpcb_timer_func_t).
784  *
785  * Returns the number of pcbs that will need to be gc-ed soon;
786  * returning > 0 will keep the timer active.
787  */
788 void
789 tcp_gc(struct inpcbinfo *ipi)
790 {
791 	struct inpcb *inp, *nxt;
792 	struct tcpcb *tw_tp, *tw_ntp;
793 #if  KDEBUG
794 	static int tws_checked = 0;
795 #endif
796 
797 	KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_START, 0, 0, 0, 0, 0);
798 
799 	/*
800 	 * Update tcp_now here as it may get used while
801 	 * processing the slow timer.
802 	 */
803 	calculate_tcp_clock();
804 
805 	/*
806 	 * Garbage collect socket/tcpcb: We need to acquire the list lock
807 	 * exclusively to do this
808 	 */
809 
810 	if (lck_rw_try_lock_exclusive(&ipi->ipi_lock) == FALSE) {
811 		/* don't sweat it this time; cleanup was done last time */
812 		if (tcp_gc_done == TRUE) {
813 			tcp_gc_done = FALSE;
814 			KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END,
815 			    tws_checked, cur_tw_slot, 0, 0, 0);
816 			/* Lock upgrade failed, give up this round */
817 			os_atomic_inc(&ipi->ipi_gc_req.intimer_fast, relaxed);
818 			return;
819 		}
820 		/* Upgrade failed, lost the lock; now take it again exclusively */
821 		lck_rw_lock_exclusive(&ipi->ipi_lock);
822 	}
823 	tcp_gc_done = TRUE;
824 
825 	LIST_FOREACH_SAFE(inp, &tcb, inp_list, nxt) {
826 		if (tcp_garbage_collect(inp, 0)) {
827 			os_atomic_inc(&ipi->ipi_gc_req.intimer_fast, relaxed);
828 		}
829 	}
830 
831 	/* Now cleanup the time wait ones */
832 	TAILQ_FOREACH_SAFE(tw_tp, &tcp_tw_tailq, t_twentry, tw_ntp) {
833 		/*
834 		 * We check the timestamp here without holding the
835 		 * socket lock for better performance. If there are
836 		 * any pcbs in time-wait, the timer will get rescheduled.
837 		 * Hence some error in this check can be tolerated.
838 		 *
839 		 * Sometimes a socket on the time-wait queue can be closed if
840 		 * the 2MSL timer expired but the application still has a
841 		 * usecount on it.
842 		 */
843 		if (tw_tp->t_state == TCPS_CLOSED ||
844 		    TSTMP_GEQ(tcp_now, tw_tp->t_timer[TCPT_2MSL])) {
845 			if (tcp_garbage_collect(tw_tp->t_inpcb, 1)) {
846 				os_atomic_inc(&ipi->ipi_gc_req.intimer_lazy, relaxed);
847 			}
848 		}
849 	}
850 
851 	/* take into account pcbs that are still in time_wait_slots */
852 	os_atomic_add(&ipi->ipi_gc_req.intimer_lazy, ipi->ipi_twcount, relaxed);
853 
854 	lck_rw_done(&ipi->ipi_lock);
855 
856 	/* Clean up the socache while we are here */
857 	if (so_cache_timer()) {
858 		os_atomic_inc(&ipi->ipi_gc_req.intimer_lazy, relaxed);
859 	}
860 
861 	KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END, tws_checked,
862 	    cur_tw_slot, 0, 0, 0);
863 
864 	return;
865 }
866 
867 /*
868  * Cancel all timers for TCP tp.
869  */
870 void
871 tcp_canceltimers(struct tcpcb *tp)
872 {
873 	int i;
874 
875 	tcp_remove_timer(tp);
876 	for (i = 0; i < TCPT_NTIMERS; i++) {
877 		tp->t_timer[i] = 0;
878 	}
879 	tp->tentry.timer_start = tcp_now;
880 	tp->tentry.index = TCPT_NONE;
881 }
882 
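/*
 * Retransmission backoff multipliers, indexed by t_rxtshift.  SYN
 * retransmits back off more slowly at first than data retransmits on an
 * established connection; tcp_totbackoff below is the sum of tcp_backoff[].
 */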
883 int     tcp_syn_backoff[TCP_MAXRXTSHIFT + 1] =
884 { 1, 1, 1, 1, 1, 2, 4, 8, 16, 32, 64, 64, 64 };
885 
886 int     tcp_backoff[TCP_MAXRXTSHIFT + 1] =
887 { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };
888 
889 static int tcp_totbackoff = 511;        /* sum of tcp_backoff[] */
890 
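/*
 * Snapshot congestion and RTT state on the first retransmission timeout
 * so that it can be restored if the timeout turns out to be spurious
 * (RFC 4015 style recovery when timestamps are supported).
 */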
891 void
892 tcp_rexmt_save_state(struct tcpcb *tp)
893 {
894 	u_int32_t fsize;
895 	if (TSTMP_SUPPORTED(tp)) {
896 		/*
897 		 * Since timestamps are supported on the connection,
898 		 * we can do recovery as described in rfc 4015.
899 		 */
900 		fsize = tp->snd_max - tp->snd_una;
901 		tp->snd_ssthresh_prev = max(fsize, tp->snd_ssthresh);
902 		tp->snd_recover_prev = tp->snd_recover;
903 	} else {
904 		/*
905 		 * Timestamp option is not supported on this connection.
906 		 * Record ssthresh and cwnd so they can
907 		 * be recovered if this turns out to be a "bad" retransmit.
908 		 * A retransmit is considered "bad" if an ACK for this
909 		 * segment is received within RTT/2 interval; the assumption
910 		 * here is that the ACK was already in flight.  See
911 		 * "On Estimating End-to-End Network Path Properties" by
912 		 * Allman and Paxson for more details.
913 		 */
914 		tp->snd_cwnd_prev = tp->snd_cwnd;
915 		tp->snd_ssthresh_prev = tp->snd_ssthresh;
916 		tp->snd_recover_prev = tp->snd_recover;
917 		if (IN_FASTRECOVERY(tp)) {
918 			tp->t_flags |= TF_WASFRECOVERY;
919 		} else {
920 			tp->t_flags &= ~TF_WASFRECOVERY;
921 		}
922 	}
923 	tp->t_srtt_prev = (tp->t_srtt >> TCP_RTT_SHIFT) + 2;
924 	tp->t_rttvar_prev = (tp->t_rttvar >> TCP_RTTVAR_SHIFT);
925 	tp->t_flagsext &= ~(TF_RECOMPUTE_RTT);
926 }
927 
928 /*
929  * Revert to the older segment size if there is an indication that PMTU
930  * blackhole detection was not needed.
931  */
932 void
933 tcp_pmtud_revert_segment_size(struct tcpcb *tp)
934 {
935 	int32_t optlen;
936 
937 	VERIFY(tp->t_pmtud_saved_maxopd > 0);
938 	tp->t_flags |= TF_PMTUD;
939 	tp->t_flags &= ~TF_BLACKHOLE;
940 	optlen = tp->t_maxopd - tp->t_maxseg;
941 	tp->t_maxopd = tp->t_pmtud_saved_maxopd;
942 	tp->t_maxseg = tp->t_maxopd - optlen;
943 
944 	/*
945 	 * Reset the slow-start flight size as it
946 	 * may depend on the new MSS
947 	 */
948 	if (CC_ALGO(tp)->cwnd_init != NULL) {
949 		CC_ALGO(tp)->cwnd_init(tp);
950 	}
951 
952 	if (TCP_USE_RLEDBAT(tp, tp->t_inpcb->inp_socket) &&
953 	    tcp_cc_rledbat.rwnd_init != NULL) {
954 		tcp_cc_rledbat.rwnd_init(tp);
955 	}
956 
957 	tp->t_pmtud_start_ts = 0;
958 	tcpstat.tcps_pmtudbh_reverted++;
959 
960 	/* change MSS according to recommendation, if there was one */
961 	tcp_update_mss_locked(tp->t_inpcb->inp_socket, NULL);
962 }
963 
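/*
 * Next MSS to try while probing for a PMTU black hole: first the
 * configurable tcp_pmtud_black_hole_mss, then the protocol default
 * (tcp_mssdflt or tcp_v6mssdflt) as the floor.
 */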
964 static uint32_t
965 tcp_pmtud_black_holed_next_mss(struct tcpcb *tp)
966 {
967 	/* Reduce the MSS to intermediary value */
968 	if (tp->t_maxopd > tcp_pmtud_black_hole_mss) {
969 		return tcp_pmtud_black_hole_mss;
970 	} else {
971 		if (tp->t_inpcb->inp_vflag & INP_IPV4) {
972 			return tcp_mssdflt;
973 		} else {
974 			return tcp_v6mssdflt;
975 		}
976 	}
977 }
978 
979 /*
980  * Send a packet designed to force a response
981  * if the peer is up and reachable:
982  * either an ACK if the connection is still alive,
983  * or an RST if the peer has closed the connection
984  * due to timeout or reboot.
985  * Using sequence number tp->snd_una-1
986  * causes the transmitted zero-length segment
987  * to lie outside the receive window;
988  * by the protocol spec, this requires the
989  * correspondent TCP to respond.
990  */
991 static bool
992 tcp_send_keep_alive(struct tcpcb *tp)
993 {
994 	struct tcptemp *__single t_template;
995 	struct mbuf *__single m;
996 
997 	tcpstat.tcps_keepprobe++;
998 	t_template = tcp_maketemplate(tp, &m);
999 	if (t_template != NULL) {
1000 		struct inpcb *inp = tp->t_inpcb;
1001 		struct tcp_respond_args tra;
1002 
1003 		bzero(&tra, sizeof(tra));
1004 		tra.nocell = INP_NO_CELLULAR(inp) ? 1 : 0;
1005 		tra.noexpensive = INP_NO_EXPENSIVE(inp) ? 1 : 0;
1006 		tra.noconstrained = INP_NO_CONSTRAINED(inp) ? 1 : 0;
1007 		tra.awdl_unrestricted = INP_AWDL_UNRESTRICTED(inp) ? 1 : 0;
1008 		tra.intcoproc_allowed = INP_INTCOPROC_ALLOWED(inp) ? 1 : 0;
1009 		tra.management_allowed = INP_MANAGEMENT_ALLOWED(inp) ? 1 : 0;
1010 		tra.keep_alive = 1;
1011 		if (tp->t_inpcb->inp_flags & INP_BOUND_IF) {
1012 			tra.ifscope = tp->t_inpcb->inp_boundifp->if_index;
1013 		} else {
1014 			tra.ifscope = IFSCOPE_NONE;
1015 		}
1016 		tcp_respond(tp, t_template->tt_ipgen,
1017 		    &t_template->tt_t, (struct mbuf *)NULL,
1018 		    tp->rcv_nxt, tp->snd_una - 1, 0, &tra);
1019 		(void) m_free(m);
1020 		return true;
1021 	} else {
1022 		return false;
1023 	}
1024 }
1025 
1026 /*
1027  * TCP timer processing.
1028  */
1029 struct tcpcb *
1030 tcp_timers(struct tcpcb *tp, int timer)
1031 {
1032 	int32_t rexmt, optlen = 0, idle_time = 0;
1033 	struct socket *so;
1034 	u_int64_t accsleep_ms;
1035 	u_int64_t last_sleep_ms = 0;
1036 	struct ifnet *outifp = tp->t_inpcb->inp_last_outifp;
1037 
1038 	so = tp->t_inpcb->inp_socket;
1039 	idle_time = tcp_now - tp->t_rcvtime;
1040 
1041 	switch (timer) {
1042 	/*
1043 	 * 2 MSL timeout in shutdown went off.  If we're closed but
1044 	 * still waiting for peer to close and connection has been idle
1045 	 * too long, or if 2MSL time is up from TIME_WAIT or FIN_WAIT_2,
1046 	 * delete connection control block.
1047 	 * Otherwise (this case shouldn't happen), check again in a bit;
1048 	 * we keep the socket in the main list in that case.
1049 	 */
1050 	case TCPT_2MSL:
1051 		tcp_free_sackholes(tp);
1052 		if (tp->t_state != TCPS_TIME_WAIT &&
1053 		    tp->t_state != TCPS_FIN_WAIT_2 &&
1054 		    ((idle_time > 0) && (idle_time < TCP_CONN_MAXIDLE(tp)))) {
1055 			tp->t_timer[TCPT_2MSL] = OFFSET_FROM_START(tp,
1056 			    (u_int32_t)TCP_CONN_KEEPINTVL(tp));
1057 		} else {
1058 			if (tp->t_state == TCPS_FIN_WAIT_2) {
1059 				TCP_LOG_DROP_PCB(NULL, NULL, tp, false,
1060 				    "FIN wait timeout drop");
1061 				tcpstat.tcps_fin_timeout_drops++;
1062 				tp = tcp_drop(tp, 0);
1063 			} else {
1064 				tp = tcp_close(tp);
1065 			}
1066 			return tp;
1067 		}
1068 		break;
1069 
1070 	/*
1071 	 * Retransmission timer went off.  Message has not
1072 	 * been acked within retransmit interval.  Back off
1073 	 * to a longer retransmit interval and retransmit one segment.
1074 	 */
1075 	case TCPT_REXMT:
1076 		absolutetime_to_nanoseconds(mach_absolutetime_asleep,
1077 		    &accsleep_ms);
1078 		accsleep_ms = accsleep_ms / 1000000UL;
1079 		if (accsleep_ms > tp->t_accsleep_ms) {
1080 			last_sleep_ms = accsleep_ms - tp->t_accsleep_ms;
1081 		}
1082 		/*
1083 		 * Drop a connection in the retransmit timer
1084 		 * 1. If we have retransmitted more than TCP_MAXRXTSHIFT
1085 		 * times
1086 		 * 2. If the time spent in this retransmission episode is
1087 		 * more than the time limit set with TCP_RXT_CONNDROPTIME
1088 		 * socket option
1089 		 * 3. If TCP_RXT_FINDROP socket option was set and
1090 		 * we have already retransmitted the FIN 3 times without
1091 		 * receiving an ack
1092 		 */
1093 		if (++tp->t_rxtshift > TCP_MAXRXTSHIFT ||
1094 		    (tp->t_rxt_conndroptime > 0 && tp->t_rxtstart > 0 &&
1095 		    (tcp_now - tp->t_rxtstart) >= tp->t_rxt_conndroptime) ||
1096 		    ((tp->t_flagsext & TF_RXTFINDROP) != 0 &&
1097 		    (tp->t_flags & TF_SENTFIN) != 0 && tp->t_rxtshift >= 4) ||
1098 		    (tp->t_rxtshift > 4 && last_sleep_ms >= TCP_SLEEP_TOO_LONG)) {
1099 			if (tp->t_state == TCPS_ESTABLISHED &&
1100 			    tp->t_rxt_minimum_timeout > 0) {
1101 				/*
1102 				 * Avoid dropping a connection if a minimum
1103 				 * timeout is set and that time has not yet
1104 				 * passed. We will continue sending
1105 				 * retransmissions at the maximum interval.
1106 				 */
1107 				if (TSTMP_LT(tcp_now, (tp->t_rxtstart +
1108 				    tp->t_rxt_minimum_timeout))) {
1109 					tp->t_rxtshift = TCP_MAXRXTSHIFT - 1;
1110 					goto retransmit_packet;
1111 				}
1112 			}
1113 			if ((tp->t_flagsext & TF_RXTFINDROP) != 0) {
1114 				tcpstat.tcps_rxtfindrop++;
1115 			} else if (last_sleep_ms >= TCP_SLEEP_TOO_LONG) {
1116 				tcpstat.tcps_drop_after_sleep++;
1117 			} else {
1118 				tcpstat.tcps_timeoutdrop++;
1119 			}
1120 			if (tp->t_rxtshift >= TCP_MAXRXTSHIFT) {
1121 				if (TCP_ECN_ENABLED(tp)) {
1122 					INP_INC_IFNET_STAT(tp->t_inpcb,
1123 					    ecn_on.rxmit_drop);
1124 				} else {
1125 					INP_INC_IFNET_STAT(tp->t_inpcb,
1126 					    ecn_off.rxmit_drop);
1127 				}
1128 			}
1129 			tp->t_rxtshift = TCP_MAXRXTSHIFT;
1130 			soevent(so,
1131 			    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT));
1132 
1133 			if (TCP_ECN_ENABLED(tp) &&
1134 			    tp->t_state == TCPS_ESTABLISHED) {
1135 				tcp_heuristic_ecn_droprxmt(tp);
1136 			}
1137 
1138 			TCP_LOG_DROP_PCB(NULL, NULL, tp, false,
1139 			    "retransmission timeout drop");
1140 			tp = tcp_drop(tp, tp->t_softerror ?
1141 			    tp->t_softerror : ETIMEDOUT);
1142 
1143 			break;
1144 		}
1145 retransmit_packet:
1146 		tcpstat.tcps_rexmttimeo++;
1147 		tp->t_accsleep_ms = accsleep_ms;
1148 
1149 		if (tp->t_rxtshift == 1 &&
1150 		    tp->t_state == TCPS_ESTABLISHED) {
1151 			/* Set the time at which retransmission started. */
1152 			tp->t_rxtstart = tcp_now;
1153 
1154 			/*
1155 			 * if this is the first retransmit timeout, save
1156 			 * the state so that we can recover if the timeout
1157 			 * is spurious.
1158 			 */
1159 			tcp_rexmt_save_state(tp);
1160 			tcp_ccdbg_trace(tp, NULL, TCP_CC_FIRST_REXMT);
1161 		}
1162 #if MPTCP
1163 		if ((tp->t_rxtshift >= mptcp_fail_thresh) &&
1164 		    (tp->t_state == TCPS_ESTABLISHED) &&
1165 		    (tp->t_mpflags & TMPF_MPTCP_TRUE)) {
1166 			mptcp_act_on_txfail(so);
1167 		}
1168 
1169 		if (TCPS_HAVEESTABLISHED(tp->t_state) &&
1170 		    (so->so_flags & SOF_MP_SUBFLOW)) {
1171 			struct mptses *mpte = tptomptp(tp)->mpt_mpte;
1172 
1173 			if (mpte->mpte_svctype == MPTCP_SVCTYPE_HANDOVER ||
1174 			    mpte->mpte_svctype == MPTCP_SVCTYPE_PURE_HANDOVER) {
1175 				mptcp_check_subflows_and_add(mpte);
1176 			}
1177 		}
1178 #endif /* MPTCP */
1179 
1180 		if (tp->t_adaptive_wtimo > 0 &&
1181 		    tp->t_rxtshift > tp->t_adaptive_wtimo &&
1182 		    TCPS_HAVEESTABLISHED(tp->t_state)) {
1183 			/* Send an event to the application */
1184 			soevent(so,
1185 			    (SO_FILT_HINT_LOCKED |
1186 			    SO_FILT_HINT_ADAPTIVE_WTIMO));
1187 		}
1188 
1189 		/*
1190 		 * If this is a retransmit timeout after PTO, the PTO
1191 		 * was not effective
1192 		 */
1193 		if (tp->t_flagsext & TF_SENT_TLPROBE) {
1194 			tp->t_flagsext &= ~(TF_SENT_TLPROBE);
1195 			tcpstat.tcps_rto_after_pto++;
1196 		}
1197 
1198 		if (tp->t_flagsext & TF_DELAY_RECOVERY) {
1199 			/*
1200 			 * Retransmit timer fired before entering recovery
1201 			 * on a connection with packet re-ordering. This
1202 			 * suggests that the reordering metrics computed
1203 			 * are not accurate.
1204 			 */
1205 			tp->t_reorderwin = 0;
1206 			tp->t_timer[TCPT_DELAYFR] = 0;
1207 			tp->t_flagsext &= ~(TF_DELAY_RECOVERY);
1208 		}
1209 
1210 		if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) &&
1211 		    tp->t_state == TCPS_SYN_RECEIVED) {
1212 			tcp_disable_tfo(tp);
1213 		}
1214 
1215 		if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) &&
1216 		    !(tp->t_tfo_flags & TFO_F_HEURISTIC_DONE) &&
1217 		    (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) &&
1218 		    !(tp->t_tfo_flags & TFO_F_NO_SNDPROBING) &&
1219 		    ((tp->t_state != TCPS_SYN_SENT && tp->t_rxtshift > 1) ||
1220 		    tp->t_rxtshift > 4)) {
1221 			/*
1222 			 * For regular retransmissions, the first one is done
1223 			 * as a tail-loss probe.
1224 			 * Thus, if rxtshift > 1, this means we have sent the segment
1225 			 * a total of 3 times.
1226 			 *
1227 			 * If we are in SYN-SENT state, then there is no tail-loss
1228 			 * probe thus we have to let rxtshift go up to 3.
1229 			 */
1230 			tcp_heuristic_tfo_middlebox(tp);
1231 
1232 			so->so_error = ENODATA;
1233 			soevent(so,
1234 			    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_MP_SUB_ERROR));
1235 			sorwakeup(so);
1236 			sowwakeup(so);
1237 
1238 			tp->t_tfo_stats |= TFO_S_SEND_BLACKHOLE;
1239 			tcpstat.tcps_tfo_sndblackhole++;
1240 		}
1241 
1242 		if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) &&
1243 		    !(tp->t_tfo_flags & TFO_F_HEURISTIC_DONE) &&
1244 		    (tp->t_tfo_stats & TFO_S_SYN_DATA_ACKED) &&
1245 		    tp->t_rxtshift > 3) {
1246 			if (TSTMP_GT(tp->t_sndtime - 10 * TCP_RETRANSHZ, tp->t_rcvtime)) {
1247 				tcp_heuristic_tfo_middlebox(tp);
1248 
1249 				so->so_error = ENODATA;
1250 				soevent(so,
1251 				    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_MP_SUB_ERROR));
1252 				sorwakeup(so);
1253 				sowwakeup(so);
1254 			}
1255 		}
1256 
1257 		if (tp->t_state == TCPS_SYN_SENT) {
1258 			rexmt = TCP_REXMTVAL(tp) * tcp_syn_backoff[tp->t_rxtshift];
1259 			tp->t_stat.synrxtshift = tp->t_rxtshift;
1260 			tp->t_stat.rxmitsyns++;
1261 
1262 			/* When retransmitting, disable TFO */
1263 			if (TFO_ENABLED(tp) &&
1264 			    !(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE)) {
1265 				tcp_disable_tfo(tp);
1266 				tp->t_tfo_flags |= TFO_F_SYN_LOSS;
1267 			}
1268 		} else {
1269 			rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
1270 		}
1271 
1272 		TCPT_RANGESET(tp->t_rxtcur, rexmt, tp->t_rttmin, TCPTV_REXMTMAX,
1273 		    TCP_ADD_REXMTSLOP(tp));
1274 		tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp, tp->t_rxtcur);
1275 
1276 		TCP_LOG_RTT_INFO(tp);
1277 
1278 		if (INP_WAIT_FOR_IF_FEEDBACK(tp->t_inpcb)) {
1279 			goto fc_output;
1280 		}
1281 
1282 		tcp_free_sackholes(tp);
1283 		if (TCP_RACK_ENABLED(tp)) {
1284 			tcp_segs_clear_sacked(tp);
1285 			tcp_rack_loss_on_rto(tp, true);
1286 		}
1287 		/*
1288 		 * Check for potential Path MTU Discovery Black Hole
1289 		 */
1290 		if (tcp_pmtud_black_hole_detect &&
1291 		    !(tp->t_flagsext & TF_NOBLACKHOLE_DETECTION) &&
1292 		    (tp->t_state == TCPS_ESTABLISHED)) {
1293 			if ((tp->t_flags & TF_PMTUD) &&
1294 			    tp->t_pmtud_lastseg_size > tcp_pmtud_black_holed_next_mss(tp) &&
1295 			    tp->t_rxtshift == 2) {
1296 				/*
1297 				 * Enter Path MTU Black-hole Detection mechanism:
1298 				 * - Disable Path MTU Discovery (IP "DF" bit).
1299 				 * - Reduce MTU to lower value than what we
1300 				 * negotiated with the peer.
1301 				 */
1302 				/* Disable Path MTU Discovery for now */
1303 				tp->t_flags &= ~TF_PMTUD;
1304 				/* Record that we may have found a black hole */
1305 				tp->t_flags |= TF_BLACKHOLE;
1306 				optlen = tp->t_maxopd - tp->t_maxseg;
1307 				/* Keep track of previous MSS */
1308 				tp->t_pmtud_saved_maxopd = tp->t_maxopd;
1309 				tp->t_pmtud_start_ts = tcp_now;
1310 				if (tp->t_pmtud_start_ts == 0) {
1311 					tp->t_pmtud_start_ts++;
1312 				}
1313 				/* Reduce the MSS to intermediary value */
1314 				tp->t_maxopd = tcp_pmtud_black_holed_next_mss(tp);
1315 				tp->t_maxseg = tp->t_maxopd - optlen;
1316 
1317 				/*
1318 				 * Reset the slow-start flight size
1319 				 * as it may depend on the new MSS
1320 				 */
1321 				if (CC_ALGO(tp)->cwnd_init != NULL) {
1322 					CC_ALGO(tp)->cwnd_init(tp);
1323 				}
1324 				tp->snd_cwnd = tp->t_maxseg;
1325 
1326 				if (TCP_USE_RLEDBAT(tp, so) &&
1327 				    tcp_cc_rledbat.rwnd_init != NULL) {
1328 					tcp_cc_rledbat.rwnd_init(tp);
1329 				}
1330 			}
1331 			/*
1332 			 * If further retransmissions are still
1333 			 * unsuccessful with a lowered MTU, maybe this
1334 			 * isn't a Black Hole and we restore the previous
1335 			 * MSS and blackhole detection flags.
1336 			 */
1337 			else {
1338 				if ((tp->t_flags & TF_BLACKHOLE) &&
1339 				    (tp->t_rxtshift > 4)) {
1340 					tcp_pmtud_revert_segment_size(tp);
1341 					tp->snd_cwnd = tp->t_maxseg;
1342 				}
1343 			}
1344 		}
1345 
1346 		/*
1347 		 * Disable rfc1323 and rfc1644 if we haven't got any
1348 		 * response to our SYN (after we reach the threshold)
1349 		 * to work-around some broken terminal servers (most of
1350 		 * which have hopefully been retired) that have bad VJ
1351 		 * header compression code which trashes TCP segments
1352 		 * containing unknown-to-them TCP options.
1353 		 * Do this only on non-local connections.
1354 		 */
1355 		if (tp->t_state == TCPS_SYN_SENT &&
1356 		    tp->t_rxtshift == tcp_broken_peer_syn_rxmit_thres) {
1357 			tp->t_flags &= ~(TF_REQ_SCALE | TF_REQ_TSTMP);
1358 		}
1359 
1360 		/*
1361 		 * If losing, let the lower level know and try for
1362 		 * a better route.  Also, if we backed off this far,
1363 		 * our srtt estimate is probably bogus.  Clobber it
1364 		 * so we'll take the next rtt measurement as our srtt;
1365 		 * move the current srtt into rttvar to keep the current
1366 		 * retransmit times until then.
1367 		 */
1368 		if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
1369 			if (!(tp->t_inpcb->inp_vflag & INP_IPV4)) {
1370 				in6_losing(tp->t_inpcb);
1371 			} else {
1372 				in_losing(tp->t_inpcb);
1373 			}
1374 			tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
1375 			tp->t_srtt = 0;
1376 		}
1377 		tp->snd_nxt = tp->snd_una;
1378 		/*
1379 		 * Note:  We overload snd_recover to function also as the
1380 		 * snd_last variable described in RFC 2582
1381 		 */
1382 		tp->snd_recover = tp->snd_max;
1383 		/*
1384 		 * Force a segment to be sent.
1385 		 */
1386 		tp->t_flags |= TF_ACKNOW;
1387 
1388 		/*
1389 		 * If timing a segment in this window, stop the timer
1390 		 * except when we are in connecting states on cellular
1391 		 * interfaces
1392 		 */
1393 		if (tp->t_state >= TCPS_ESTABLISHED || (outifp != NULL &&
1394 		    IFNET_IS_CELLULAR(outifp) == false)) {
1395 			tp->t_rtttime = 0;
1396 		}
1397 
1398 		if (!IN_FASTRECOVERY(tp) && tp->t_rxtshift == 1) {
1399 			tcpstat.tcps_tailloss_rto++;
1400 		}
1401 
1402 		/*
1403 		 * RFC 5681 says: when a TCP sender detects segment loss
1404 		 * using retransmit timer and the given segment has already
1405 		 * been retransmitted by way of the retransmission timer at
1406 		 * least once, the value of ssthresh is held constant
1407 		 */
1408 		if (tp->t_rxtshift == 1 &&
1409 		    CC_ALGO(tp)->after_timeout != NULL) {
1410 			CC_ALGO(tp)->after_timeout(tp);
1411 			/*
1412 			 * CWR notifications are to be sent on new data
1413 			 * right after Fast Retransmits and ECE
1414 			 * notification receipts.
1415 			 */
1416 			if (!TCP_ACC_ECN_ON(tp) && TCP_ECN_ENABLED(tp)) {
1417 				tp->ecn_flags |= TE_SENDCWR;
1418 			}
1419 		}
1420 
1421 		EXIT_FASTRECOVERY(tp);
1422 
1423 		/* Exit cwnd non validated phase */
1424 		tp->t_flagsext &= ~TF_CWND_NONVALIDATED;
1425 
1426 
1427 fc_output:
1428 		tcp_ccdbg_trace(tp, NULL, TCP_CC_REXMT_TIMEOUT);
1429 
1430 		(void) tcp_output(tp);
1431 		break;
1432 
1433 	/*
1434 	 * Persistence timer into zero window.
1435 	 * Force a byte to be output, if possible.
1436 	 */
1437 	case TCPT_PERSIST:
1438 		tcpstat.tcps_persisttimeo++;
1439 		/*
1440 		 * Hack: if the peer is dead/unreachable, we do not
1441 		 * time out if the window is closed.  After a full
1442 		 * backoff, drop the connection if the idle time
1443 		 * (no responses to probes) reaches the maximum
1444 		 * backoff that we would use if retransmitting.
1445 		 *
1446 		 * Drop the connection if we reached the maximum allowed time for
1447 		 * Zero Window Probes without a non-zero update from the peer.
1448 		 * See rdar://5805356
1449 		 */
1450 		if ((tp->t_rxtshift == TCP_MAXRXTSHIFT &&
1451 		    (idle_time >= tcp_maxpersistidle ||
1452 		    idle_time >= TCP_REXMTVAL(tp) * tcp_totbackoff)) ||
1453 		    ((tp->t_persist_stop != 0) &&
1454 		    TSTMP_LEQ(tp->t_persist_stop, tcp_now))) {
1455 			TCP_LOG_DROP_PCB(NULL, NULL, tp, false, "persist timeout drop");
1456 			tcpstat.tcps_persistdrop++;
1457 			soevent(so,
1458 			    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT));
1459 			tp = tcp_drop(tp, ETIMEDOUT);
1460 			break;
1461 		}
1462 		tcp_setpersist(tp);
1463 		tp->t_flagsext |= TF_FORCE;
1464 		(void) tcp_output(tp);
1465 		tp->t_flagsext &= ~TF_FORCE;
1466 		break;
1467 
1468 	/*
1469 	 * Keep-alive timer went off; send something
1470 	 * or drop connection if idle for too long.
1471 	 */
1472 	case TCPT_KEEP:
1473 #if FLOW_DIVERT
1474 		if (tp->t_inpcb->inp_socket->so_flags & SOF_FLOW_DIVERT) {
1475 			break;
1476 		}
1477 #endif /* FLOW_DIVERT */
1478 
1479 		tcpstat.tcps_keeptimeo++;
1480 #if MPTCP
1481 		/*
1482 		 * Regular TCP connections do not send keepalives after closing.
1483 		 * MPTCP must not either, after sending Data FINs.
1484 		 */
1485 		struct mptcb *mp_tp = tptomptp(tp);
1486 		if ((tp->t_mpflags & TMPF_MPTCP_TRUE) &&
1487 		    (tp->t_state > TCPS_ESTABLISHED)) {
1488 			goto dropit;
1489 		} else if (mp_tp != NULL) {
1490 			if ((mptcp_ok_to_keepalive(mp_tp) == 0)) {
1491 				goto dropit;
1492 			}
1493 		}
1494 #endif /* MPTCP */
1495 		if (tp->t_state < TCPS_ESTABLISHED) {
1496 			goto dropit;
1497 		}
1498 		if ((always_keepalive ||
1499 		    (tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) ||
1500 		    (tp->t_flagsext & TF_DETECT_READSTALL) ||
1501 		    (tp->t_tfo_probe_state == TFO_PROBE_PROBING)) &&
1502 		    (tp->t_state <= TCPS_CLOSING || tp->t_state == TCPS_FIN_WAIT_2)) {
1503 			if (idle_time >= TCP_CONN_KEEPIDLE(tp) + TCP_CONN_MAXIDLE(tp)) {
1504 				TCP_LOG_DROP_PCB(NULL, NULL, tp, false,
1505 				    "keep alive timeout drop");
1506 				goto dropit;
1507 			}
1508 
1509 			if (tcp_send_keep_alive(tp)) {
1510 				if (tp->t_flagsext & TF_DETECT_READSTALL) {
1511 					tp->t_rtimo_probes++;
1512 				}
1513 
1514 				TCP_LOG_KEEP_ALIVE(tp, idle_time);
1515 			}
1516 
1517 			tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
1518 			    TCP_CONN_KEEPINTVL(tp));
1519 		} else {
1520 			tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
1521 			    TCP_CONN_KEEPIDLE(tp));
1522 		}
1523 		if (tp->t_flagsext & TF_DETECT_READSTALL) {
1524 			bool reenable_probe = false;
1525 			/*
1526 			 * The keep alive packets sent to detect a read
1527 			 * stall did not get a response from the
1528 			 * peer. Generate more keep-alives to confirm this.
1529 			 * If the number of probes sent reaches the limit,
1530 			 * generate an event.
1531 			 */
1532 			if (tp->t_adaptive_rtimo > 0) {
1533 				if (tp->t_rtimo_probes > tp->t_adaptive_rtimo) {
1534 					/* Generate an event */
1535 					soevent(so,
1536 					    (SO_FILT_HINT_LOCKED |
1537 					    SO_FILT_HINT_ADAPTIVE_RTIMO));
1538 					tcp_keepalive_reset(tp);
1539 				} else {
1540 					reenable_probe = true;
1541 				}
1542 			} else if (outifp != NULL &&
1543 			    (outifp->if_eflags & IFEF_PROBE_CONNECTIVITY) &&
1544 			    tp->t_rtimo_probes <= TCP_CONNECTIVITY_PROBES_MAX) {
1545 				reenable_probe = true;
1546 			} else {
1547 				tp->t_flagsext &= ~TF_DETECT_READSTALL;
1548 			}
1549 			if (reenable_probe) {
1550 				int ind = min(tp->t_rtimo_probes,
1551 				    TCP_MAXRXTSHIFT);
1552 				tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(
1553 					tp, tcp_backoff[ind] * TCP_REXMTVAL(tp));
1554 			}
1555 		}
1556 		if (tp->t_tfo_probe_state == TFO_PROBE_PROBING) {
1557 			int ind;
1558 
1559 			tp->t_tfo_probes++;
1560 			ind = min(tp->t_tfo_probes, TCP_MAXRXTSHIFT);
1561 
1562 			/*
1563 			 * We take the minimum among the time set by true
1564 			 * keepalive (see above) and the backoff'd RTO. That
1565 			 * way we back off in case of packet loss but will never
1566 			 * time out slower than regular keepalive due to the
1567 			 * backing off.
1568 			 */
1569 			tp->t_timer[TCPT_KEEP] = min(OFFSET_FROM_START(
1570 				    tp, tcp_backoff[ind] * TCP_REXMTVAL(tp)),
1571 			    tp->t_timer[TCPT_KEEP]);
1572 		} else if (!(tp->t_flagsext & TF_FASTOPEN_FORCE_ENABLE) &&
1573 		    !(tp->t_tfo_flags & TFO_F_HEURISTIC_DONE) &&
1574 		    tp->t_tfo_probe_state == TFO_PROBE_WAIT_DATA) {
1575 			/* Still no data! Let's assume a TFO-error and err out... */
1576 			tcp_heuristic_tfo_middlebox(tp);
1577 
1578 			so->so_error = ENODATA;
1579 			soevent(so,
1580 			    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_MP_SUB_ERROR));
1581 			sorwakeup(so);
1582 			tp->t_tfo_stats |= TFO_S_RECV_BLACKHOLE;
1583 			tcpstat.tcps_tfo_blackhole++;
1584 		}
1585 		break;
1586 	case TCPT_DELACK:
1587 		if (tcp_delack_enabled && (tp->t_flags & TF_DELACK)) {
1588 			tp->t_flags &= ~TF_DELACK;
1589 			tp->t_timer[TCPT_DELACK] = 0;
1590 			tp->t_flags |= TF_ACKNOW;
1591 
1592 			/*
1593 			 * If delayed ack timer fired while stretching
1594 			 * acks, count the number of times the streaming
1595 			 * detection was not correct. If this exceeds a
1596 			 * threshold, disable stretch ack on this
1597 			 * connection
1598 			 *
1599 			 * Also, go back to acking every other packet.
1600 			 */
1601 			if ((tp->t_flags & TF_STRETCHACK)) {
1602 				if (tp->t_unacksegs > 1 &&
1603 				    tp->t_unacksegs < maxseg_unacked) {
1604 					tp->t_stretchack_delayed++;
1605 				}
1606 
1607 				if (tp->t_stretchack_delayed >
1608 				    TCP_STRETCHACK_DELAY_THRESHOLD) {
1609 					tp->t_flagsext |= TF_DISABLE_STRETCHACK;
1610 					/*
1611 					 * Note the time at which stretch
1612 					 * ack was disabled automatically
1613 					 */
1614 					tp->rcv_nostrack_ts = tcp_now;
1615 					tcpstat.tcps_nostretchack++;
1616 					tp->t_stretchack_delayed = 0;
1617 					tp->rcv_nostrack_pkts = 0;
1618 				}
1619 				tcp_reset_stretch_ack(tp);
1620 			}
1621 			tp->t_forced_acks = TCP_FORCED_ACKS_COUNT;
1622 
1623 			/*
1624 			 * If we are measuring inter packet arrival jitter
1625 			 * for throttling a connection, this delayed ack
1626 			 * might be the reason for accumulating some
1627 			 * jitter. So let's restart the measurement.
1628 			 */
1629 			CLEAR_IAJ_STATE(tp);
1630 
1631 			tcpstat.tcps_delack++;
1632 			tp->t_stat.delayed_acks_sent++;
1633 			(void) tcp_output(tp);
1634 		}
1635 		break;
1636 
1637 #if MPTCP
1638 	case TCPT_JACK_RXMT:
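		/*
		 * MPTCP join-ACK retransmission timer: while a joined subflow is
		 * still pre-established, re-send the join ACK; after
		 * TCP_MAXRXTSHIFT attempts the subflow is dropped with a timeout.
		 */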
1639 		if ((tp->t_state == TCPS_ESTABLISHED) &&
1640 		    (tp->t_mpflags & TMPF_PREESTABLISHED) &&
1641 		    (tp->t_mpflags & TMPF_JOINED_FLOW)) {
1642 			if (++tp->t_mprxtshift > TCP_MAXRXTSHIFT) {
1643 				tcpstat.tcps_timeoutdrop++;
1644 				soevent(so,
1645 				    (SO_FILT_HINT_LOCKED |
1646 				    SO_FILT_HINT_TIMEOUT));
1647 				tp = tcp_drop(tp, tp->t_softerror ?
1648 				    tp->t_softerror : ETIMEDOUT);
1649 				break;
1650 			}
1651 			tcpstat.tcps_join_rxmts++;
1652 			tp->t_mpflags |= TMPF_SND_JACK;
1653 			tp->t_flags |= TF_ACKNOW;
1654 
1655 			/*
1656 			 * No backoff is implemented for simplicity for this
1657 			 * corner case.
1658 			 */
1659 			(void) tcp_output(tp);
1660 		}
1661 		break;
1662 	case TCPT_CELLICON:
1663 	{
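		/*
		 * MPTCP cell-icon timer: once the toggle-rate interval has
		 * elapsed since the cellular-usage indication was last set,
		 * clear the indication this connection contributed; keep
		 * re-arming the timer while increments remain.
		 */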
1664 		struct mptses *mpte = tptomptp(tp)->mpt_mpte;
1665 
1666 		tp->t_timer[TCPT_CELLICON] = 0;
1667 
1668 		if (mpte->mpte_cellicon_increments == 0) {
1669 			/* Cell-icon not set by this connection */
1670 			break;
1671 		}
1672 
1673 		if (TSTMP_LT(mpte->mpte_last_cellicon_set + MPTCP_CELLICON_TOGGLE_RATE, tcp_now)) {
1674 			mptcp_unset_cellicon(mpte, NULL, 1);
1675 		}
1676 
1677 		if (mpte->mpte_cellicon_increments) {
1678 			tp->t_timer[TCPT_CELLICON] = OFFSET_FROM_START(tp, MPTCP_CELLICON_TOGGLE_RATE);
1679 		}
1680 
1681 		break;
1682 	}
1683 #endif /* MPTCP */
1684 
1685 	case TCPT_PTO:
1686 	{
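		/*
		 * Probe timeout: send a single probe segment, either as a tail
		 * loss probe before the retransmission timer fires, or as an
		 * interface probe (TF_IF_PROBING) when the interface state has
		 * changed and connectivity needs to be re-verified.
		 */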
1687 		int32_t ret = 0;
1688 
1689 		if (!(tp->t_flagsext & TF_IF_PROBING)) {
1690 			tp->t_flagsext &= ~(TF_SENT_TLPROBE);
1691 		}
1692 		/*
1693 		 * Check if the connection is in the right state to
1694 		 * send a probe
1695 		 */
1696 		if ((tp->t_state != TCPS_ESTABLISHED ||
1697 		    tp->t_rxtshift > 0 ||
1698 		    tp->snd_max == tp->snd_una ||
1699 		    !SACK_ENABLED(tp) || IN_FASTRECOVERY(tp)) &&
1700 		    !(tp->t_flagsext & TF_IF_PROBING)) {
1701 			break;
1702 		}
1703 
1704 		/*
1705 		 * When the interface state is changed, explicitly reset the retransmission
1706 		 * timer state for both SYN and data packets because we do not want to
1707 		 * wait unnecessarily or time out too quickly if the link characteristics
1708 		 * have changed drastically.
1709 		 */
1710 		if (tp->t_flagsext & TF_IF_PROBING) {
1711 			tp->t_rxtshift = 0;
1712 			if (tp->t_state == TCPS_SYN_SENT) {
1713 				tp->t_stat.synrxtshift = tp->t_rxtshift;
1714 			}
1715 			/*
1716 			 * Reset to the the default RTO
1717 			 * Reset to the default RTO
1718 			tp->t_srtt = TCPTV_SRTTBASE;
1719 			tp->t_rttvar =
1720 			    ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
1721 			tp->t_rttmin = TCPTV_REXMTMIN;
1722 			TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
1723 			    tp->t_rttmin, TCPTV_REXMTMAX, TCP_ADD_REXMTSLOP(tp));
1724 			TCP_LOG_RTT_INFO(tp);
1725 		}
1726 
1727 		if (tp->t_state == TCPS_SYN_SENT) {
1728 			/*
1729 			 * The PTO for SYN_SENT reinitializes TCP as if it was a fresh
1730 			 * connection attempt
1731 			 */
1732 			tp->snd_nxt = tp->snd_una;
1733 			/*
1734 			 * Note:  We overload snd_recover to function also as the
1735 			 * snd_last variable described in RFC 2582
1736 			 */
1737 			tp->snd_recover = tp->snd_max;
1738 			/*
1739 			 * Force a segment to be sent.
1740 			 */
1741 			tp->t_flags |= TF_ACKNOW;
1742 
1743 			/* If timing a segment in this window, stop the timer */
1744 			tp->t_rtttime = 0;
1745 
1746 			tp->t_flagsext |= TF_TLP_IS_RETRANS;
1747 		} else {
1748 			int32_t snd_len;
1749 
1750 			/*
1751 			 * If there is no new data to send or if the
1752 			 * connection is limited by receive window then
1753 			 * retransmit the last segment, otherwise send
1754 			 * new data.
1755 			 */
1756 			snd_len = min(so->so_snd.sb_cc, tp->snd_wnd)
1757 			    - (tp->snd_max - tp->snd_una);
1758 			if (snd_len > 0) {
1759 				tp->snd_nxt = tp->snd_max;
1760 				tp->t_flagsext &= ~TF_TLP_IS_RETRANS;
1761 			} else {
1762 				snd_len = min((tp->snd_max - tp->snd_una),
1763 				    tp->t_maxseg);
1764 				tp->snd_nxt = tp->snd_max - snd_len;
1765 				tp->t_flagsext |= TF_TLP_IS_RETRANS;
1766 			}
1767 		}
1768 
1769 		tcpstat.tcps_pto++;
1770 		if (tp->t_flagsext & TF_IF_PROBING) {
1771 			tcpstat.tcps_probe_if++;
1772 		}
1773 
1774 		/* If timing a segment in this window, stop the timer */
1775 		tp->t_rtttime = 0;
1776 		/* Note that tail loss probe is being sent. Exclude IF probe */
1777 		if (!(tp->t_flagsext & TF_IF_PROBING)) {
1778 			tp->t_flagsext |= TF_SENT_TLPROBE;
1779 			tp->t_tlpstart = tcp_now;
1780 		}
1781 
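		/*
		 * Temporarily inflate the congestion window by one segment so the
		 * probe can go out even when cwnd is fully used; it is deflated
		 * again after tcp_output() below.
		 */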
1782 		tp->snd_cwnd += tp->t_maxseg;
1783 		/*
1784 		 * When tail-loss-probe fires, we reset the RTO timer, because
1785 		 * a probe just got sent, so we are good to push out the timer.
1786 		 *
1787 		 * Set to 0 to ensure that tcp_output() will reschedule it
1788 		 */
1789 		tp->t_timer[TCPT_REXMT] = 0;
1790 		ret = tcp_output(tp);
1791 
1792 #if (DEBUG || DEVELOPMENT)
1793 		if ((tp->t_flagsext & TF_IF_PROBING) &&
1794 		    ((IFNET_IS_COMPANION_LINK(tp->t_inpcb->inp_last_outifp)) ||
1795 		    tp->t_state == TCPS_SYN_SENT)) {
1796 			if (ret == 0 && tcp_probe_if_fix_port > 0 &&
1797 			    tcp_probe_if_fix_port <= IPPORT_HILASTAUTO) {
1798 				tp->t_timer[TCPT_REXMT] = 0;
1799 				tcp_set_lotimer_index(tp);
1800 			}
1801 
1802 			os_log(OS_LOG_DEFAULT,
1803 			    "%s: sent %s probe for %u > %u on interface %s"
1804 			    " (%u) %s(%d)",
1805 			    __func__,
1806 			    tp->t_state == TCPS_SYN_SENT ? "SYN" : "data",
1807 			    ntohs(tp->t_inpcb->inp_lport),
1808 			    ntohs(tp->t_inpcb->inp_fport),
1809 			    if_name(tp->t_inpcb->inp_last_outifp),
1810 			    tp->t_inpcb->inp_last_outifp->if_index,
1811 			    ret == 0 ? "succeeded" : "failed", ret);
1812 		}
1813 #endif /* DEBUG || DEVELOPMENT */
1814 
1815 		/*
1816 		 * When there is data (or a SYN) to send, the above call to
1817 		 * tcp_output() should have armed either the REXMT or the
1818 		 * PERSIST timer. If it didn't, something is wrong and this
1819 		 * connection would idle around forever. Let's make sure that
1820 		 * at least the REXMT timer is set.
1821 		 */
1822 		if (tp->t_timer[TCPT_REXMT] == 0 && tp->t_timer[TCPT_PERSIST] == 0 &&
1823 		    (tp->t_inpcb->inp_socket->so_snd.sb_cc != 0 || tp->t_state == TCPS_SYN_SENT ||
1824 		    tp->t_state == TCPS_SYN_RECEIVED)) {
1825 			tp->t_timer[TCPT_REXMT] =
1826 			    OFFSET_FROM_START(tp, tp->t_rxtcur);
1827 
1828 			os_log(OS_LOG_DEFAULT,
1829 			    "%s: tcp_output() returned %u with retransmission timer disabled "
1830 			    "for %u > %u in state %d, reset timer to %d",
1831 			    __func__, ret,
1832 			    ntohs(tp->t_inpcb->inp_lport),
1833 			    ntohs(tp->t_inpcb->inp_fport),
1834 			    tp->t_state,
1835 			    tp->t_timer[TCPT_REXMT]);
1836 
1837 			tcp_check_timer_state(tp);
1838 		}
1839 		tp->snd_cwnd -= tp->t_maxseg;
1840 
1841 		if (!(tp->t_flagsext & TF_IF_PROBING)) {
1842 			tp->t_tlphighrxt = tp->snd_nxt;
1843 			tp->t_tlphightrxt_persist = tp->snd_nxt;
1844 		}
1845 		break;
1846 	}
1847 	case TCPT_DELAYFR:
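		/*
		 * Delayed fast-recovery timer: this connection deferred entry
		 * into fast recovery (TF_DELAY_RECOVERY); if the loss has not
		 * been repaired by the time the timer fires, enter SACK-based
		 * fast recovery now.
		 */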
1848 		tp->t_flagsext &= ~TF_DELAY_RECOVERY;
1849 
1850 		/*
1851 		 * Don't do anything if one of the following is true:
1852 		 * - the connection is already in recovery
1853 		 * - everything up to snd_recover has been acknowledged
1854 		 * - retransmit timeout has fired
1855 		 */
1856 		if (IN_FASTRECOVERY(tp) ||
1857 		    SEQ_GEQ(tp->snd_una, tp->snd_recover) ||
1858 		    tp->t_rxtshift > 0) {
1859 			break;
1860 		}
1861 
1862 		VERIFY(SACK_ENABLED(tp));
1863 		tcp_rexmt_save_state(tp);
1864 		if (CC_ALGO(tp)->pre_fr != NULL) {
1865 			CC_ALGO(tp)->pre_fr(tp);
1866 			if (!TCP_ACC_ECN_ON(tp) && TCP_ECN_ENABLED(tp)) {
1867 				tp->ecn_flags |= TE_SENDCWR;
1868 			}
1869 		}
1870 		ENTER_FASTRECOVERY(tp);
1871 
1872 		tp->t_timer[TCPT_REXMT] = 0;
1873 		tcpstat.tcps_sack_recovery_episode++;
1874 		tp->t_sack_recovery_episode++;
1875 		tp->snd_cwnd = tp->t_maxseg;
1876 		tcp_ccdbg_trace(tp, NULL, TCP_CC_ENTER_FASTRECOVERY);
1877 		(void) tcp_output(tp);
1878 		break;
1879 
1880 dropit:
1881 		tcpstat.tcps_keepdrops++;
1882 		soevent(so,
1883 		    (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT));
1884 		tp = tcp_drop(tp, ETIMEDOUT);
1885 		break;
1886 	case TCPT_REORDER:
1887 		if (TCP_RACK_ENABLED(tp)) {
1888 			tcp_rack_reordering_timeout(tp, 0);
1889 		}
1890 		break;
1891 	}
1892 	return tp;
1893 }
1894 
1895 /* Remove a timer entry from timer list */
1896 void
1897 tcp_remove_timer(struct tcpcb *tp)
1898 {
1899 	struct tcptimerlist *listp = &tcp_timer_list;
1900 
1901 	socket_lock_assert_owned(tp->t_inpcb->inp_socket);
1902 	if (!(TIMER_IS_ON_LIST(tp))) {
1903 		return;
1904 	}
1905 	lck_mtx_lock(&listp->mtx);
1906 
1907 	if (listp->next_te != NULL && listp->next_te == &tp->tentry) {
1908 		listp->next_te = LIST_NEXT(&tp->tentry, le);
1909 	}
1910 
1911 	LIST_REMOVE(&tp->tentry, le);
1912 	tp->t_flags &= ~(TF_TIMER_ONLIST);
1913 
1914 	listp->entries--;
1915 
1916 	tp->tentry.le.le_next = NULL;
1917 	tp->tentry.le.le_prev = NULL;
1918 
1919 	lck_mtx_unlock(&listp->mtx);
1920 }
1921 
1922 /*
1923  * Function to check if the timerlist needs to be rescheduled to run
1924  * the timer entry correctly. Basically, this is to check if we can avoid
1925  * taking the list lock.
1926  */
1927 
1928 static boolean_t
1929 need_to_resched_timerlist(u_int32_t runtime, u_int16_t mode)
1930 {
1931 	struct tcptimerlist *listp = &tcp_timer_list;
1932 	int32_t diff;
1933 
1934 	/*
1935 	 * If the list is being processed then the state of the list is
1936 	 * in flux. In this case always acquire the lock and set the state
1937 	 * correctly.
1938 	 */
1939 	if (listp->running) {
1940 		return TRUE;
1941 	}
1942 
1943 	if (!listp->scheduled) {
1944 		return TRUE;
1945 	}
1946 
1947 	diff = timer_diff(listp->runtime, 0, runtime, 0);
1948 	if (diff <= 0) {
1949 		/* The list is going to run before this timer */
1950 		return FALSE;
1951 	} else {
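		/*
		 * The list is scheduled to run later than this timer is due;
		 * reschedule only if the difference exceeds the quantum of the
		 * requested mode, otherwise the existing run is close enough.
		 */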
1952 		if (mode & TCP_TIMERLIST_10MS_MODE) {
1953 			if (diff <= TCP_TIMER_10MS_QUANTUM) {
1954 				return FALSE;
1955 			}
1956 		} else if (mode & TCP_TIMERLIST_100MS_MODE) {
1957 			if (diff <= TCP_TIMER_100MS_QUANTUM) {
1958 				return FALSE;
1959 			}
1960 		} else {
1961 			if (diff <= TCP_TIMER_500MS_QUANTUM) {
1962 				return FALSE;
1963 			}
1964 		}
1965 	}
1966 	return TRUE;
1967 }
1968 
1969 void
1970 tcp_sched_timerlist(uint32_t offset)
1971 {
1972 	uint64_t deadline = 0;
1973 	struct tcptimerlist *listp = &tcp_timer_list;
1974 
1975 	LCK_MTX_ASSERT(&listp->mtx, LCK_MTX_ASSERT_OWNED);
1976 
1977 	offset = min(offset, TCP_TIMERLIST_MAX_OFFSET);
1978 	listp->runtime = tcp_now + offset;
1979 	listp->schedtime = tcp_now;
1980 	if (listp->runtime == 0) {
1981 		listp->runtime++;
1982 		offset++;
1983 	}
1984 
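	/*
	 * Convert the offset (in TCP clock ticks) to an absolute deadline and
	 * arm the delayed thread call that processes the timer list.
	 */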
1985 	clock_interval_to_deadline(offset, USEC_PER_SEC, &deadline);
1986 
1987 	thread_call_enter_delayed(listp->call, deadline);
1988 	listp->scheduled = TRUE;
1989 }
1990 
1991 /*
1992  * Function to run the timers for a connection.
1993  *
1994  * Returns the offset of next timer to be run for this connection which
1995  * can be used to reschedule the timerlist.
1996  *
1997  * te_mode is an out parameter that indicates the modes of active
1998  * timers for this connection.
1999  */
2000 u_int32_t
2001 tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *te_mode,
2002     u_int16_t probe_if_index)
2003 {
2004 	struct socket *so;
2005 	u_int16_t i = 0, index = TCPT_NONE, lo_index = TCPT_NONE;
2006 	u_int32_t timer_val, offset = 0, lo_timer = 0;
2007 	int32_t diff;
2008 	boolean_t needtorun[TCPT_NTIMERS];
2009 	int count = 0;
2010 
2011 	VERIFY(tp != NULL);
2012 	bzero(needtorun, sizeof(needtorun));
2013 	*te_mode = 0;
2014 
2015 	socket_lock(tp->t_inpcb->inp_socket, 1);
2016 
2017 	so = tp->t_inpcb->inp_socket;
2018 	/* Release the want count on inp */
2019 	if (in_pcb_checkstate(tp->t_inpcb, WNT_RELEASE, 1)
2020 	    == WNT_STOPUSING) {
2021 		if (TIMER_IS_ON_LIST(tp)) {
2022 			tcp_remove_timer(tp);
2023 		}
2024 
2025 		/* Looks like the TCP connection got closed while we
2026 		 * were waiting for the lock. Done.
2027 		 */
2028 		goto done;
2029 	}
2030 
2031 	/*
2032 	 * If this connection is over an interface that needs to
2033 	 * be probed, send probe packets to reinitiate communication.
2034 	 */
2035 	if (TCP_IF_STATE_CHANGED(tp, probe_if_index)) {
2036 		tp->t_flagsext |= TF_IF_PROBING;
2037 		tcp_timers(tp, TCPT_PTO);
2038 		tp->t_timer[TCPT_PTO] = 0;
2039 		tp->t_flagsext &= ~TF_IF_PROBING;
2040 	}
2041 
2042 	/*
2043 	 * Since the timer thread needs to wait for tcp lock, it may race
2044 	 * with another thread that can cancel or reschedule the timer
2045 	 * that is about to run. Check if we need to run anything.
2046 	 */
2047 	if ((index = tp->tentry.index) == TCPT_NONE) {
2048 		goto done;
2049 	}
2050 
2051 	timer_val = tp->t_timer[index];
2052 
2053 	diff = timer_diff(tp->tentry.runtime, 0, tcp_now, 0);
2054 	if (diff > 0) {
2055 		if (tp->tentry.index != TCPT_NONE) {
2056 			offset = diff;
2057 			*(te_mode) = tp->tentry.mode;
2058 		}
2059 		goto done;
2060 	}
2061 
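	/*
	 * Clear the timer before running it; tcp_timers() may drop the
	 * connection and return NULL, in which case we are done.
	 */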
2062 	tp->t_timer[index] = 0;
2063 	if (timer_val > 0) {
2064 		tp = tcp_timers(tp, index);
2065 		if (tp == NULL) {
2066 			goto done;
2067 		}
2068 	}
2069 
2070 	/*
2071 	 * Check if there are any other timers that need to be run.
2072 	 * While doing it, adjust the timer values wrt tcp_now.
2073 	 */
2074 	tp->tentry.mode = 0;
2075 	for (i = 0; i < TCPT_NTIMERS; ++i) {
2076 		if (tp->t_timer[i] != 0) {
2077 			diff = timer_diff(tp->tentry.timer_start,
2078 			    tp->t_timer[i], tcp_now, 0);
2079 			if (diff <= 0) {
2080 				needtorun[i] = TRUE;
2081 				count++;
2082 			} else {
2083 				tp->t_timer[i] = diff;
2084 				needtorun[i] = FALSE;
2085 				if (lo_timer == 0 || diff < lo_timer) {
2086 					lo_timer = diff;
2087 					lo_index = i;
2088 				}
2089 				TCP_SET_TIMER_MODE(tp->tentry.mode, i);
2090 			}
2091 		}
2092 	}
2093 
2094 	tp->tentry.timer_start = tcp_now;
2095 	tp->tentry.index = lo_index;
2096 	VERIFY(tp->tentry.index == TCPT_NONE || tp->tentry.mode > 0);
2097 
2098 	if (tp->tentry.index != TCPT_NONE) {
2099 		tp->tentry.runtime = tp->tentry.timer_start +
2100 		    tp->t_timer[tp->tentry.index];
2101 		if (tp->tentry.runtime == 0) {
2102 			tp->tentry.runtime++;
2103 		}
2104 	}
2105 
2106 	if (count > 0) {
2107 		/* run any other timers outstanding at this time. */
2108 		for (i = 0; i < TCPT_NTIMERS; ++i) {
2109 			if (needtorun[i]) {
2110 				tp->t_timer[i] = 0;
2111 				tp = tcp_timers(tp, i);
2112 				if (tp == NULL) {
2113 					offset = 0;
2114 					*(te_mode) = 0;
2115 					goto done;
2116 				}
2117 			}
2118 		}
2119 		tcp_set_lotimer_index(tp);
2120 	}
2121 
2122 	if (tp->tentry.index < TCPT_NONE) {
2123 		offset = tp->t_timer[tp->tentry.index];
2124 		*(te_mode) = tp->tentry.mode;
2125 	}
2126 
2127 done:
2128 	if (tp != NULL && tp->tentry.index == TCPT_NONE) {
2129 		tcp_remove_timer(tp);
2130 		offset = 0;
2131 	}
2132 
2133 	socket_unlock(so, 1);
2134 	return offset;
2135 }
2136 
2137 void
2138 tcp_run_timerlist(void * arg1, void * arg2)
2139 {
2140 #pragma unused(arg1, arg2)
2141 	struct tcptimerentry *te, *__single next_te;
2142 	struct tcptimerlist *__single listp = &tcp_timer_list;
2143 	struct tcpcb *__single tp;
2144 	uint32_t next_timer = 0; /* offset of the next timer on the list */
2145 	u_int16_t te_mode = 0;  /* modes of all active timers in a tcpcb */
2146 	u_int16_t list_mode = 0; /* cumulative of modes of all tcpcbs */
2147 	uint32_t active_count = 0;
2148 
2149 	calculate_tcp_clock();
2150 
2151 	lck_mtx_lock(&listp->mtx);
2152 
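	/*
	 * Record in tcpstat how far the actual run time has drifted from the
	 * scheduled runtime.
	 */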
2153 	int32_t drift = tcp_now - listp->runtime;
2154 	if (drift <= 1) {
2155 		tcpstat.tcps_timer_drift_le_1_ms++;
2156 	} else if (drift <= 10) {
2157 		tcpstat.tcps_timer_drift_le_10_ms++;
2158 	} else if (drift <= 20) {
2159 		tcpstat.tcps_timer_drift_le_20_ms++;
2160 	} else if (drift <= 50) {
2161 		tcpstat.tcps_timer_drift_le_50_ms++;
2162 	} else if (drift <= 100) {
2163 		tcpstat.tcps_timer_drift_le_100_ms++;
2164 	} else if (drift <= 200) {
2165 		tcpstat.tcps_timer_drift_le_200_ms++;
2166 	} else if (drift <= 500) {
2167 		tcpstat.tcps_timer_drift_le_500_ms++;
2168 	} else if (drift <= 1000) {
2169 		tcpstat.tcps_timer_drift_le_1000_ms++;
2170 	} else {
2171 		tcpstat.tcps_timer_drift_gt_1000_ms++;
2172 	}
2173 
2174 	listp->running = TRUE;
2175 
2176 	LIST_FOREACH_SAFE(te, &listp->lhead, le, next_te) {
2177 		uint32_t offset = 0;
2178 		uint32_t runtime = te->runtime;
2179 
2180 		tp = TIMERENTRY_TO_TP(te);
2181 
2182 		/*
2183 		 * An interface probe may need to happen before the previously scheduled runtime
2184 		 */
2185 		if (te->index < TCPT_NONE && TSTMP_GT(runtime, tcp_now) &&
2186 		    !TCP_IF_STATE_CHANGED(tp, listp->probe_if_index)) {
2187 			offset = timer_diff(runtime, 0, tcp_now, 0);
2188 			if (next_timer == 0 || offset < next_timer) {
2189 				next_timer = offset;
2190 			}
2191 			list_mode |= te->mode;
2192 			continue;
2193 		}
2194 
2195 		/*
2196 		 * Acquire an inp wantcnt on the inpcb so that the socket
2197 		 * won't get detached even if tcp_close is called
2198 		 */
2199 		if (in_pcb_checkstate(tp->t_inpcb, WNT_ACQUIRE, 0)
2200 		    == WNT_STOPUSING) {
2201 			/*
2202 			 * Need to take socket lock because it protects
2203 			 * TIMER_IS_ON_LIST
2204 			 */
2205 			lck_mtx_unlock(&listp->mtx);
2206 			socket_lock(tp->t_inpcb->inp_socket, 1);
2207 			tcp_remove_timer(tp);
2208 			socket_unlock(tp->t_inpcb->inp_socket, 1);
2209 			lck_mtx_lock(&listp->mtx);
2210 			continue;
2211 		}
2212 		active_count++;
2213 
2214 		/*
2215 		 * Store the next timerentry pointer before releasing the
2216 		 * list lock. If that entry has to be removed when we
2217 		 * release the lock, this pointer will be updated to the
2218 		 * element after that.
2219 		 */
2220 		listp->next_te = next_te;
2221 
2222 		VERIFY_NEXT_LINK(&tp->tentry, le);
2223 		VERIFY_PREV_LINK(&tp->tentry, le);
2224 
2225 		lck_mtx_unlock(&listp->mtx);
2226 
2227 		offset = tcp_run_conn_timer(tp, &te_mode,
2228 		    listp->probe_if_index);
2229 
2230 		lck_mtx_lock(&listp->mtx);
2231 
2232 		next_te = listp->next_te;
2233 		listp->next_te = NULL;
2234 
2235 		if (offset > 0 && te_mode != 0) {
2236 			list_mode |= te_mode;
2237 
2238 			if (next_timer == 0 || offset < next_timer) {
2239 				next_timer = offset;
2240 			}
2241 		}
2242 	}
2243 
2244 	if (!LIST_EMPTY(&listp->lhead)) {
2245 		uint32_t next_mode = 0;
2246 		if ((list_mode & TCP_TIMERLIST_10MS_MODE) ||
2247 		    (listp->pref_mode & TCP_TIMERLIST_10MS_MODE)) {
2248 			next_mode = TCP_TIMERLIST_10MS_MODE;
2249 		} else if ((list_mode & TCP_TIMERLIST_100MS_MODE) ||
2250 		    (listp->pref_mode & TCP_TIMERLIST_100MS_MODE)) {
2251 			next_mode = TCP_TIMERLIST_100MS_MODE;
2252 		} else {
2253 			next_mode = TCP_TIMERLIST_500MS_MODE;
2254 		}
2255 
2256 		if (next_mode != TCP_TIMERLIST_500MS_MODE) {
2257 			listp->idleruns = 0;
2258 		} else {
2259 			/*
2260 			 * the next required mode is slow mode, but if
2261 			 * the last one was a faster mode and we did not
2262 			 * have enough idle runs, repeat the last mode.
2263 			 *
2264 			 * We try to keep the timer list in fast mode for
2265 			 * some idle time in expectation of new data.
2266 			 */
2267 			if (listp->mode != next_mode &&
2268 			    listp->idleruns < timer_fastmode_idlemax) {
2269 				listp->idleruns++;
2270 				next_mode = listp->mode;
2271 				next_timer = TCP_TIMER_100MS_QUANTUM;
2272 			} else {
2273 				listp->idleruns = 0;
2274 			}
2275 		}
2276 		listp->mode = next_mode;
2277 		if (listp->pref_offset != 0) {
2278 			next_timer = min(listp->pref_offset, next_timer);
2279 		}
2280 
2281 		if (listp->mode == TCP_TIMERLIST_500MS_MODE) {
2282 			next_timer = max(next_timer,
2283 			    TCP_TIMER_500MS_QUANTUM);
2284 		}
2285 
2286 		tcp_sched_timerlist(next_timer);
2287 	} else {
2288 		/*
2289 		 * No need to reschedule this timer, but always run
2290 		 * periodically at a much coarser granularity.
2291 		 */
2292 		tcp_sched_timerlist(TCP_TIMERLIST_MAX_OFFSET);
2293 	}
2294 
2295 	listp->running = FALSE;
2296 	listp->pref_mode = 0;
2297 	listp->pref_offset = 0;
2298 	listp->probe_if_index = 0;
2299 
2300 	lck_mtx_unlock(&listp->mtx);
2301 }
2302 
2303 /*
2304  * Function to check if the timerlist needs to be rescheduled to run this
2305  * connection's timers correctly.
2306  */
2307 void
2308 tcp_sched_timers(struct tcpcb *tp)
2309 {
2310 	struct tcptimerentry *te = &tp->tentry;
2311 	u_int16_t index = te->index;
2312 	u_int16_t mode = te->mode;
2313 	struct tcptimerlist *listp = &tcp_timer_list;
2314 	int32_t offset = 0;
2315 	boolean_t list_locked = FALSE;
2316 
2317 	if (tp->t_inpcb->inp_state == INPCB_STATE_DEAD) {
2318 		/* Just return without adding the dead pcb to the list */
2319 		if (TIMER_IS_ON_LIST(tp)) {
2320 			tcp_remove_timer(tp);
2321 		}
2322 		return;
2323 	}
2324 
2325 	if (index == TCPT_NONE) {
2326 		/* Nothing to run */
2327 		tcp_remove_timer(tp);
2328 		return;
2329 	}
2330 
2331 	/*
2332 	 * compute the offset at which the next timer for this connection
2333 	 * has to run.
2334 	 */
2335 	offset = timer_diff(te->runtime, 0, tcp_now, 0);
2336 	if (offset <= 0) {
2337 		offset = 1;
2338 		tcp_timer_advanced++;
2339 	}
2340 
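	/*
	 * Insert the entry if it is not on the list yet.  TIMER_IS_ON_LIST is
	 * re-checked after taking the list lock, since another thread may have
	 * added the entry while this one was waiting for the lock.
	 */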
2341 	if (!TIMER_IS_ON_LIST(tp)) {
2342 		if (!list_locked) {
2343 			lck_mtx_lock(&listp->mtx);
2344 			list_locked = TRUE;
2345 		}
2346 
2347 		if (!TIMER_IS_ON_LIST(tp)) {
2348 			LIST_INSERT_HEAD(&listp->lhead, te, le);
2349 			tp->t_flags |= TF_TIMER_ONLIST;
2350 
2351 			listp->entries++;
2352 			if (listp->entries > listp->maxentries) {
2353 				listp->maxentries = listp->entries;
2354 			}
2355 
2356 			/* if the list is not scheduled, just schedule it */
2357 			if (!listp->scheduled) {
2358 				goto schedule;
2359 			}
2360 		}
2361 	}
2362 
2363 	/*
2364 	 * Timer entry is currently on the list, check if the list needs
2365 	 * to be rescheduled.
2366 	 */
2367 	if (need_to_resched_timerlist(te->runtime, mode)) {
2368 		tcp_resched_timerlist++;
2369 
2370 		if (!list_locked) {
2371 			lck_mtx_lock(&listp->mtx);
2372 			list_locked = TRUE;
2373 		}
2374 
2375 		VERIFY_NEXT_LINK(te, le);
2376 		VERIFY_PREV_LINK(te, le);
2377 
2378 		if (listp->running) {
2379 			listp->pref_mode |= mode;
2380 			if (listp->pref_offset == 0 ||
2381 			    offset < listp->pref_offset) {
2382 				listp->pref_offset = offset;
2383 			}
2384 		} else {
2385 			/*
2386 			 * The list could have got rescheduled while
2387 			 * this thread was waiting for the lock
2388 			 */
2389 			if (listp->scheduled) {
2390 				int32_t diff;
2391 				diff = timer_diff(listp->runtime, 0,
2392 				    tcp_now, offset);
2393 				if (diff <= 0) {
2394 					goto done;
2395 				} else {
2396 					goto schedule;
2397 				}
2398 			} else {
2399 				goto schedule;
2400 			}
2401 		}
2402 	}
2403 	goto done;
2404 
2405 schedule:
2406 	/*
2407 	 * Since a connection with timers is getting scheduled, the timer
2408 	 * list moves from idle to active state and that is why idleruns is
2409 	 * reset.
2410 	 */
2411 	if (mode & TCP_TIMERLIST_10MS_MODE) {
2412 		listp->mode = TCP_TIMERLIST_10MS_MODE;
2413 		listp->idleruns = 0;
2414 		offset = min(offset, TCP_TIMER_10MS_QUANTUM);
2415 	} else if (mode & TCP_TIMERLIST_100MS_MODE) {
2416 		if (listp->mode > TCP_TIMERLIST_100MS_MODE) {
2417 			listp->mode = TCP_TIMERLIST_100MS_MODE;
2418 		}
2419 		listp->idleruns = 0;
2420 		offset = min(offset, TCP_TIMER_100MS_QUANTUM);
2421 	}
2422 	tcp_sched_timerlist(offset);
2423 
2424 done:
2425 	if (list_locked) {
2426 		lck_mtx_unlock(&listp->mtx);
2427 	}
2428 
2429 	return;
2430 }
2431 
2432 static inline void
2433 tcp_set_lotimer_index(struct tcpcb *tp)
2434 {
2435 	uint16_t i, lo_index = TCPT_NONE, mode = 0;
2436 	uint32_t lo_timer = 0;
2437 	for (i = 0; i < TCPT_NTIMERS; ++i) {
2438 		if (tp->t_timer[i] != 0) {
2439 			TCP_SET_TIMER_MODE(mode, i);
2440 			if (lo_timer == 0 || tp->t_timer[i] < lo_timer) {
2441 				lo_timer = tp->t_timer[i];
2442 				lo_index = i;
2443 			}
2444 		}
2445 	}
2446 	tp->tentry.index = lo_index;
2447 	tp->tentry.mode = mode;
2448 	VERIFY(tp->tentry.index == TCPT_NONE || tp->tentry.mode > 0);
2449 
2450 	if (tp->tentry.index != TCPT_NONE) {
2451 		tp->tentry.runtime = tp->tentry.timer_start
2452 		    + tp->t_timer[tp->tentry.index];
2453 		if (tp->tentry.runtime == 0) {
2454 			tp->tentry.runtime++;
2455 		}
2456 	}
2457 }
2458 
2459 void
2460 tcp_check_timer_state(struct tcpcb *tp)
2461 {
2462 	socket_lock_assert_owned(tp->t_inpcb->inp_socket);
2463 
2464 	if (tp->t_inpcb->inp_flags2 & INP2_TIMEWAIT) {
2465 		return;
2466 	}
2467 
2468 	tcp_set_lotimer_index(tp);
2469 
2470 	tcp_sched_timers(tp);
2471 	return;
2472 }
2473 
2474 static inline void
2475 tcp_cumulative_stat(u_int32_t cur, u_int32_t *prev, u_int32_t *dest)
2476 {
2477 	/* handle wrap around */
2478 	int32_t diff = (int32_t) (cur - *prev);
2479 	if (diff > 0) {
2480 		*dest = diff;
2481 	} else {
2482 		*dest = 0;
2483 	}
2484 	*prev = cur;
2485 	return;
2486 }
2487 
2488 static inline void
2489 tcp_cumulative_stat64(u_int64_t cur, u_int64_t *prev, u_int64_t *dest)
2490 {
2491 	/* handle wrap around */
2492 	int64_t diff = (int64_t) (cur - *prev);
2493 	if (diff > 0) {
2494 		*dest = diff;
2495 	} else {
2496 		*dest = 0;
2497 	}
2498 	*prev = cur;
2499 	return;
2500 }
2501 
2502 __private_extern__ void
2503 tcp_report_stats(void)
2504 {
2505 	struct nstat_sysinfo_data data;
2506 	struct sockaddr_in dst;
2507 	struct sockaddr_in6 dst6;
2508 	struct rtentry *rt = NULL;
2509 	static struct tcp_last_report_stats prev;
2510 	u_int64_t var, uptime;
2511 
2512 #define stat    data.u.tcp_stats
2513 	if (((uptime = net_uptime()) - tcp_last_report_time) <
2514 	    tcp_report_stats_interval) {
2515 		return;
2516 	}
2517 
2518 	tcp_last_report_time = uptime;
2519 
2520 	bzero(&data, sizeof(data));
2521 	data.flags = NSTAT_SYSINFO_TCP_STATS;
2522 
2523 	SOCKADDR_ZERO(&dst, sizeof(dst));
2524 	dst.sin_len = sizeof(dst);
2525 	dst.sin_family = AF_INET;
2526 
2527 	/* ipv4 avg rtt */
2528 	lck_mtx_lock(rnh_lock);
2529 	rt =  rt_lookup(TRUE, SA(&dst), NULL,
2530 	    rt_tables[AF_INET], IFSCOPE_NONE);
2531 	lck_mtx_unlock(rnh_lock);
2532 	if (rt != NULL) {
2533 		RT_LOCK(rt);
2534 		if (rt_primary_default(rt, rt_key(rt)) &&
2535 		    rt->rt_stats != NULL) {
2536 			stat.ipv4_avgrtt = rt->rt_stats->nstat_avg_rtt;
2537 		}
2538 		RT_UNLOCK(rt);
2539 		rtfree(rt);
2540 		rt = NULL;
2541 	}
2542 
2543 	/* ipv6 avg rtt */
2544 	SOCKADDR_ZERO(&dst6, sizeof(dst6));
2545 	dst6.sin6_len = sizeof(dst6);
2546 	dst6.sin6_family = AF_INET6;
2547 
2548 	lck_mtx_lock(rnh_lock);
2549 	rt = rt_lookup(TRUE, SA(&dst6), NULL,
2550 	    rt_tables[AF_INET6], IFSCOPE_NONE);
2551 	lck_mtx_unlock(rnh_lock);
2552 	if (rt != NULL) {
2553 		RT_LOCK(rt);
2554 		if (rt_primary_default(rt, rt_key(rt)) &&
2555 		    rt->rt_stats != NULL) {
2556 			stat.ipv6_avgrtt = rt->rt_stats->nstat_avg_rtt;
2557 		}
2558 		RT_UNLOCK(rt);
2559 		rtfree(rt);
2560 		rt = NULL;
2561 	}
2562 
2563 	/* send packet loss rate, shift by 10 for precision */
2564 	if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_sndrexmitpack > 0) {
2565 		var = tcpstat.tcps_sndrexmitpack << 10;
2566 		stat.send_plr = (uint32_t)((var * 100) / tcpstat.tcps_sndpack);
2567 	}
2568 
2569 	/* recv packet loss rate, shift by 10 for precision */
2570 	if (tcpstat.tcps_rcvpack > 0 && tcpstat.tcps_recovered_pkts > 0) {
2571 		var = tcpstat.tcps_recovered_pkts << 10;
2572 		stat.recv_plr = (uint32_t)((var * 100) / tcpstat.tcps_rcvpack);
2573 	}
2574 
2575 	/* RTO after tail loss, shift by 10 for precision */
2576 	if (tcpstat.tcps_sndrexmitpack > 0
2577 	    && tcpstat.tcps_tailloss_rto > 0) {
2578 		var = tcpstat.tcps_tailloss_rto << 10;
2579 		stat.send_tlrto_rate =
2580 		    (uint32_t)((var * 100) / tcpstat.tcps_sndrexmitpack);
2581 	}
2582 
2583 	/* packet reordering */
2584 	if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_reordered_pkts > 0) {
2585 		var = tcpstat.tcps_reordered_pkts << 10;
2586 		stat.send_reorder_rate =
2587 		    (uint32_t)((var * 100) / tcpstat.tcps_sndpack);
2588 	}
2589 
2590 	if (tcp_ecn_outbound == 1) {
2591 		stat.ecn_client_enabled = 1;
2592 	}
2593 	if (tcp_ecn_inbound == 1) {
2594 		stat.ecn_server_enabled = 1;
2595 	}
2596 	tcp_cumulative_stat(tcpstat.tcps_connattempt,
2597 	    &prev.tcps_connattempt, &stat.connection_attempts);
2598 	tcp_cumulative_stat(tcpstat.tcps_accepts,
2599 	    &prev.tcps_accepts, &stat.connection_accepts);
2600 	tcp_cumulative_stat(tcpstat.tcps_ecn_client_setup,
2601 	    &prev.tcps_ecn_client_setup, &stat.ecn_client_setup);
2602 	tcp_cumulative_stat(tcpstat.tcps_ecn_server_setup,
2603 	    &prev.tcps_ecn_server_setup, &stat.ecn_server_setup);
2604 	tcp_cumulative_stat(tcpstat.tcps_ecn_client_success,
2605 	    &prev.tcps_ecn_client_success, &stat.ecn_client_success);
2606 	tcp_cumulative_stat(tcpstat.tcps_ecn_server_success,
2607 	    &prev.tcps_ecn_server_success, &stat.ecn_server_success);
2608 	tcp_cumulative_stat(tcpstat.tcps_ecn_not_supported,
2609 	    &prev.tcps_ecn_not_supported, &stat.ecn_not_supported);
2610 	tcp_cumulative_stat(tcpstat.tcps_ecn_lost_syn,
2611 	    &prev.tcps_ecn_lost_syn, &stat.ecn_lost_syn);
2612 	tcp_cumulative_stat(tcpstat.tcps_ecn_lost_synack,
2613 	    &prev.tcps_ecn_lost_synack, &stat.ecn_lost_synack);
2614 	tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ce,
2615 	    &prev.tcps_ecn_recv_ce, &stat.ecn_recv_ce);
2616 	tcp_cumulative_stat(tcpstat.tcps_ecn_recv_ece,
2617 	    &prev.tcps_ecn_recv_ece, &stat.ecn_recv_ece);
2620 	tcp_cumulative_stat(tcpstat.tcps_ecn_sent_ece,
2621 	    &prev.tcps_ecn_sent_ece, &stat.ecn_sent_ece);
2624 	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_recv_ce,
2625 	    &prev.tcps_ecn_conn_recv_ce, &stat.ecn_conn_recv_ce);
2626 	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_recv_ece,
2627 	    &prev.tcps_ecn_conn_recv_ece, &stat.ecn_conn_recv_ece);
2628 	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_plnoce,
2629 	    &prev.tcps_ecn_conn_plnoce, &stat.ecn_conn_plnoce);
2630 	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_pl_ce,
2631 	    &prev.tcps_ecn_conn_pl_ce, &stat.ecn_conn_pl_ce);
2632 	tcp_cumulative_stat(tcpstat.tcps_ecn_conn_nopl_ce,
2633 	    &prev.tcps_ecn_conn_nopl_ce, &stat.ecn_conn_nopl_ce);
2634 	tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_synloss,
2635 	    &prev.tcps_ecn_fallback_synloss, &stat.ecn_fallback_synloss);
2636 	tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_reorder,
2637 	    &prev.tcps_ecn_fallback_reorder, &stat.ecn_fallback_reorder);
2638 	tcp_cumulative_stat(tcpstat.tcps_ecn_fallback_ce,
2639 	    &prev.tcps_ecn_fallback_ce, &stat.ecn_fallback_ce);
2640 	tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_rcv,
2641 	    &prev.tcps_tfo_syn_data_rcv, &stat.tfo_syn_data_rcv);
2642 	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_req_rcv,
2643 	    &prev.tcps_tfo_cookie_req_rcv, &stat.tfo_cookie_req_rcv);
2644 	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_sent,
2645 	    &prev.tcps_tfo_cookie_sent, &stat.tfo_cookie_sent);
2646 	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_invalid,
2647 	    &prev.tcps_tfo_cookie_invalid, &stat.tfo_cookie_invalid);
2648 	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_req,
2649 	    &prev.tcps_tfo_cookie_req, &stat.tfo_cookie_req);
2650 	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_rcv,
2651 	    &prev.tcps_tfo_cookie_rcv, &stat.tfo_cookie_rcv);
2652 	tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_sent,
2653 	    &prev.tcps_tfo_syn_data_sent, &stat.tfo_syn_data_sent);
2654 	tcp_cumulative_stat(tcpstat.tcps_tfo_syn_data_acked,
2655 	    &prev.tcps_tfo_syn_data_acked, &stat.tfo_syn_data_acked);
2656 	tcp_cumulative_stat(tcpstat.tcps_tfo_syn_loss,
2657 	    &prev.tcps_tfo_syn_loss, &stat.tfo_syn_loss);
2658 	tcp_cumulative_stat(tcpstat.tcps_tfo_blackhole,
2659 	    &prev.tcps_tfo_blackhole, &stat.tfo_blackhole);
2660 	tcp_cumulative_stat(tcpstat.tcps_tfo_cookie_wrong,
2661 	    &prev.tcps_tfo_cookie_wrong, &stat.tfo_cookie_wrong);
2662 	tcp_cumulative_stat(tcpstat.tcps_tfo_no_cookie_rcv,
2663 	    &prev.tcps_tfo_no_cookie_rcv, &stat.tfo_no_cookie_rcv);
2664 	tcp_cumulative_stat(tcpstat.tcps_tfo_heuristics_disable,
2665 	    &prev.tcps_tfo_heuristics_disable, &stat.tfo_heuristics_disable);
2666 	tcp_cumulative_stat(tcpstat.tcps_tfo_sndblackhole,
2667 	    &prev.tcps_tfo_sndblackhole, &stat.tfo_sndblackhole);
2668 
2669 
2670 	tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_attempt,
2671 	    &prev.tcps_mptcp_handover_attempt, &stat.mptcp_handover_attempt);
2672 	tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_attempt,
2673 	    &prev.tcps_mptcp_interactive_attempt, &stat.mptcp_interactive_attempt);
2674 	tcp_cumulative_stat(tcpstat.tcps_mptcp_aggregate_attempt,
2675 	    &prev.tcps_mptcp_aggregate_attempt, &stat.mptcp_aggregate_attempt);
2676 	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_attempt,
2677 	    &prev.tcps_mptcp_fp_handover_attempt, &stat.mptcp_fp_handover_attempt);
2678 	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_interactive_attempt,
2679 	    &prev.tcps_mptcp_fp_interactive_attempt, &stat.mptcp_fp_interactive_attempt);
2680 	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_aggregate_attempt,
2681 	    &prev.tcps_mptcp_fp_aggregate_attempt, &stat.mptcp_fp_aggregate_attempt);
2682 	tcp_cumulative_stat(tcpstat.tcps_mptcp_heuristic_fallback,
2683 	    &prev.tcps_mptcp_heuristic_fallback, &stat.mptcp_heuristic_fallback);
2684 	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_heuristic_fallback,
2685 	    &prev.tcps_mptcp_fp_heuristic_fallback, &stat.mptcp_fp_heuristic_fallback);
2686 	tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_success_wifi,
2687 	    &prev.tcps_mptcp_handover_success_wifi, &stat.mptcp_handover_success_wifi);
2688 	tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_success_cell,
2689 	    &prev.tcps_mptcp_handover_success_cell, &stat.mptcp_handover_success_cell);
2690 	tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_success,
2691 	    &prev.tcps_mptcp_interactive_success, &stat.mptcp_interactive_success);
2692 	tcp_cumulative_stat(tcpstat.tcps_mptcp_aggregate_success,
2693 	    &prev.tcps_mptcp_aggregate_success, &stat.mptcp_aggregate_success);
2694 	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_success_wifi,
2695 	    &prev.tcps_mptcp_fp_handover_success_wifi, &stat.mptcp_fp_handover_success_wifi);
2696 	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_success_cell,
2697 	    &prev.tcps_mptcp_fp_handover_success_cell, &stat.mptcp_fp_handover_success_cell);
2698 	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_interactive_success,
2699 	    &prev.tcps_mptcp_fp_interactive_success, &stat.mptcp_fp_interactive_success);
2700 	tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_aggregate_success,
2701 	    &prev.tcps_mptcp_fp_aggregate_success, &stat.mptcp_fp_aggregate_success);
2702 	tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_cell_from_wifi,
2703 	    &prev.tcps_mptcp_handover_cell_from_wifi, &stat.mptcp_handover_cell_from_wifi);
2704 	tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_wifi_from_cell,
2705 	    &prev.tcps_mptcp_handover_wifi_from_cell, &stat.mptcp_handover_wifi_from_cell);
2706 	tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_cell_from_wifi,
2707 	    &prev.tcps_mptcp_interactive_cell_from_wifi, &stat.mptcp_interactive_cell_from_wifi);
2708 	tcp_cumulative_stat64(tcpstat.tcps_mptcp_handover_cell_bytes,
2709 	    &prev.tcps_mptcp_handover_cell_bytes, &stat.mptcp_handover_cell_bytes);
2710 	tcp_cumulative_stat64(tcpstat.tcps_mptcp_interactive_cell_bytes,
2711 	    &prev.tcps_mptcp_interactive_cell_bytes, &stat.mptcp_interactive_cell_bytes);
2712 	tcp_cumulative_stat64(tcpstat.tcps_mptcp_aggregate_cell_bytes,
2713 	    &prev.tcps_mptcp_aggregate_cell_bytes, &stat.mptcp_aggregate_cell_bytes);
2714 	tcp_cumulative_stat64(tcpstat.tcps_mptcp_handover_all_bytes,
2715 	    &prev.tcps_mptcp_handover_all_bytes, &stat.mptcp_handover_all_bytes);
2716 	tcp_cumulative_stat64(tcpstat.tcps_mptcp_interactive_all_bytes,
2717 	    &prev.tcps_mptcp_interactive_all_bytes, &stat.mptcp_interactive_all_bytes);
2718 	tcp_cumulative_stat64(tcpstat.tcps_mptcp_aggregate_all_bytes,
2719 	    &prev.tcps_mptcp_aggregate_all_bytes, &stat.mptcp_aggregate_all_bytes);
2720 	tcp_cumulative_stat(tcpstat.tcps_mptcp_back_to_wifi,
2721 	    &prev.tcps_mptcp_back_to_wifi, &stat.mptcp_back_to_wifi);
2722 	tcp_cumulative_stat(tcpstat.tcps_mptcp_wifi_proxy,
2723 	    &prev.tcps_mptcp_wifi_proxy, &stat.mptcp_wifi_proxy);
2724 	tcp_cumulative_stat(tcpstat.tcps_mptcp_cell_proxy,
2725 	    &prev.tcps_mptcp_cell_proxy, &stat.mptcp_cell_proxy);
2726 	tcp_cumulative_stat(tcpstat.tcps_mptcp_triggered_cell,
2727 	    &prev.tcps_mptcp_triggered_cell, &stat.mptcp_triggered_cell);
2728 
2729 	nstat_sysinfo_send_data(&data);
2730 
2731 #undef  stat
2732 }
2733 
2734 void
2735 tcp_interface_send_probe(u_int16_t probe_if_index)
2736 {
2737 	int32_t offset = 0;
2738 	struct tcptimerlist *listp = &tcp_timer_list;
2739 
2740 	/* Make sure TCP clock is up to date */
2741 	calculate_tcp_clock();
2742 
2743 	lck_mtx_lock(&listp->mtx);
2744 	if (listp->probe_if_index > 0 && listp->probe_if_index != probe_if_index) {
2745 		tcpstat.tcps_probe_if_conflict++;
2746 		os_log(OS_LOG_DEFAULT,
2747 		    "%s: probe_if_index %u conflicts with %u, tcps_probe_if_conflict %u\n",
2748 		    __func__, probe_if_index, listp->probe_if_index,
2749 		    tcpstat.tcps_probe_if_conflict);
2750 		goto done;
2751 	}
2752 
2753 	listp->probe_if_index = probe_if_index;
2754 	if (listp->running) {
2755 		os_log(OS_LOG_DEFAULT, "%s: timer list already running for if_index %u\n",
2756 		    __func__, probe_if_index);
2757 		goto done;
2758 	}
2759 
2760 	/*
2761 	 * Reschedule the timerlist to run within the next 10ms, which is
2762 	 * the fastest that we can do.
2763 	 */
2764 	offset = TCP_TIMER_10MS_QUANTUM;
2765 	if (listp->scheduled) {
2766 		int32_t diff;
2767 		diff = timer_diff(listp->runtime, 0, tcp_now, offset);
2768 		if (diff <= 0) {
2769 			/* The timer will fire sooner than what's needed */
2770 			os_log(OS_LOG_DEFAULT,
2771 			    "%s: timer will fire sooner than needed for if_index %u\n",
2772 			    __func__, probe_if_index);
2773 			goto done;
2774 		}
2775 	}
2776 	listp->mode = TCP_TIMERLIST_10MS_MODE;
2777 	listp->idleruns = 0;
2778 
2779 	tcp_sched_timerlist(offset);
2780 
2781 done:
2782 	lck_mtx_unlock(&listp->mtx);
2783 	return;
2784 }
2785 
2786 /*
2787  * Enable read probes on this connection, if:
2788  * - it is in established state
2789  * - doesn't have any data outstanding
2790  * - the outgoing ifp matches
2791  * - we have not already sent any read probes
2792  */
2793 static void
2794 tcp_enable_read_probe(struct tcpcb *tp, struct ifnet *ifp)
2795 {
2796 	if (tp->t_state == TCPS_ESTABLISHED &&
2797 	    tp->snd_max == tp->snd_una &&
2798 	    tp->t_inpcb->inp_last_outifp == ifp &&
2799 	    !(tp->t_flagsext & TF_DETECT_READSTALL) &&
2800 	    tp->t_rtimo_probes == 0) {
2801 		tp->t_flagsext |= TF_DETECT_READSTALL;
2802 		tp->t_rtimo_probes = 0;
2803 		tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp,
2804 		    TCP_TIMER_10MS_QUANTUM);
2805 		if (tp->tentry.index == TCPT_NONE) {
2806 			tp->tentry.index = TCPT_KEEP;
2807 			tp->tentry.runtime = tcp_now +
2808 			    TCP_TIMER_10MS_QUANTUM;
2809 		} else {
2810 			int32_t diff = 0;
2811 
2812 			/* Reset runtime to be in next 10ms */
2813 			diff = timer_diff(tp->tentry.runtime, 0,
2814 			    tcp_now, TCP_TIMER_10MS_QUANTUM);
2815 			if (diff > 0) {
2816 				tp->tentry.index = TCPT_KEEP;
2817 				tp->tentry.runtime = tcp_now +
2818 				    TCP_TIMER_10MS_QUANTUM;
2819 				if (tp->tentry.runtime == 0) {
2820 					tp->tentry.runtime++;
2821 				}
2822 			}
2823 		}
2824 	}
2825 }
2826 
2827 /*
2828  * Disable read probe and reset the keep alive timer
2829  */
2830 static void
2831 tcp_disable_read_probe(struct tcpcb *tp)
2832 {
2833 	if (tp->t_adaptive_rtimo == 0 &&
2834 	    ((tp->t_flagsext & TF_DETECT_READSTALL) ||
2835 	    tp->t_rtimo_probes > 0)) {
2836 		tcp_keepalive_reset(tp);
2837 
2838 		if (tp->t_mpsub) {
2839 			mptcp_reset_keepalive(tp);
2840 		}
2841 	}
2842 }
2843 
2844 /*
2845  * Reschedule the tcp timerlist in the next 10ms to re-enable read/write
2846  * probes on connections going over a particular interface.
2847  */
2848 void
2849 tcp_probe_connectivity(struct ifnet *ifp, u_int32_t enable)
2850 {
2851 	int32_t offset;
2852 	struct tcptimerlist *listp = &tcp_timer_list;
2853 	struct inpcbinfo *pcbinfo = &tcbinfo;
2854 	struct inpcb *inp, *nxt;
2855 
2856 	if (ifp == NULL) {
2857 		return;
2858 	}
2859 
2860 	/* update clock */
2861 	calculate_tcp_clock();
2862 
2863 	/*
2864 	 * Enable keep alive timer on all connections that are
2865 	 * active/established on this interface.
2866 	 */
2867 	lck_rw_lock_shared(&pcbinfo->ipi_lock);
2868 
2869 	LIST_FOREACH_SAFE(inp, pcbinfo->ipi_listhead, inp_list, nxt) {
2870 		struct tcpcb *tp = NULL;
2871 		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) ==
2872 		    WNT_STOPUSING) {
2873 			continue;
2874 		}
2875 
2876 		/* Acquire lock to look at the state of the connection */
2877 		socket_lock(inp->inp_socket, 1);
2878 
2879 		/* Release the want count */
2880 		if (inp->inp_ppcb == NULL ||
2881 		    (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING)) {
2882 			socket_unlock(inp->inp_socket, 1);
2883 			continue;
2884 		}
2885 		tp = intotcpcb(inp);
2886 		if (enable) {
2887 			tcp_enable_read_probe(tp, ifp);
2888 		} else {
2889 			tcp_disable_read_probe(tp);
2890 		}
2891 
2892 		socket_unlock(inp->inp_socket, 1);
2893 	}
2894 	lck_rw_done(&pcbinfo->ipi_lock);
2895 
2896 	lck_mtx_lock(&listp->mtx);
2897 	if (listp->running) {
2898 		listp->pref_mode |= TCP_TIMERLIST_10MS_MODE;
2899 		goto done;
2900 	}
2901 
2902 	/* Reschedule within the next 10ms */
2903 	offset = TCP_TIMER_10MS_QUANTUM;
2904 	if (listp->scheduled) {
2905 		int32_t diff;
2906 		diff = timer_diff(listp->runtime, 0, tcp_now, offset);
2907 		if (diff <= 0) {
2908 			/* The timer will fire sooner than what's needed */
2909 			goto done;
2910 		}
2911 	}
2912 	listp->mode = TCP_TIMERLIST_10MS_MODE;
2913 	listp->idleruns = 0;
2914 
2915 	tcp_sched_timerlist(offset);
2916 done:
2917 	lck_mtx_unlock(&listp->mtx);
2918 	return;
2919 }
2920 
2921 inline void
2922 tcp_update_mss_core(struct tcpcb *tp, struct ifnet *ifp)
2923 {
2924 	struct if_cellular_status_v1 *ifsr;
2925 	u_int32_t optlen;
2926 	ifsr = &ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
2927 	if (ifsr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
2928 		optlen = tp->t_maxopd - tp->t_maxseg;
2929 
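		/*
		 * optlen is the TCP option overhead currently in use; it is held
		 * constant while t_maxopd is moved between the cached value and
		 * the recommended MSS tiers, so that t_maxseg tracks t_maxopd.
		 */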
2930 		if (ifsr->mss_recommended ==
2931 		    IF_CELL_UL_MSS_RECOMMENDED_NONE &&
2932 		    tp->t_cached_maxopd > 0 &&
2933 		    tp->t_maxopd < tp->t_cached_maxopd) {
2934 			tp->t_maxopd = tp->t_cached_maxopd;
2935 			tcpstat.tcps_mss_to_default++;
2936 		} else if (ifsr->mss_recommended ==
2937 		    IF_CELL_UL_MSS_RECOMMENDED_MEDIUM &&
2938 		    tp->t_maxopd > tcp_mss_rec_medium) {
2939 			tp->t_cached_maxopd = tp->t_maxopd;
2940 			tp->t_maxopd = tcp_mss_rec_medium;
2941 			tcpstat.tcps_mss_to_medium++;
2942 		} else if (ifsr->mss_recommended ==
2943 		    IF_CELL_UL_MSS_RECOMMENDED_LOW &&
2944 		    tp->t_maxopd > tcp_mss_rec_low) {
2945 			tp->t_cached_maxopd = tp->t_maxopd;
2946 			tp->t_maxopd = tcp_mss_rec_low;
2947 			tcpstat.tcps_mss_to_low++;
2948 		}
2949 		tp->t_maxseg = tp->t_maxopd - optlen;
2950 
2951 		/*
2952 		 * Clear the cached value if it is the same as the current one.
2953 		 */
2954 		if (tp->t_maxopd == tp->t_cached_maxopd) {
2955 			tp->t_cached_maxopd = 0;
2956 		}
2957 	}
2958 }
2959 
2960 void
2961 tcp_update_mss_locked(struct socket *so, struct ifnet *ifp)
2962 {
2963 	struct inpcb *inp = sotoinpcb(so);
2964 	struct tcpcb *tp = intotcpcb(inp);
2965 
2966 	if (ifp == NULL && (ifp = inp->inp_last_outifp) == NULL) {
2967 		return;
2968 	}
2969 
2970 	if (!IFNET_IS_CELLULAR(ifp)) {
2971 		/*
2972 		 * This optimization is implemented for cellular
2973 		 * networks only
2974 		 */
2975 		return;
2976 	}
2977 	if (tp->t_state <= TCPS_CLOSE_WAIT) {
2978 		/*
2979 		 * If the connection is currently doing or has done PMTU
2980 		 * blackhole detection, do not change the MSS
2981 		 */
2982 		if (tp->t_flags & TF_BLACKHOLE) {
2983 			return;
2984 		}
2985 		if (ifp->if_link_status == NULL) {
2986 			return;
2987 		}
2988 		tcp_update_mss_core(tp, ifp);
2989 	}
2990 }
2991 
2992 void
2993 tcp_itimer(struct inpcbinfo *ipi)
2994 {
2995 	struct inpcb *inp, *nxt;
2996 
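	/*
	 * Try to take the pcbinfo lock without blocking.  On the first failed
	 * attempt, request another fast timer run and return; if that already
	 * happened last time, block and take the lock exclusively.
	 */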
2997 	if (lck_rw_try_lock_exclusive(&ipi->ipi_lock) == FALSE) {
2998 		if (tcp_itimer_done == TRUE) {
2999 			tcp_itimer_done = FALSE;
3000 			os_atomic_inc(&ipi->ipi_timer_req.intimer_fast, relaxed);
3001 			return;
3002 		}
3003 		/* Already deferred once; now block and acquire the lock exclusively */
3004 		lck_rw_lock_exclusive(&ipi->ipi_lock);
3005 	}
3006 	tcp_itimer_done = TRUE;
3007 
3008 	LIST_FOREACH_SAFE(inp, &tcb, inp_list, nxt) {
3009 		struct socket *so;
3010 		struct ifnet *ifp;
3011 
3012 		if (inp->inp_ppcb == NULL ||
3013 		    in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) {
3014 			continue;
3015 		}
3016 		so = inp->inp_socket;
3017 		ifp = inp->inp_last_outifp;
3018 		socket_lock(so, 1);
3019 		if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
3020 			socket_unlock(so, 1);
3021 			continue;
3022 		}
3023 		so_check_extended_bk_idle_time(so);
3024 		if (ipi->ipi_flags & INPCBINFO_UPDATE_MSS) {
3025 			tcp_update_mss_locked(so, NULL);
3026 		}
3027 		socket_unlock(so, 1);
3028 
3029 		/*
3030 		 * Defunct all system-initiated background sockets if the
3031 		 * socket is using the cellular interface and the interface
3032 		 * has its LQM set to abort.
3033 		 */
3034 		if ((ipi->ipi_flags & INPCBINFO_HANDLE_LQM_ABORT) &&
3035 		    IS_SO_TC_BACKGROUNDSYSTEM(so->so_traffic_class) &&
3036 		    ifp != NULL && IFNET_IS_CELLULAR(ifp) &&
3037 		    (ifp->if_interface_state.valid_bitmask &
3038 		    IF_INTERFACE_STATE_LQM_STATE_VALID) &&
3039 		    ifp->if_interface_state.lqm_state ==
3040 		    IFNET_LQM_THRESH_ABORT) {
3041 			socket_defunct(current_proc(), so,
3042 			    SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL);
3043 		}
3044 	}
3045 
3046 	ipi->ipi_flags &= ~(INPCBINFO_UPDATE_MSS | INPCBINFO_HANDLE_LQM_ABORT);
3047 	lck_rw_done(&ipi->ipi_lock);
3048 }
3049