/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_timer.c	8.2 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_tcpdebug.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/protosw.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/if.h>
#include <net/route.h>
#include <net/rss_config.h>
#include <net/vnet.h>
#include <net/netisr.h>

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/in_rss.h>
#include <netinet/in_systm.h>
#ifdef INET6
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_log_buf.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_seq.h>
#include <netinet/cc/cc.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif

int	tcp_persmin;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, persmin,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_persmin, 0, sysctl_msec_to_ticks, "I",
    "minimum persistence interval");

int	tcp_persmax;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, persmax,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_persmax, 0, sysctl_msec_to_ticks, "I",
    "maximum persistence interval");

int	tcp_keepinit;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_keepinit, 0, sysctl_msec_to_ticks, "I",
    "time to establish connection");

int	tcp_keepidle;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_keepidle, 0, sysctl_msec_to_ticks, "I",
    "time before keepalive probes begin");

int	tcp_keepintvl;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_keepintvl, 0, sysctl_msec_to_ticks, "I",
    "time between keepalive probes");

int	tcp_delacktime;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_DELACKTIME, delacktime,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_delacktime, 0, sysctl_msec_to_ticks, "I",
    "Time before a delayed ACK is sent");

VNET_DEFINE(int, tcp_msl);
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_VNET,
    &VNET_NAME(tcp_msl), 0, sysctl_msec_to_ticks, "I",
    "Maximum segment lifetime");

int	tcp_rexmit_initial;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_initial,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_rexmit_initial, 0, sysctl_msec_to_ticks, "I",
    "Initial Retransmission Timeout");

int	tcp_rexmit_min;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_min,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_rexmit_min, 0, sysctl_msec_to_ticks, "I",
    "Minimum Retransmission Timeout");

int	tcp_rexmit_slop;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_slop,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_rexmit_slop, 0, sysctl_msec_to_ticks, "I",
    "Retransmission Timer Slop");

VNET_DEFINE(int, tcp_always_keepalive) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive, CTLFLAG_VNET|CTLFLAG_RW,
    &VNET_NAME(tcp_always_keepalive), 0,
    "Assume SO_KEEPALIVE on all TCP connections");

int	tcp_fast_finwait2_recycle = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, fast_finwait2_recycle, CTLFLAG_RW,
    &tcp_fast_finwait2_recycle, 0,
    "Recycle closed FIN_WAIT_2 connections faster");

int	tcp_finwait2_timeout;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, finwait2_timeout,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_finwait2_timeout, 0, sysctl_msec_to_ticks, "I",
    "FIN-WAIT2 timeout");

int	tcp_keepcnt = TCPTV_KEEPCNT;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, keepcnt, CTLFLAG_RW, &tcp_keepcnt, 0,
    "Number of keepalive probes to send");

/* max idle probes */
int	tcp_maxpersistidle;

int	tcp_rexmit_drop_options = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rexmit_drop_options, CTLFLAG_RW,
    &tcp_rexmit_drop_options, 0,
    "Drop TCP options from 3rd and later retransmitted SYN");

VNET_DEFINE(int, tcp_pmtud_blackhole_detect);
SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_detection,
    CTLFLAG_RW|CTLFLAG_VNET,
    &VNET_NAME(tcp_pmtud_blackhole_detect), 0,
    "Path MTU Discovery Black Hole Detection Enabled");

#ifdef INET
VNET_DEFINE(int, tcp_pmtud_blackhole_mss) = 1200;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_mss,
    CTLFLAG_RW|CTLFLAG_VNET,
    &VNET_NAME(tcp_pmtud_blackhole_mss), 0,
    "Path MTU Discovery Black Hole Detection lowered MSS");
#endif

#ifdef INET6
VNET_DEFINE(int, tcp_v6pmtud_blackhole_mss) = 1220;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, v6pmtud_blackhole_mss,
    CTLFLAG_RW|CTLFLAG_VNET,
    &VNET_NAME(tcp_v6pmtud_blackhole_mss), 0,
    "Path MTU Discovery IPv6 Black Hole Detection lowered MSS");
#endif

#ifdef RSS
static int	per_cpu_timers = 1;
#else
static int	per_cpu_timers = 0;
#endif
SYSCTL_INT(_net_inet_tcp, OID_AUTO, per_cpu_timers, CTLFLAG_RW,
    &per_cpu_timers, 0, "run tcp timers on all cpus");

/*
 * Map the given inp to a CPU id.
 *
 * This queries RSS if it's compiled in, else it defaults to the current
 * CPU ID.
 */
inline int
inp_to_cpuid(struct inpcb *inp)
{
	u_int cpuid;

	if (per_cpu_timers) {
#ifdef RSS
		cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
		if (cpuid == NETISR_CPUID_NONE)
			return (curcpu);	/* XXX */
		else
			return (cpuid);
#endif
		/*
		 * We don't have a flowid -> cpuid mapping, so cheat and
		 * just map unknown cpuids to curcpu.  Not the best, but
		 * apparently better than defaulting to swi 0.
		 */
		cpuid = inp->inp_flowid % (mp_maxid + 1);
		if (!CPU_ABSENT(cpuid))
			return (cpuid);
		return (curcpu);
	} else {
		return (0);
	}
}
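
/*
 * Example (see tcp_timer_activate() below): the returned CPU id is fed
 * to callout_reset_on() so that a connection's timer callouts fire on
 * a consistent CPU:
 *
 *	cpu = inp_to_cpuid(inp);
 *	callout_reset_on(t_callout, delta, f_callout, tp, cpu);
 */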

/*
 * TCP protocol timeout routine called every 500 ms.
 * Updates the timestamps used for TCP and causes finite state machine
 * actions if timers expire.
 */
void
tcp_slowtimo(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		(void) tcp_tw_2msl_scan(0);
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}

int	tcp_backoff[TCP_MAXRXTSHIFT + 1] =
    { 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 512, 512, 512 };

int	tcp_totbackoff = 2559;	/* sum of tcp_backoff[] */
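
/*
 * Worked example: with a 1 s base RTO the successive timeouts wait
 * 1, 2, 4, ..., 256 s and then saturate at 512 s; the series sums to
 * 511 + 4 * 512 = 2559 base intervals, which is where tcp_totbackoff
 * comes from.
 */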

/*
 * TCP timer processing.
 */

void
tcp_timer_delack(void *xtp)
{
	struct epoch_tracker et;
	struct tcpcb *tp = xtp;
	struct inpcb *inp;
	CURVNET_SET(tp->t_vnet);

	inp = tp->t_inpcb;
	KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
	INP_WLOCK(inp);
	if (callout_pending(&tp->t_timers->tt_delack) ||
	    !callout_active(&tp->t_timers->tt_delack)) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	callout_deactivate(&tp->t_timers->tt_delack);
	if ((inp->inp_flags & INP_DROPPED) != 0) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	tp->t_flags |= TF_ACKNOW;
	TCPSTAT_INC(tcps_delack);
	NET_EPOCH_ENTER(et);
	(void) tp->t_fb->tfb_tcp_output(tp);
	INP_WUNLOCK(inp);
	NET_EPOCH_EXIT(et);
	CURVNET_RESTORE();
}

void
tcp_inpinfo_lock_del(struct inpcb *inp, struct tcpcb *tp)
{
	if (inp != NULL && tp != NULL)
		INP_WUNLOCK(inp);
}

void
tcp_timer_2msl(void *xtp)
{
	struct tcpcb *tp = xtp;
	struct inpcb *inp;
	struct epoch_tracker et;
	CURVNET_SET(tp->t_vnet);
#ifdef TCPDEBUG
	int ostate;

	ostate = tp->t_state;
#endif
	inp = tp->t_inpcb;
	KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
	INP_WLOCK(inp);
	tcp_free_sackholes(tp);
	if (callout_pending(&tp->t_timers->tt_2msl) ||
	    !callout_active(&tp->t_timers->tt_2msl)) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	callout_deactivate(&tp->t_timers->tt_2msl);
	if ((inp->inp_flags & INP_DROPPED) != 0) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0,
	    ("%s: tp %p tcpcb can't be stopped here", __func__, tp));
	/*
	 * 2 MSL timeout in shutdown went off.  If we're closed but
	 * still waiting for peer to close and connection has been idle
	 * too long delete connection control block.  Otherwise, check
	 * again in a bit.
	 *
	 * If in TIME_WAIT state just ignore as this timeout is handled in
	 * tcp_tw_2msl_scan().
	 *
	 * If fastrecycle of FIN_WAIT_2, in FIN_WAIT_2 and receiver has closed,
	 * there's no point in hanging onto FIN_WAIT_2 socket.  Just close it.
	 * Ignore fact that there were recent incoming segments.
	 */
	if ((inp->inp_flags & INP_TIMEWAIT) != 0) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	if (tcp_fast_finwait2_recycle && tp->t_state == TCPS_FIN_WAIT_2 &&
	    tp->t_inpcb && tp->t_inpcb->inp_socket &&
	    (tp->t_inpcb->inp_socket->so_rcv.sb_state & SBS_CANTRCVMORE)) {
		TCPSTAT_INC(tcps_finwait2_drops);
		if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
			tcp_inpinfo_lock_del(inp, tp);
			goto out;
		}
		NET_EPOCH_ENTER(et);
		tp = tcp_close(tp);
		NET_EPOCH_EXIT(et);
		tcp_inpinfo_lock_del(inp, tp);
		goto out;
	} else {
		if (ticks - tp->t_rcvtime <= TP_MAXIDLE(tp)) {
			callout_reset(&tp->t_timers->tt_2msl,
			    TP_KEEPINTVL(tp), tcp_timer_2msl, tp);
		} else {
			if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
				tcp_inpinfo_lock_del(inp, tp);
				goto out;
			}
			NET_EPOCH_ENTER(et);
			tp = tcp_close(tp);
			NET_EPOCH_EXIT(et);
			tcp_inpinfo_lock_del(inp, tp);
			goto out;
		}
	}

#ifdef TCPDEBUG
	if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
		    PRU_SLOWTIMO);
#endif
	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);

	if (tp != NULL)
		INP_WUNLOCK(inp);
out:
	CURVNET_RESTORE();
}

void
tcp_timer_keep(void *xtp)
{
	struct tcpcb *tp = xtp;
	struct tcptemp *t_template;
	struct inpcb *inp;
	struct epoch_tracker et;
	CURVNET_SET(tp->t_vnet);
#ifdef TCPDEBUG
	int ostate;

	ostate = tp->t_state;
#endif
	inp = tp->t_inpcb;
	KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
	INP_WLOCK(inp);
	if (callout_pending(&tp->t_timers->tt_keep) ||
	    !callout_active(&tp->t_timers->tt_keep)) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	callout_deactivate(&tp->t_timers->tt_keep);
	if ((inp->inp_flags & INP_DROPPED) != 0) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0,
	    ("%s: tp %p tcpcb can't be stopped here", __func__, tp));

	/*
	 * Because we don't regularly reset the keepalive callout in
	 * the ESTABLISHED state, it may be that we don't actually need
	 * to send a keepalive yet.  If that occurs, schedule another
	 * call for the next time the keepalive timer might expire.
	 */
	if (TCPS_HAVEESTABLISHED(tp->t_state)) {
		u_int idletime;

		idletime = ticks - tp->t_rcvtime;
		if (idletime < TP_KEEPIDLE(tp)) {
			callout_reset(&tp->t_timers->tt_keep,
			    TP_KEEPIDLE(tp) - idletime, tcp_timer_keep, tp);
			INP_WUNLOCK(inp);
			CURVNET_RESTORE();
			return;
		}
	}

	/*
	 * Keep-alive timer went off; send something
	 * or drop connection if idle for too long.
	 */
	TCPSTAT_INC(tcps_keeptimeo);
	if (tp->t_state < TCPS_ESTABLISHED)
		goto dropit;
	if ((V_tcp_always_keepalive ||
	    inp->inp_socket->so_options & SO_KEEPALIVE) &&
	    tp->t_state <= TCPS_CLOSING) {
		if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp))
			goto dropit;
		/*
		 * Send a packet designed to force a response
		 * if the peer is up and reachable:
		 * either an ACK if the connection is still alive,
		 * or an RST if the peer has closed the connection
		 * due to timeout or reboot.
		 * Using sequence number tp->snd_una-1
		 * causes the transmitted zero-length segment
		 * to lie outside the receive window;
		 * by the protocol spec, this requires the
		 * correspondent TCP to respond.
		 */
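		/*
		 * Worked example: with the stock keepidle = 7200 s,
		 * keepintvl = 75 s and keepcnt = 8, an unresponsive
		 * peer is probed eight times and the connection is
		 * dropped roughly 7200 + 8 * 75 = 7800 s after the
		 * last segment was received (TP_MAXIDLE() is
		 * keepcnt * keepintvl).
		 */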
		TCPSTAT_INC(tcps_keepprobe);
		t_template = tcpip_maketemplate(inp);
		if (t_template) {
			NET_EPOCH_ENTER(et);
			tcp_respond(tp, t_template->tt_ipgen,
			    &t_template->tt_t, (struct mbuf *)NULL,
			    tp->rcv_nxt, tp->snd_una - 1, 0);
			NET_EPOCH_EXIT(et);
			free(t_template, M_TEMP);
		}
		callout_reset(&tp->t_timers->tt_keep, TP_KEEPINTVL(tp),
		    tcp_timer_keep, tp);
	} else
		callout_reset(&tp->t_timers->tt_keep, TP_KEEPIDLE(tp),
		    tcp_timer_keep, tp);

#ifdef TCPDEBUG
	if (inp->inp_socket->so_options & SO_DEBUG)
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
		    PRU_SLOWTIMO);
#endif
	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	INP_WUNLOCK(inp);
	CURVNET_RESTORE();
	return;

dropit:
	TCPSTAT_INC(tcps_keepdrops);
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		tcp_inpinfo_lock_del(inp, tp);
		goto out;
	}
	NET_EPOCH_ENTER(et);
	tp = tcp_drop(tp, ETIMEDOUT);

#ifdef TCPDEBUG
	if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
		    PRU_SLOWTIMO);
#endif
	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	NET_EPOCH_EXIT(et);
	tcp_inpinfo_lock_del(inp, tp);
out:
	CURVNET_RESTORE();
}

void
tcp_timer_persist(void *xtp)
{
	struct tcpcb *tp = xtp;
	struct inpcb *inp;
	struct epoch_tracker et;
	CURVNET_SET(tp->t_vnet);
#ifdef TCPDEBUG
	int ostate;

	ostate = tp->t_state;
#endif
	inp = tp->t_inpcb;
	KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
	INP_WLOCK(inp);
	if (callout_pending(&tp->t_timers->tt_persist) ||
	    !callout_active(&tp->t_timers->tt_persist)) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	callout_deactivate(&tp->t_timers->tt_persist);
	if ((inp->inp_flags & INP_DROPPED) != 0) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0,
	    ("%s: tp %p tcpcb can't be stopped here", __func__, tp));
	/*
	 * Persistence timer into zero window.
	 * Force a byte to be output, if possible.
	 */
	TCPSTAT_INC(tcps_persisttimeo);
	/*
	 * Hack: if the peer is dead/unreachable, we do not
	 * time out if the window is closed.  After a full
	 * backoff, drop the connection if the idle time
	 * (no responses to probes) reaches the maximum
	 * backoff that we would use if retransmitting.
	 */
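	/*
	 * Worked example: with TCP_REXMTVAL(tp) = 200 ms, the second
	 * bound below is 200 ms * 2559 ~= 512 s, so a fully backed-off
	 * connection whose probes go unanswered is dropped after
	 * roughly eight and a half minutes (200 ms is only an example
	 * value).
	 */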
	if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
	    (ticks - tp->t_rcvtime >= tcp_maxpersistidle ||
	    ticks - tp->t_rcvtime >= TCP_REXMTVAL(tp) * tcp_totbackoff)) {
		TCPSTAT_INC(tcps_persistdrop);
		if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
			tcp_inpinfo_lock_del(inp, tp);
			goto out;
		}
		NET_EPOCH_ENTER(et);
		tp = tcp_drop(tp, ETIMEDOUT);
		NET_EPOCH_EXIT(et);
		tcp_inpinfo_lock_del(inp, tp);
		goto out;
	}
	/*
	 * If the user has closed the socket then drop a persisting
	 * connection after a much reduced timeout.
	 */
	if (tp->t_state > TCPS_CLOSE_WAIT &&
	    (ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) {
		TCPSTAT_INC(tcps_persistdrop);
		if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
			tcp_inpinfo_lock_del(inp, tp);
			goto out;
		}
		NET_EPOCH_ENTER(et);
		tp = tcp_drop(tp, ETIMEDOUT);
		NET_EPOCH_EXIT(et);
		tcp_inpinfo_lock_del(inp, tp);
		goto out;
	}
	tcp_setpersist(tp);
	tp->t_flags |= TF_FORCEDATA;
	NET_EPOCH_ENTER(et);
	(void) tp->t_fb->tfb_tcp_output(tp);
	NET_EPOCH_EXIT(et);
	tp->t_flags &= ~TF_FORCEDATA;

#ifdef TCPDEBUG
	if (tp != NULL && tp->t_inpcb->inp_socket->so_options & SO_DEBUG)
		tcp_trace(TA_USER, ostate, tp, NULL, NULL, PRU_SLOWTIMO);
#endif
	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	INP_WUNLOCK(inp);
out:
	CURVNET_RESTORE();
}

void
tcp_timer_rexmt(void *xtp)
{
	struct tcpcb *tp = xtp;
	CURVNET_SET(tp->t_vnet);
	int rexmt;
	struct inpcb *inp;
	struct epoch_tracker et;
	bool isipv6;
#ifdef TCPDEBUG
	int ostate;

	ostate = tp->t_state;
#endif
	inp = tp->t_inpcb;
	KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
	INP_WLOCK(inp);
	if (callout_pending(&tp->t_timers->tt_rexmt) ||
	    !callout_active(&tp->t_timers->tt_rexmt)) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	callout_deactivate(&tp->t_timers->tt_rexmt);
	if ((inp->inp_flags & INP_DROPPED) != 0) {
		INP_WUNLOCK(inp);
		CURVNET_RESTORE();
		return;
	}
	KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0,
	    ("%s: tp %p tcpcb can't be stopped here", __func__, tp));
	tcp_free_sackholes(tp);
	TCP_LOG_EVENT(tp, NULL, NULL, NULL, TCP_LOG_RTO, 0, 0, NULL, false);
	if (tp->t_fb->tfb_tcp_rexmit_tmr) {
		/* The stack has a timer action too. */
		(*tp->t_fb->tfb_tcp_rexmit_tmr)(tp);
	}
	/*
	 * Retransmission timer went off.  Message has not
	 * been acked within retransmit interval.  Back off
	 * to a longer retransmit interval and retransmit one segment.
	 */
	if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
		tp->t_rxtshift = TCP_MAXRXTSHIFT;
		TCPSTAT_INC(tcps_timeoutdrop);
		if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
			tcp_inpinfo_lock_del(inp, tp);
			goto out;
		}
		NET_EPOCH_ENTER(et);
		tp = tcp_drop(tp, ETIMEDOUT);
		NET_EPOCH_EXIT(et);
		tcp_inpinfo_lock_del(inp, tp);
		goto out;
	}
	if (tp->t_state == TCPS_SYN_SENT) {
		/*
		 * If the SYN was retransmitted, indicate CWND to be
		 * limited to 1 segment in cc_conn_init().
		 */
		tp->snd_cwnd = 1;
	} else if (tp->t_rxtshift == 1) {
		/*
		 * first retransmit; record ssthresh and cwnd so they can
		 * be recovered if this turns out to be a "bad" retransmit.
		 * A retransmit is considered "bad" if an ACK for this
		 * segment is received within RTT/2 interval; the assumption
		 * here is that the ACK was already in flight.  See
		 * "On Estimating End-to-End Network Path Properties" by
		 * Allman and Paxson for more details.
		 */
		tp->snd_cwnd_prev = tp->snd_cwnd;
		tp->snd_ssthresh_prev = tp->snd_ssthresh;
		tp->snd_recover_prev = tp->snd_recover;
		if (IN_FASTRECOVERY(tp->t_flags))
			tp->t_flags |= TF_WASFRECOVERY;
		else
			tp->t_flags &= ~TF_WASFRECOVERY;
		if (IN_CONGRECOVERY(tp->t_flags))
			tp->t_flags |= TF_WASCRECOVERY;
		else
			tp->t_flags &= ~TF_WASCRECOVERY;
		if ((tp->t_flags & TF_RCVD_TSTMP) == 0)
			tp->t_badrxtwin = ticks +
			    (tp->t_srtt >> (TCP_RTT_SHIFT + 1));
		/*
		 * If we have negotiated timestamps, t_badrxtwin is
		 * instead set by tcp_output() to the to_tsval it stamps
		 * on the retransmitted packet.
		 */
		tp->t_flags |= TF_PREVVALID;
	} else
		tp->t_flags &= ~TF_PREVVALID;
	TCPSTAT_INC(tcps_rexmttimeo);
	if ((tp->t_state == TCPS_SYN_SENT) ||
	    (tp->t_state == TCPS_SYN_RECEIVED))
		rexmt = tcp_rexmit_initial * tcp_backoff[tp->t_rxtshift];
	else
		rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
	TCPT_RANGESET(tp->t_rxtcur, rexmt,
	    tp->t_rttmin, TCPTV_REXMTMAX);
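	/*
	 * Worked example: with TCP_REXMTVAL(tp) = 200 ms and
	 * t_rxtshift = 3, rexmt = 200 ms * tcp_backoff[3] = 1.6 s;
	 * TCPT_RANGESET() then clamps the result to the interval
	 * [t_rttmin, TCPTV_REXMTMAX].
	 */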

	/*
	 * Enter the PLMTUD path only if the connection is in ESTABLISHED
	 * or FIN_WAIT_1 state.  FIN_WAIT_1 is included because, if the
	 * amount of data we send is very small, we could send it in a
	 * couple of packets and proceed straight to FIN; in that case we
	 * would never observe the ESTABLISHED state.
	 */
#ifdef INET6
	isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? true : false;
#else
	isipv6 = false;
#endif
	if (((V_tcp_pmtud_blackhole_detect == 1) ||
	    (V_tcp_pmtud_blackhole_detect == 2 && !isipv6) ||
	    (V_tcp_pmtud_blackhole_detect == 3 && isipv6)) &&
	    ((tp->t_state == TCPS_ESTABLISHED) ||
	    (tp->t_state == TCPS_FIN_WAIT_1))) {
		if (tp->t_rxtshift == 1) {
			/*
			 * We enter blackhole detection after the first
			 * unsuccessful timer-based retransmission.  We
			 * then try up to two reduced-MSS candidates,
			 * giving each candidate two retransmission
			 * attempts, but a candidate earns its two
			 * attempts only if it actually lowers the MSS.
			 */
			tp->t_blackhole_enter = 2;
			tp->t_blackhole_exit = tp->t_blackhole_enter;
			if (isipv6) {
#ifdef INET6
				if (tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss)
					tp->t_blackhole_exit += 2;
				if (tp->t_maxseg > V_tcp_v6mssdflt &&
				    V_tcp_v6pmtud_blackhole_mss > V_tcp_v6mssdflt)
					tp->t_blackhole_exit += 2;
#endif
			} else {
#ifdef INET
				if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss)
					tp->t_blackhole_exit += 2;
				if (tp->t_maxseg > V_tcp_mssdflt &&
				    V_tcp_pmtud_blackhole_mss > V_tcp_mssdflt)
					tp->t_blackhole_exit += 2;
#endif
			}
		}
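		/*
		 * Worked example (IPv4, example values): with
		 * t_maxseg = 1460, pmtud_blackhole_mss = 1200 and
		 * mssdflt = 536, both tests above add 2, so
		 * t_blackhole_exit = 6: shifts 2-3 retransmit with an
		 * MSS of 1200, shifts 4-5 with 536, and at shift 6 the
		 * original MSS is restored.
		 */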
		if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD|TF2_PLPMTU_MAXSEGSNT)) ==
		    (TF2_PLPMTU_PMTUD|TF2_PLPMTU_MAXSEGSNT)) &&
		    (tp->t_rxtshift >= tp->t_blackhole_enter &&
		    tp->t_rxtshift < tp->t_blackhole_exit &&
		    (tp->t_rxtshift - tp->t_blackhole_enter) % 2 == 0)) {
			/*
			 * Enter Path MTU Black-hole Detection mechanism:
			 * - Disable Path MTU Discovery (IP "DF" bit).
			 * - Reduce MTU to lower value than what we
			 *   negotiated with peer.
			 */
			if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) == 0) {
				/* Record that we may have found a black hole. */
				tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE;
				/* Keep track of previous MSS. */
				tp->t_pmtud_saved_maxseg = tp->t_maxseg;
			}

			/*
			 * Reduce the MSS to blackhole value or to the default
			 * in an attempt to retransmit.
			 */
#ifdef INET6
			if (isipv6 &&
			    tp->t_maxseg > V_tcp_v6pmtud_blackhole_mss &&
			    V_tcp_v6pmtud_blackhole_mss > V_tcp_v6mssdflt) {
				/* Use the sysctl tuneable blackhole MSS. */
				tp->t_maxseg = V_tcp_v6pmtud_blackhole_mss;
				TCPSTAT_INC(tcps_pmtud_blackhole_activated);
			} else if (isipv6) {
				/* Use the default MSS. */
				tp->t_maxseg = V_tcp_v6mssdflt;
				/*
				 * Disable Path MTU Discovery when we switch to
				 * minmss.
				 */
				tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
				TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
			}
#endif
#if defined(INET6) && defined(INET)
			else
#endif
#ifdef INET
			if (tp->t_maxseg > V_tcp_pmtud_blackhole_mss &&
			    V_tcp_pmtud_blackhole_mss > V_tcp_mssdflt) {
				/* Use the sysctl tuneable blackhole MSS. */
				tp->t_maxseg = V_tcp_pmtud_blackhole_mss;
				TCPSTAT_INC(tcps_pmtud_blackhole_activated);
			} else {
				/* Use the default MSS. */
				tp->t_maxseg = V_tcp_mssdflt;
				/*
				 * Disable Path MTU Discovery when we switch to
				 * minmss.
				 */
				tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
				TCPSTAT_INC(tcps_pmtud_blackhole_activated_min_mss);
			}
#endif
			/*
			 * Reset the slow-start flight size
			 * as it may depend on the new MSS.
			 */
			if (CC_ALGO(tp)->conn_init != NULL)
				CC_ALGO(tp)->conn_init(tp->ccv);
		} else {
			/*
			 * If further retransmissions are still unsuccessful
			 * with a lowered MTU, maybe this isn't a blackhole and
			 * we restore the previous MSS and blackhole detection
			 * flags.
			 */
			if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) &&
			    (tp->t_rxtshift >= tp->t_blackhole_exit)) {
				tp->t_flags2 |= TF2_PLPMTU_PMTUD;
				tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE;
				tp->t_maxseg = tp->t_pmtud_saved_maxseg;
				TCPSTAT_INC(tcps_pmtud_blackhole_failed);
				/*
				 * Reset the slow-start flight size as it
				 * may depend on the new MSS.
				 */
				if (CC_ALGO(tp)->conn_init != NULL)
					CC_ALGO(tp)->conn_init(tp->ccv);
			}
		}
	}

	/*
	 * Disable RFC1323 and SACK if we haven't got any response to
	 * our third SYN to work-around some broken terminal servers
	 * (most of which have hopefully been retired) that have bad VJ
	 * header compression code which trashes TCP segments containing
	 * unknown-to-them TCP options.
	 */
	if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) &&
	    (tp->t_rxtshift == 3))
		tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT);
	/*
	 * If we backed off this far, notify the L3 protocol that we're having
	 * connection problems.
	 */
	if (tp->t_rxtshift > TCP_RTT_INVALIDATE) {
#ifdef INET6
		if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
			in6_losing(tp->t_inpcb);
		else
#endif
			in_losing(tp->t_inpcb);
	}
	tp->snd_nxt = tp->snd_una;
	tp->snd_recover = tp->snd_max;
	/*
	 * Force a segment to be sent.
	 */
	tp->t_flags |= TF_ACKNOW;
	/*
	 * If timing a segment in this window, stop the timer.
	 */
	tp->t_rtttime = 0;

	cc_cong_signal(tp, NULL, CC_RTO);
	NET_EPOCH_ENTER(et);
	(void) tp->t_fb->tfb_tcp_output(tp);
	NET_EPOCH_EXIT(et);
#ifdef TCPDEBUG
	if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
		    PRU_SLOWTIMO);
#endif
	TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
	INP_WUNLOCK(inp);
out:
	CURVNET_RESTORE();
}

void
tcp_timer_activate(struct tcpcb *tp, uint32_t timer_type, u_int delta)
{
	struct callout *t_callout;
	callout_func_t *f_callout;
	struct inpcb *inp = tp->t_inpcb;
	int cpu = inp_to_cpuid(inp);

#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE)
		return;
#endif

	if (tp->t_timers->tt_flags & TT_STOPPED)
		return;

	switch (timer_type) {
	case TT_DELACK:
		t_callout = &tp->t_timers->tt_delack;
		f_callout = tcp_timer_delack;
		break;
	case TT_REXMT:
		t_callout = &tp->t_timers->tt_rexmt;
		f_callout = tcp_timer_rexmt;
		break;
	case TT_PERSIST:
		t_callout = &tp->t_timers->tt_persist;
		f_callout = tcp_timer_persist;
		break;
	case TT_KEEP:
		t_callout = &tp->t_timers->tt_keep;
		f_callout = tcp_timer_keep;
		break;
	case TT_2MSL:
		t_callout = &tp->t_timers->tt_2msl;
		f_callout = tcp_timer_2msl;
		break;
	default:
		if (tp->t_fb->tfb_tcp_timer_activate) {
			tp->t_fb->tfb_tcp_timer_activate(tp, timer_type, delta);
			return;
		}
		panic("tp %p bad timer_type %#x", tp, timer_type);
	}
	if (delta == 0) {
		callout_stop(t_callout);
	} else {
		callout_reset_on(t_callout, delta, f_callout, tp, cpu);
	}
}
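
/*
 * Usage sketch: callers arm one of the five base timers by passing a
 * tick count and cancel it by passing a delta of 0, e.g.:
 *
 *	tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);	(arm)
 *	tcp_timer_activate(tp, TT_REXMT, 0);		(cancel)
 */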

int
tcp_timer_active(struct tcpcb *tp, uint32_t timer_type)
{
	struct callout *t_callout;

	switch (timer_type) {
	case TT_DELACK:
		t_callout = &tp->t_timers->tt_delack;
		break;
	case TT_REXMT:
		t_callout = &tp->t_timers->tt_rexmt;
		break;
	case TT_PERSIST:
		t_callout = &tp->t_timers->tt_persist;
		break;
	case TT_KEEP:
		t_callout = &tp->t_timers->tt_keep;
		break;
	case TT_2MSL:
		t_callout = &tp->t_timers->tt_2msl;
		break;
	default:
		if (tp->t_fb->tfb_tcp_timer_active) {
			return (tp->t_fb->tfb_tcp_timer_active(tp, timer_type));
		}
		panic("tp %p bad timer_type %#x", tp, timer_type);
	}
	return (callout_active(t_callout));
}
/*
 * Stop the timer from running and set a flag in tt_flags that will
 * keep the timer from being rearmed.  The flag is needed to ensure
 * that a race does not leave the timer running and allow it to
 * restart itself (the keep and persist timers especially do this).
 */
int
tcp_timer_suspend(struct tcpcb *tp, uint32_t timer_type)
{
	struct callout *t_callout;
	uint32_t t_flags;

	switch (timer_type) {
	case TT_DELACK:
		t_flags = TT_DELACK_SUS;
		t_callout = &tp->t_timers->tt_delack;
		break;
	case TT_REXMT:
		t_flags = TT_REXMT_SUS;
		t_callout = &tp->t_timers->tt_rexmt;
		break;
	case TT_PERSIST:
		t_flags = TT_PERSIST_SUS;
		t_callout = &tp->t_timers->tt_persist;
		break;
	case TT_KEEP:
		t_flags = TT_KEEP_SUS;
		t_callout = &tp->t_timers->tt_keep;
		break;
	case TT_2MSL:
		t_flags = TT_2MSL_SUS;
		t_callout = &tp->t_timers->tt_2msl;
		break;
	default:
		panic("tp:%p bad timer_type 0x%x", tp, timer_type);
	}
	tp->t_timers->tt_flags |= t_flags;
	return (callout_stop(t_callout));
}
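
/*
 * Usage sketch (e.g. when swapping TCP function blocks): every timer
 * is suspended, the switch is performed, and each timer is then
 * unsuspended so it is rearmed against the connection's current state:
 *
 *	tcp_timer_suspend(tp, TT_REXMT);
 *	tcp_timer_suspend(tp, TT_PERSIST);
 *	...
 *	tcp_timers_unsuspend(tp, TT_REXMT);
 *	tcp_timers_unsuspend(tp, TT_PERSIST);
 */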

void
tcp_timers_unsuspend(struct tcpcb *tp, uint32_t timer_type)
{
	switch (timer_type) {
	case TT_DELACK:
		if (tp->t_timers->tt_flags & TT_DELACK_SUS) {
			tp->t_timers->tt_flags &= ~TT_DELACK_SUS;
			if (tp->t_flags & TF_DELACK) {
				/* A delayed ACK is owed; reactivate the timer. */
				tp->t_flags &= ~TF_DELACK;
				tcp_timer_activate(tp, TT_DELACK,
				    tcp_delacktime);
			}
		}
		break;
	case TT_REXMT:
		if (tp->t_timers->tt_flags & TT_REXMT_SUS) {
			tp->t_timers->tt_flags &= ~TT_REXMT_SUS;
			if (SEQ_GT(tp->snd_max, tp->snd_una) &&
			    (tcp_timer_active((tp), TT_PERSIST) == 0) &&
			    tp->snd_wnd) {
				/* We have outstanding data; activate the retransmit timer. */
				tcp_timer_activate(tp, TT_REXMT,
				    tp->t_rxtcur);
			}
		}
		break;
	case TT_PERSIST:
		if (tp->t_timers->tt_flags & TT_PERSIST_SUS) {
			tp->t_timers->tt_flags &= ~TT_PERSIST_SUS;
			if (tp->snd_wnd == 0) {
				/* Activate the persists timer */
				tp->t_rxtshift = 0;
				tcp_setpersist(tp);
			}
		}
		break;
	case TT_KEEP:
		if (tp->t_timers->tt_flags & TT_KEEP_SUS) {
			tp->t_timers->tt_flags &= ~TT_KEEP_SUS;
			tcp_timer_activate(tp, TT_KEEP,
			    TCPS_HAVEESTABLISHED(tp->t_state) ?
			    TP_KEEPIDLE(tp) : TP_KEEPINIT(tp));
		}
		break;
	case TT_2MSL:
		if (tp->t_timers->tt_flags & TT_2MSL_SUS) {
			tp->t_timers->tt_flags &= ~TT_2MSL_SUS;
			if ((tp->t_state == TCPS_FIN_WAIT_2) &&
			    ((tp->t_inpcb->inp_socket == NULL) ||
			    (tp->t_inpcb->inp_socket->so_rcv.sb_state & SBS_CANTRCVMORE))) {
				/* Start the 2MSL timer */
				tcp_timer_activate(tp, TT_2MSL,
				    (tcp_fast_finwait2_recycle) ?
				    tcp_finwait2_timeout : TP_MAXIDLE(tp));
			}
		}
		break;
	default:
		panic("tp:%p bad timer_type 0x%x", tp, timer_type);
	}
}

void
tcp_timer_stop(struct tcpcb *tp, uint32_t timer_type)
{
	struct callout *t_callout;

	tp->t_timers->tt_flags |= TT_STOPPED;
	switch (timer_type) {
	case TT_DELACK:
		t_callout = &tp->t_timers->tt_delack;
		break;
	case TT_REXMT:
		t_callout = &tp->t_timers->tt_rexmt;
		break;
	case TT_PERSIST:
		t_callout = &tp->t_timers->tt_persist;
		break;
	case TT_KEEP:
		t_callout = &tp->t_timers->tt_keep;
		break;
	case TT_2MSL:
		t_callout = &tp->t_timers->tt_2msl;
		break;
	default:
		if (tp->t_fb->tfb_tcp_timer_stop) {
			/*
			 * XXXrrs we need to look at this with the
			 * stop case below (flags).
			 */
			tp->t_fb->tfb_tcp_timer_stop(tp, timer_type);
			return;
		}
		panic("tp %p bad timer_type %#x", tp, timer_type);
	}

	if (callout_async_drain(t_callout, tcp_timer_discard) == 0) {
		/*
		 * Can't stop the callout; defer the actual tcpcb
		 * deletion to the last one.  We do this using the
		 * async drain function and incrementing the count in
		 * tt_draincnt.
		 */
		tp->t_timers->tt_draincnt++;
	}
}