xref: /linux-6.15/include/linux/tcp.h (revision bbb03029)
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP protocol.
 *
 * Version:	@(#)tcp.h	1.0.2	04/28/93
 *
 * Author:	Fred N. van Kempen, <[email protected]>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _LINUX_TCP_H
#define _LINUX_TCP_H


#include <linux/skbuff.h>
#include <linux/win_minmax.h>
#include <net/sock.h>
#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <uapi/linux/tcp.h>

static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
{
	return (struct tcphdr *)skb_transport_header(skb);
}

static inline unsigned int __tcp_hdrlen(const struct tcphdr *th)
{
	return th->doff * 4;
}

static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
{
	return __tcp_hdrlen(tcp_hdr(skb));
}

static inline struct tcphdr *inner_tcp_hdr(const struct sk_buff *skb)
{
	return (struct tcphdr *)skb_inner_transport_header(skb);
}

static inline unsigned int inner_tcp_hdrlen(const struct sk_buff *skb)
{
	return inner_tcp_hdr(skb)->doff * 4;
}

static inline unsigned int tcp_optlen(const struct sk_buff *skb)
{
	return (tcp_hdr(skb)->doff - 5) * 4;
}
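
/*
 * Editorial note, an illustrative sketch that is not part of the original
 * header: the helpers above all derive from the TCP data offset field.
 * tcp_hdrlen() is doff * 4 bytes, and tcp_optlen() is whatever lies beyond
 * the fixed 20-byte header.  The helper name below is hypothetical.
 */
static inline bool tcp_example_has_options(const struct sk_buff *skb)
{
	/* doff == 5 means a bare 20-byte header, i.e. no options present. */
	return tcp_hdrlen(skb) > sizeof(struct tcphdr);
}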

/* TCP Fast Open */
#define TCP_FASTOPEN_COOKIE_MIN	4	/* Min Fast Open Cookie size in bytes */
#define TCP_FASTOPEN_COOKIE_MAX	16	/* Max Fast Open Cookie size in bytes */
#define TCP_FASTOPEN_COOKIE_SIZE 8	/* the size employed by this impl. */

/* TCP Fast Open Cookie as stored in memory */
struct tcp_fastopen_cookie {
	union {
		u8	val[TCP_FASTOPEN_COOKIE_MAX];
#if IS_ENABLED(CONFIG_IPV6)
		struct in6_addr addr;
#endif
	};
	s8	len;
	bool	exp;	/* In RFC6994 experimental option format */
};
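
/*
 * Editorial note, an illustrative sketch that is not part of the original
 * header: a valid received cookie carries between TCP_FASTOPEN_COOKIE_MIN
 * and TCP_FASTOPEN_COOKIE_MAX bytes in val[], with len recording how many.
 * The helper name below is hypothetical.
 */
static inline bool tcp_example_cookie_len_ok(const struct tcp_fastopen_cookie *foc)
{
	return foc->len >= TCP_FASTOPEN_COOKIE_MIN &&
	       foc->len <= TCP_FASTOPEN_COOKIE_MAX;
}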

/* This defines a selective acknowledgement block. */
struct tcp_sack_block_wire {
	__be32	start_seq;
	__be32	end_seq;
};

struct tcp_sack_block {
	u32	start_seq;
	u32	end_seq;
};

/*These are used to set the sack_ok field in struct tcp_options_received */
#define TCP_SACK_SEEN     (1 << 0)   /*1 = peer is SACK capable, */
#define TCP_FACK_ENABLED  (1 << 1)   /*1 = FACK is enabled locally*/
#define TCP_DSACK_SEEN    (1 << 2)   /*1 = DSACK was received from peer*/

struct tcp_options_received {
/*	PAWS/RTTM data	*/
	long	ts_recent_stamp;/* Time we stored ts_recent (for aging) */
	u32	ts_recent;	/* Time stamp to echo next		*/
	u32	rcv_tsval;	/* Time stamp value             	*/
	u32	rcv_tsecr;	/* Time stamp echo reply        	*/
	u16	saw_tstamp : 1,	/* Saw TIMESTAMP on last packet		*/
		tstamp_ok : 1,	/* TIMESTAMP seen on SYN packet		*/
		dsack : 1,	/* D-SACK is scheduled			*/
		wscale_ok : 1,	/* Wscale seen on SYN packet		*/
		sack_ok : 4,	/* SACK seen on SYN packet		*/
		snd_wscale : 4,	/* Window scaling received from sender	*/
		rcv_wscale : 4;	/* Window scaling to send to receiver	*/
	u8	num_sacks;	/* Number of SACK blocks		*/
	u16	user_mss;	/* mss requested by user in ioctl	*/
	u16	mss_clamp;	/* Maximal mss, negotiated at connection setup */
};
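
/*
 * Editorial note, an illustrative sketch that is not part of the original
 * header: sack_ok above is a small flag field tested against TCP_SACK_SEEN,
 * TCP_FACK_ENABLED and TCP_DSACK_SEEN defined earlier.  The helper name
 * below is hypothetical and only demonstrates how those flags are meant to
 * be used.
 */
static inline bool tcp_example_peer_sack_capable(const struct tcp_options_received *rx_opt)
{
	return (rx_opt->sack_ok & TCP_SACK_SEEN) != 0;
}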

static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
{
	rx_opt->tstamp_ok = rx_opt->sack_ok = 0;
	rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
}

/* This is the max number of SACKS that we'll generate and process. It's safe
 * to increase this, although since:
 *   size = TCPOLEN_SACK_BASE_ALIGNED (4) + n * TCPOLEN_SACK_PERBLOCK (8)
 * only four options will fit in a standard TCP header */
#define TCP_NUM_SACKS 4
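
/*
 * Editorial note (not part of the original header), a worked example of the
 * size formula above: TCP option space is at most 40 bytes, since the 4-bit
 * data offset allows a 60-byte header and 20 bytes are fixed.  With
 * TCP_NUM_SACKS == 4 the SACK option needs 4 + 4 * 8 = 36 bytes and still
 * fits; a fifth block (44 bytes) would not.  When the 12-byte aligned
 * timestamp option is also present, only 28 bytes remain, so at most
 * 4 + 3 * 8 = 28 bytes, i.e. three SACK blocks, go out on such packets.
 */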

struct tcp_request_sock_ops;

struct tcp_request_sock {
	struct inet_request_sock	req;
	const struct tcp_request_sock_ops *af_specific;
	u64				snt_synack; /* first SYNACK sent time */
	bool				tfo_listener;
	u32				txhash;
	u32				rcv_isn;
	u32				snt_isn;
	u32				ts_off;
	u32				last_oow_ack_time; /* last SYNACK */
	u32				rcv_nxt; /* the ack # by SYNACK. For
						  * FastOpen it's the seq#
						  * after data-in-SYN.
						  */
};

static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
{
	return (struct tcp_request_sock *)req;
}

struct tcp_sock {
	/* inet_connection_sock has to be the first member of tcp_sock */
	struct inet_connection_sock	inet_conn;
	u16	tcp_header_len;	/* Bytes of tcp header to send		*/
	u16	gso_segs;	/* Max number of segs per GSO packet	*/

/*
 *	RFC793 variables by their proper names. This means you can
 *	read the code and the spec side by side (and laugh ...)
 *	See RFC793 and RFC1122. The RFC writes these in capitals.
 */
	u64	bytes_received;	/* RFC4898 tcpEStatsAppHCThruOctetsReceived
				 * sum(delta(rcv_nxt)), or how many bytes
				 * were acked.
				 */
	u32	segs_in;	/* RFC4898 tcpEStatsPerfSegsIn
				 * total number of segments in.
				 */
	u32	data_segs_in;	/* RFC4898 tcpEStatsPerfDataSegsIn
				 * total number of data segments in.
				 */
	u32	rcv_nxt;	/* What we want to receive next 	*/
	u32	copied_seq;	/* Head of yet unread data		*/
	u32	rcv_wup;	/* rcv_nxt on last window update sent	*/
	u32	snd_nxt;	/* Next sequence we send		*/
	u32	segs_out;	/* RFC4898 tcpEStatsPerfSegsOut
				 * The total number of segments sent.
				 */
	u32	data_segs_out;	/* RFC4898 tcpEStatsPerfDataSegsOut
				 * total number of data segments sent.
				 */
	u64	bytes_acked;	/* RFC4898 tcpEStatsAppHCThruOctetsAcked
				 * sum(delta(snd_una)), or how many bytes
				 * were acked.
				 */
	u32	snd_una;	/* First byte we want an ack for	*/
	u32	snd_sml;	/* Last byte of the most recently transmitted small packet */
	u32	rcv_tstamp;	/* timestamp of last received ACK (for keepalives) */
	u32	lsndtime;	/* timestamp of last sent data packet (for restart window) */
	u32	last_oow_ack_time;  /* timestamp of last out-of-window ACK */

	u32	tsoffset;	/* timestamp offset */

	struct list_head tsq_node; /* anchor in tsq_tasklet.head list */

	u32	snd_wl1;	/* Sequence for window update		*/
	u32	snd_wnd;	/* The window we expect to receive	*/
	u32	max_window;	/* Maximal window ever seen from peer	*/
	u32	mss_cache;	/* Cached effective mss, not including SACKS */

	u32	window_clamp;	/* Maximal window to advertise		*/
	u32	rcv_ssthresh;	/* Current window clamp			*/

	/* Information of the most recently (s)acked skb */
	struct tcp_rack {
		u64 mstamp; /* (Re)sent time of the skb */
		u32 rtt_us;  /* Associated RTT */
		u32 end_seq; /* Ending TCP sequence of the skb */
		u8 advanced; /* mstamp advanced since last lost marking */
		u8 reord;    /* reordering detected */
	} rack;
	u16	advmss;		/* Advertised MSS			*/
	u32	chrono_start;	/* Start time in jiffies of a TCP chrono */
	u32	chrono_stat[3];	/* Time in jiffies for chrono_stat stats */
	u8	chrono_type:2,	/* current chronograph type */
		rate_app_limited:1,  /* rate_{delivered,interval_us} limited? */
		fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */
		unused:4;
	u8	nonagle     : 4,/* Disable Nagle algorithm?             */
		thin_lto    : 1,/* Use linear timeouts for thin streams */
		unused1	    : 1,
		repair      : 1,
		frto        : 1;/* F-RTO (RFC5682) activated in CA_Loss */
	u8	repair_queue;
	u8	syn_data:1,	/* SYN includes data */
		syn_fastopen:1,	/* SYN includes Fast Open option */
		syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */
		syn_fastopen_ch:1, /* Active TFO re-enabling probe */
		syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
		save_syn:1,	/* Save headers of SYN packet */
		is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
	u32	tlp_high_seq;	/* snd_nxt at the time of TLP retransmit. */

/* RTT measurement */
	u64	tcp_mstamp;	/* most recent packet received/sent */
	u32	srtt_us;	/* smoothed round trip time << 3 in usecs */
	u32	mdev_us;	/* medium deviation			*/
	u32	mdev_max_us;	/* maximal mdev for the last rtt period	*/
	u32	rttvar_us;	/* smoothed mdev_max			*/
	u32	rtt_seq;	/* sequence number to update rttvar	*/
	struct  minmax rtt_min;

	u32	packets_out;	/* Packets which are "in flight"	*/
	u32	retrans_out;	/* Retransmitted packets out		*/
	u32	max_packets_out;  /* max packets_out in last window */
	u32	max_packets_seq;  /* right edge of max_packets_out flight */

	u16	urg_data;	/* Saved octet of OOB data and control flags */
	u8	ecn_flags;	/* ECN status bits.			*/
	u8	keepalive_probes; /* num of allowed keep alive probes	*/
	u32	reordering;	/* Packet reordering metric.		*/
	u32	snd_up;		/* Urgent pointer		*/

/*
 *      Options received (usually on last packet, some only on SYN packets).
 */
	struct tcp_options_received rx_opt;

/*
 *	Slow start and congestion control (see also Nagle, and Karn & Partridge)
 */
	u32	snd_ssthresh;	/* Slow start size threshold		*/
	u32	snd_cwnd;	/* Sending congestion window		*/
	u32	snd_cwnd_cnt;	/* Linear increase counter		*/
	u32	snd_cwnd_clamp; /* Do not allow snd_cwnd to grow above this */
	u32	snd_cwnd_used;
	u32	snd_cwnd_stamp;
	u32	prior_cwnd;	/* Congestion window at start of Recovery. */
	u32	prr_delivered;	/* Number of newly delivered packets to
				 * receiver in Recovery. */
	u32	prr_out;	/* Total number of pkts sent during Recovery. */
	u32	delivered;	/* Total data packets delivered incl. rexmits */
	u32	lost;		/* Total data packets lost incl. rexmits */
	u32	app_limited;	/* limited until "delivered" reaches this val */
	u64	first_tx_mstamp;  /* start of window send phase */
	u64	delivered_mstamp; /* time we reached "delivered" */
	u32	rate_delivered;    /* saved rate sample: packets delivered */
	u32	rate_interval_us;  /* saved rate sample: time elapsed */

	u32	rcv_wnd;	/* Current receiver window		*/
	u32	write_seq;	/* Tail(+1) of data held in tcp send buffer */
	u32	notsent_lowat;	/* TCP_NOTSENT_LOWAT */
	u32	pushed_seq;	/* Last pushed seq, required to talk to windows */
	u32	lost_out;	/* Lost packets			*/
	u32	sacked_out;	/* SACK'd packets			*/
	u32	fackets_out;	/* FACK'd packets			*/

	struct hrtimer	pacing_timer;

	/* from STCP, retrans queue hinting */
	struct sk_buff* lost_skb_hint;
	struct sk_buff *retransmit_skb_hint;

	/* OOO segments go in this rbtree. Socket lock must be held. */
	struct rb_root	out_of_order_queue;
	struct sk_buff	*ooo_last_skb; /* cache rb_last(out_of_order_queue) */

	/* SACKs data, these 2 need to be together (see tcp_options_write) */
	struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
	struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/

	struct tcp_sack_block recv_sack_cache[4];

	struct sk_buff *highest_sack;   /* skb just after the highest
					 * skb with SACKed bit set
					 * (validity guaranteed only if
					 * sacked_out > 0)
					 */

	int     lost_cnt_hint;

	u32	prior_ssthresh; /* ssthresh saved at recovery start	*/
	u32	high_seq;	/* snd_nxt at onset of congestion	*/

	u32	retrans_stamp;	/* Timestamp of the last retransmit,
				 * also used in SYN-SENT to remember stamp of
				 * the first SYN. */
	u32	undo_marker;	/* snd_una upon a new recovery episode. */
	int	undo_retrans;	/* number of undoable retransmissions. */
	u32	total_retrans;	/* Total retransmits for entire connection */

	u32	urg_seq;	/* Seq of received urgent pointer */
	unsigned int		keepalive_time;	  /* time before keep alive takes place */
	unsigned int		keepalive_intvl;  /* time interval between keep alive probes */

	int			linger2;

/* Receiver side RTT estimation */
	struct {
		u32	rtt_us;
		u32	seq;
		u64	time;
	} rcv_rtt_est;

/* Receiver queue space */
	struct {
		int	space;
		u32	seq;
		u64	time;
	} rcvq_space;

/* TCP-specific MTU probe information. */
	struct {
		u32		  probe_seq_start;
		u32		  probe_seq_end;
	} mtu_probe;
	u32	mtu_info; /* We received an ICMP_FRAG_NEEDED / ICMPV6_PKT_TOOBIG
			   * while socket was owned by user.
			   */

#ifdef CONFIG_TCP_MD5SIG
/* TCP AF-Specific parts; only used by MD5 Signature support so far */
	const struct tcp_sock_af_ops	*af_specific;

/* TCP MD5 Signature Option information */
	struct tcp_md5sig_info	__rcu *md5sig_info;
#endif

/* TCP fastopen related information */
	struct tcp_fastopen_request *fastopen_req;
	/* fastopen_rsk points to request_sock that resulted in this big
	 * socket. Used to retransmit SYNACKs etc.
	 */
	struct request_sock *fastopen_rsk;
	u32	*saved_syn;
};

enum tsq_enum {
	TSQ_THROTTLED,
	TSQ_QUEUED,
	TCP_TSQ_DEFERRED,	   /* tcp_tasklet_func() found socket was owned */
	TCP_WRITE_TIMER_DEFERRED,  /* tcp_write_timer() found socket was owned */
	TCP_DELACK_TIMER_DEFERRED, /* tcp_delack_timer() found socket was owned */
	TCP_MTU_REDUCED_DEFERRED,  /* tcp_v{4|6}_err() could not call
				    * tcp_v{4|6}_mtu_reduced()
				    */
};

enum tsq_flags {
	TSQF_THROTTLED			= (1UL << TSQ_THROTTLED),
	TSQF_QUEUED			= (1UL << TSQ_QUEUED),
	TCPF_TSQ_DEFERRED		= (1UL << TCP_TSQ_DEFERRED),
	TCPF_WRITE_TIMER_DEFERRED	= (1UL << TCP_WRITE_TIMER_DEFERRED),
	TCPF_DELACK_TIMER_DEFERRED	= (1UL << TCP_DELACK_TIMER_DEFERRED),
	TCPF_MTU_REDUCED_DEFERRED	= (1UL << TCP_MTU_REDUCED_DEFERRED),
};

static inline struct tcp_sock *tcp_sk(const struct sock *sk)
{
	return (struct tcp_sock *)sk;
}
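
/*
 * Editorial note, an illustrative sketch that is not part of the original
 * header: tcp_sk() is the usual downcast from a generic struct sock to its
 * TCP-specific state, valid because inet_connection_sock is the first
 * member of struct tcp_sock.  The helper name below is hypothetical.
 */
static inline u32 tcp_example_bytes_unacked(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	/* Sequence space between the send tail and the first unacked byte. */
	return tp->write_seq - tp->snd_una;
}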

struct tcp_timewait_sock {
	struct inet_timewait_sock tw_sk;
#define tw_rcv_nxt tw_sk.__tw_common.skc_tw_rcv_nxt
#define tw_snd_nxt tw_sk.__tw_common.skc_tw_snd_nxt
	u32			  tw_rcv_wnd;
	u32			  tw_ts_offset;
	u32			  tw_ts_recent;

	/* The time we sent the last out-of-window ACK: */
	u32			  tw_last_oow_ack_time;

	long			  tw_ts_recent_stamp;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	  *tw_md5_key;
#endif
};

static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
{
	return (struct tcp_timewait_sock *)sk;
}

static inline bool tcp_passive_fastopen(const struct sock *sk)
{
	return (sk->sk_state == TCP_SYN_RECV &&
		tcp_sk(sk)->fastopen_rsk != NULL);
}

static inline void fastopen_queue_tune(struct sock *sk, int backlog)
{
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	int somaxconn = READ_ONCE(sock_net(sk)->core.sysctl_somaxconn);

	queue->fastopenq.max_qlen = min_t(unsigned int, backlog, somaxconn);
}

static inline void tcp_move_syn(struct tcp_sock *tp,
				struct request_sock *req)
{
	tp->saved_syn = req->saved_syn;
	req->saved_syn = NULL;
}

static inline void tcp_saved_syn_free(struct tcp_sock *tp)
{
	kfree(tp->saved_syn);
	tp->saved_syn = NULL;
}

struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk);

static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss)
{
	/* We use READ_ONCE() here because socket might not be locked.
	 * This happens for listeners.
	 */
	u16 user_mss = READ_ONCE(tp->rx_opt.user_mss);

	return (user_mss && user_mss < mss) ? user_mss : mss;
}
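
/*
 * Editorial note, an illustrative sketch that is not part of the original
 * header: tcp_mss_clamp() keeps the smaller of a derived MSS and any value
 * the user requested (rx_opt.user_mss, e.g. via the TCP_MAXSEG socket
 * option).  The helper name and the 1460-byte figure below are purely
 * illustrative.
 */
static inline u16 tcp_example_clamped_mss(const struct tcp_sock *tp)
{
	/* With user_mss == 1200 this returns 1200; with user_mss == 0, 1460. */
	return tcp_mss_clamp(tp, 1460);
}
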
#endif	/* _LINUX_TCP_H */