/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP protocol.
 *
 * Version:	@(#)tcp.h	1.0.2	04/28/93
 *
 * Author:	Fred N. van Kempen, <[email protected]>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _LINUX_TCP_H
#define _LINUX_TCP_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/socket.h>

struct tcphdr {
	__be16	source;
	__be16	dest;
	__be32	seq;
	__be32	ack_seq;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u16	res1:4,
		doff:4,
		fin:1,
		syn:1,
		rst:1,
		psh:1,
		ack:1,
		urg:1,
		ece:1,
		cwr:1;
#elif defined(__BIG_ENDIAN_BITFIELD)
	__u16	doff:4,
		res1:4,
		cwr:1,
		ece:1,
		urg:1,
		ack:1,
		psh:1,
		rst:1,
		syn:1,
		fin:1;
#else
#error	"Adjust your <asm/byteorder.h> defines"
#endif
	__be16	window;
	__sum16	check;
	__be16	urg_ptr;
};

/*
 *	The union cast uses a gcc extension to avoid aliasing problems
 *	(a union is compatible with any of its members), so this part of
 *	the code is now -fstrict-aliasing safe.
 */
union tcp_word_hdr {
	struct tcphdr hdr;
	__be32 		  words[5];
};

#define tcp_flag_word(tp) ( ((union tcp_word_hdr *)(tp))->words[3])

enum {
	TCP_FLAG_CWR = __cpu_to_be32(0x00800000),
	TCP_FLAG_ECE = __cpu_to_be32(0x00400000),
	TCP_FLAG_URG = __cpu_to_be32(0x00200000),
	TCP_FLAG_ACK = __cpu_to_be32(0x00100000),
	TCP_FLAG_PSH = __cpu_to_be32(0x00080000),
	TCP_FLAG_RST = __cpu_to_be32(0x00040000),
	TCP_FLAG_SYN = __cpu_to_be32(0x00020000),
	TCP_FLAG_FIN = __cpu_to_be32(0x00010000),
	TCP_RESERVED_BITS = __cpu_to_be32(0x0F000000),
	TCP_DATA_OFFSET = __cpu_to_be32(0xF0000000)
};
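
/*
 * Illustrative sketch (not part of this header): the masks above apply to
 * tcp_flag_word(), i.e. the 32-bit word of the header holding doff, the
 * flag bits and the window, so several flags can be tested with a single
 * load.  The helper name is made up for the example.
 *
 *	static inline bool th_is_syn_ack(const struct tcphdr *th)
 *	{
 *		return (tcp_flag_word(th) & (TCP_FLAG_SYN | TCP_FLAG_ACK)) ==
 *		       (TCP_FLAG_SYN | TCP_FLAG_ACK);
 *	}
 */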

/* TCP socket options */
#define TCP_NODELAY		1	/* Turn off Nagle's algorithm. */
#define TCP_MAXSEG		2	/* Limit MSS */
#define TCP_CORK		3	/* Never send partially complete segments */
#define TCP_KEEPIDLE		4	/* Start keepalives after this period */
#define TCP_KEEPINTVL		5	/* Interval between keepalives */
#define TCP_KEEPCNT		6	/* Number of keepalives before death */
#define TCP_SYNCNT		7	/* Number of SYN retransmits */
#define TCP_LINGER2		8	/* Life time of orphaned FIN-WAIT-2 state */
#define TCP_DEFER_ACCEPT	9	/* Wake up listener only when data arrive */
#define TCP_WINDOW_CLAMP	10	/* Bound advertised window */
#define TCP_INFO		11	/* Information about this connection. */
#define TCP_QUICKACK		12	/* Block/reenable quick acks */
#define TCP_CONGESTION		13	/* Congestion control algorithm */
#define TCP_MD5SIG		14	/* TCP MD5 Signature (RFC2385) */
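
/*
 * Illustrative sketch (not part of this header): typical use of these
 * options from user space via setsockopt(2) (user code would include
 * <netinet/in.h>, <netinet/tcp.h> and <string.h>).  `fd' is a TCP socket,
 * error handling is omitted, and "cubic" assumes that congestion control
 * module is available.  Keepalive probing itself must additionally be
 * enabled with SO_KEEPALIVE at the SOL_SOCKET level.
 *
 *	int one = 1, idle = 60, intvl = 10, cnt = 5;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "cubic", strlen("cubic"));
 */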

#define TCPI_OPT_TIMESTAMPS	1
#define TCPI_OPT_SACK		2
#define TCPI_OPT_WSCALE		4
#define TCPI_OPT_ECN		8

enum tcp_ca_state
{
	TCP_CA_Open = 0,
#define TCPF_CA_Open	(1<<TCP_CA_Open)
	TCP_CA_Disorder = 1,
#define TCPF_CA_Disorder (1<<TCP_CA_Disorder)
	TCP_CA_CWR = 2,
#define TCPF_CA_CWR	(1<<TCP_CA_CWR)
	TCP_CA_Recovery = 3,
#define TCPF_CA_Recovery (1<<TCP_CA_Recovery)
	TCP_CA_Loss = 4
#define TCPF_CA_Loss	(1<<TCP_CA_Loss)
};
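
/*
 * The TCPF_CA_* masks expose the states as bits so that several of them
 * can be checked with one compare.  Illustrative sketch (not part of this
 * header), with `ca_state' holding one of the enum values and back_off()
 * a made-up helper:
 *
 *	if ((1 << ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery | TCPF_CA_Loss))
 *		back_off();	// sender is in a window-reduction state
 */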

struct tcp_info
{
	__u8	tcpi_state;
	__u8	tcpi_ca_state;
	__u8	tcpi_retransmits;
	__u8	tcpi_probes;
	__u8	tcpi_backoff;
	__u8	tcpi_options;
	__u8	tcpi_snd_wscale : 4, tcpi_rcv_wscale : 4;

	__u32	tcpi_rto;
	__u32	tcpi_ato;
	__u32	tcpi_snd_mss;
	__u32	tcpi_rcv_mss;

	__u32	tcpi_unacked;
	__u32	tcpi_sacked;
	__u32	tcpi_lost;
	__u32	tcpi_retrans;
	__u32	tcpi_fackets;

	/* Times. */
	__u32	tcpi_last_data_sent;
	__u32	tcpi_last_ack_sent;     /* Not remembered, sorry. */
	__u32	tcpi_last_data_recv;
	__u32	tcpi_last_ack_recv;

	/* Metrics. */
	__u32	tcpi_pmtu;
	__u32	tcpi_rcv_ssthresh;
	__u32	tcpi_rtt;
	__u32	tcpi_rttvar;
	__u32	tcpi_snd_ssthresh;
	__u32	tcpi_snd_cwnd;
	__u32	tcpi_advmss;
	__u32	tcpi_reordering;

	__u32	tcpi_rcv_rtt;
	__u32	tcpi_rcv_space;

	__u32	tcpi_total_retrans;
};
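
/*
 * Illustrative sketch (not part of this header): user space can obtain a
 * snapshot of this structure for a connected socket `fd' through the
 * TCP_INFO option (needs <netinet/tcp.h> and <stdio.h> for printf);
 * error handling omitted.
 *
 *	struct tcp_info info;
 *	socklen_t len = sizeof(info);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
 *		printf("rtt %u us, cwnd %u segs, total retrans %u\n",
 *		       info.tcpi_rtt, info.tcpi_snd_cwnd,
 *		       info.tcpi_total_retrans);
 */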

/* for TCP_MD5SIG socket option */
#define TCP_MD5SIG_MAXKEYLEN	80

struct tcp_md5sig {
	struct __kernel_sockaddr_storage tcpm_addr;	/* address associated */
	__u16	__tcpm_pad1;				/* zero */
	__u16	tcpm_keylen;				/* key length */
	__u32	__tcpm_pad2;				/* zero */
	__u8	tcpm_key[TCP_MD5SIG_MAXKEYLEN];		/* key (binary) */
};
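
/*
 * Illustrative sketch (not part of this header): installing an RFC 2385
 * key for a peer from user space.  `fd' is a TCP socket, `peer' a filled
 * struct sockaddr_in for the peer, and `secret' a NUL-terminated key no
 * longer than TCP_MD5SIG_MAXKEYLEN; error handling omitted.
 *
 *	struct tcp_md5sig sig;
 *
 *	memset(&sig, 0, sizeof(sig));
 *	memcpy(&sig.tcpm_addr, &peer, sizeof(peer));
 *	sig.tcpm_keylen = strlen(secret);
 *	memcpy(sig.tcpm_key, secret, sig.tcpm_keylen);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &sig, sizeof(sig));
 */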

#ifdef __KERNEL__

#include <linux/skbuff.h>
#include <linux/dmaengine.h>
#include <net/sock.h>
#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>

static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
{
	return (struct tcphdr *)skb_transport_header(skb);
}

static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
{
	return tcp_hdr(skb)->doff * 4;
}

static inline unsigned int tcp_optlen(const struct sk_buff *skb)
{
	return (tcp_hdr(skb)->doff - 5) * 4;
}
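
/*
 * Worked example (illustrative): doff counts 32-bit words, so a header
 * carrying 12 bytes of options has doff == 8, tcp_hdrlen() == 32 and
 * tcp_optlen() == 12.  A caller walking the options would typically start
 * just past the fixed 20-byte header:
 *
 *	const unsigned char *ptr = (const unsigned char *)(tcp_hdr(skb) + 1);
 *	int length = tcp_optlen(skb);	// bytes of options left to parse
 */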

/* This defines a selective acknowledgement block. */
struct tcp_sack_block_wire {
	__be32	start_seq;
	__be32	end_seq;
};

struct tcp_sack_block {
	u32	start_seq;
	u32	end_seq;
};

struct tcp_options_received {
/*	PAWS/RTTM data	*/
	long	ts_recent_stamp;/* Time we stored ts_recent (for aging) */
	u32	ts_recent;	/* Time stamp to echo next		*/
	u32	rcv_tsval;	/* Time stamp value			*/
	u32	rcv_tsecr;	/* Time stamp echo reply		*/
	u16	saw_tstamp : 1,	/* Saw TIMESTAMP on last packet		*/
		tstamp_ok : 1,	/* TIMESTAMP seen on SYN packet		*/
		dsack : 1,	/* D-SACK is scheduled			*/
		wscale_ok : 1,	/* Wscale seen on SYN packet		*/
		sack_ok : 4,	/* SACK seen on SYN packet		*/
		snd_wscale : 4,	/* Window scaling received from sender	*/
		rcv_wscale : 4;	/* Window scaling to send to receiver	*/
/*	SACKs data	*/
	u8	num_sacks;	/* Number of SACK blocks		*/
	u16	user_mss;	/* mss requested by user in ioctl	*/
	u16	mss_clamp;	/* Maximal mss, negotiated at connection setup */
};

/* This is the max number of SACKS that we'll generate and process.  It's
 * safe to increase this, but since
 *   size = TCPOLEN_SACK_BASE_ALIGNED (4) + n * TCPOLEN_SACK_PERBLOCK (8)
 * only four blocks will fit in the option space of a standard TCP header. */
#define TCP_NUM_SACKS 4
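
/*
 * Worked example (illustrative): 4 + 4 * 8 = 36 bytes, which fits in the
 * 40 bytes of TCP option space; when the 12-byte (aligned) timestamp
 * option is also in use, only 3 blocks fit (12 + 4 + 3 * 8 = 40).
 */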

struct tcp_request_sock {
	struct inet_request_sock 	req;
#ifdef CONFIG_TCP_MD5SIG
	/* Only used by TCP MD5 Signature so far. */
	struct tcp_request_sock_ops	*af_specific;
#endif
	u32			 	rcv_isn;
	u32			 	snt_isn;
};

static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
{
	return (struct tcp_request_sock *)req;
}

struct tcp_sock {
	/* inet_connection_sock has to be the first member of tcp_sock */
	struct inet_connection_sock	inet_conn;
	u16	tcp_header_len;	/* Bytes of tcp header to send		*/
	u16	xmit_size_goal_segs; /* Goal for segmenting output packets */

/*
 *	Header prediction flags
 *	0x5?10 << 16 + snd_wnd in net byte order
 */
	__be32	pred_flags;

/*
 *	RFC793 variables by their proper names. This means you can
 *	read the code and the spec side by side (and laugh ...)
 *	See RFC793 and RFC1122. The RFC writes these in capitals.
 */
	u32	rcv_nxt;	/* What we want to receive next		*/
	u32	copied_seq;	/* Head of yet unread data		*/
	u32	rcv_wup;	/* rcv_nxt on last window update sent	*/
	u32	snd_nxt;	/* Next sequence we send		*/

	u32	snd_una;	/* First byte we want an ack for	*/
	u32	snd_sml;	/* Last byte of the most recently transmitted small packet */
	u32	rcv_tstamp;	/* timestamp of last received ACK (for keepalives) */
	u32	lsndtime;	/* timestamp of last sent data packet (for restart window) */

	/* Data for direct copy to user */
	struct {
		struct sk_buff_head	prequeue;
		struct task_struct	*task;
		struct iovec		*iov;
		int			memory;
		int			len;
#ifdef CONFIG_NET_DMA
		/* members for async copy */
		struct dma_chan		*dma_chan;
		int			wakeup;
		struct dma_pinned_list	*pinned_list;
		dma_cookie_t		dma_cookie;
#endif
	} ucopy;

	u32	snd_wl1;	/* Sequence for window update		*/
	u32	snd_wnd;	/* The window we expect to receive	*/
	u32	max_window;	/* Maximal window ever seen from peer	*/
	u32	mss_cache;	/* Cached effective mss, not including SACKS */

	u32	window_clamp;	/* Maximal window to advertise		*/
	u32	rcv_ssthresh;	/* Current window clamp			*/

	u32	frto_highmark;	/* snd_nxt when RTO occurred */
	u16	advmss;		/* Advertised MSS			*/
	u8	frto_counter;	/* Number of new acks after RTO */
	u8	nonagle;	/* Disable Nagle algorithm?		*/

/* RTT measurement */
	u32	srtt;		/* smoothed round trip time << 3	*/
	u32	mdev;		/* medium deviation			*/
	u32	mdev_max;	/* maximal mdev for the last rtt period	*/
	u32	rttvar;		/* smoothed mdev_max			*/
	u32	rtt_seq;	/* sequence number to update rttvar	*/

	u32	packets_out;	/* Packets which are "in flight"	*/
	u32	retrans_out;	/* Retransmitted packets out		*/

	u16	urg_data;	/* Saved octet of OOB data and control flags */
	u8	ecn_flags;	/* ECN status bits.			*/
	u8	reordering;	/* Packet reordering metric.		*/
	u32	snd_up;		/* Urgent pointer			*/

	u8	keepalive_probes; /* num of allowed keep alive probes	*/
/*
 *	Options received (usually on last packet, some only on SYN packets).
 */
	struct tcp_options_received rx_opt;

/*
 *	Slow start and congestion control (see also Nagle, and Karn & Partridge)
 */
	u32	snd_ssthresh;	/* Slow start size threshold		*/
	u32	snd_cwnd;	/* Sending congestion window		*/
	u32	snd_cwnd_cnt;	/* Linear increase counter		*/
	u32	snd_cwnd_clamp; /* Do not allow snd_cwnd to grow above this */
	u32	snd_cwnd_used;
	u32	snd_cwnd_stamp;

	u32	rcv_wnd;	/* Current receiver window		*/
	u32	write_seq;	/* Tail(+1) of data held in tcp send buffer */
	u32	pushed_seq;	/* Last pushed seq, required to talk to windows */
	u32	lost_out;	/* Lost packets				*/
	u32	sacked_out;	/* SACK'd packets			*/
	u32	fackets_out;	/* FACK'd packets			*/
	u32	tso_deferred;
	u32	bytes_acked;	/* Appropriate Byte Counting - RFC3465 */

	/* from STCP, retrans queue hinting */
	struct sk_buff *lost_skb_hint;
	struct sk_buff *scoreboard_skb_hint;
	struct sk_buff *retransmit_skb_hint;

	struct sk_buff_head	out_of_order_queue; /* Out of order segments go here */

	/* SACKs data, these 2 need to be together (see tcp_build_and_update_options) */
	struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
	struct tcp_sack_block selective_acks[4]; /* The SACKS themselves */

	struct tcp_sack_block recv_sack_cache[4];

	struct sk_buff *highest_sack;   /* highest skb with SACK received
					 * (validity guaranteed only if
					 * sacked_out > 0)
					 */

	int	lost_cnt_hint;
	u32	retransmit_high;	/* L-bits may be on up to this seqno */

	u32	lost_retrans_low;	/* Sent seq after any rxmit (lowest) */

	u32	prior_ssthresh; /* ssthresh saved at recovery start	*/
	u32	high_seq;	/* snd_nxt at onset of congestion	*/

	u32	retrans_stamp;	/* Timestamp of the last retransmit,
				 * also used in SYN-SENT to remember stamp of
				 * the first SYN. */
	u32	undo_marker;	/* tracking retrans started here. */
	int	undo_retrans;	/* number of undoable retransmissions. */
	u32	total_retrans;	/* Total retransmits for entire connection */

	u32	urg_seq;	/* Seq of received urgent pointer */
	unsigned int		keepalive_time;	  /* time before keep alive takes place */
	unsigned int		keepalive_intvl;  /* time interval between keep alive probes */

	unsigned long last_synq_overflow;

/* Receiver side RTT estimation */
	struct {
		u32	rtt;
		u32	seq;
		u32	time;
	} rcv_rtt_est;

/* Receiver queue space */
	struct {
		int	space;
		u32	seq;
		u32	time;
	} rcvq_space;

/* TCP-specific MTU probe information. */
	struct {
		u32		  probe_seq_start;
		u32		  probe_seq_end;
	} mtu_probe;

#ifdef CONFIG_TCP_MD5SIG
/* TCP AF-Specific parts; only used by MD5 Signature support so far */
	struct tcp_sock_af_ops	*af_specific;

/* TCP MD5 Signature Option information */
	struct tcp_md5sig_info	*md5sig_info;
#endif

	int			linger2;
};

static inline struct tcp_sock *tcp_sk(const struct sock *sk)
{
	return (struct tcp_sock *)sk;
}
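
/*
 * Note (illustrative): the cast above is valid because inet_connection_sock
 * (and, within it, struct sock) is the first member of tcp_sock, so both
 * pointers refer to the same address, e.g.:
 *
 *	struct tcp_sock *tp = tcp_sk(sk);
 *	u32 cur_mss = tp->mss_cache;
 */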

struct tcp_timewait_sock {
	struct inet_timewait_sock tw_sk;
	u32			  tw_rcv_nxt;
	u32			  tw_snd_nxt;
	u32			  tw_rcv_wnd;
	u32			  tw_ts_recent;
	long			  tw_ts_recent_stamp;
#ifdef CONFIG_TCP_MD5SIG
	u16			  tw_md5_keylen;
	u8			  tw_md5_key[TCP_MD5SIG_MAXKEYLEN];
#endif
};

static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
{
	return (struct tcp_timewait_sock *)sk;
}

#endif	/* __KERNEL__ */

#endif	/* _LINUX_TCP_H */