xref: /linux-6.15/include/linux/skbuff.h (revision 73ec78b0)
/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <[email protected]>
 *		Florian La Roche, <[email protected]>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/cache.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <net/flow_keys.h>

/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE 0
#define CHECKSUM_UNNECESSARY 1
#define CHECKSUM_COMPLETE 2
#define CHECKSUM_PARTIAL 3

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* return minimum truesize of one skb containing X bytes of data */
#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

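/* A worked example (illustrative; assumes SMP_CACHE_BYTES == 64):
 * SKB_DATA_ALIGN(100) == (100 + 63) & ~63 == 128, so an skb carrying
 * 100 bytes of linear data has a minimum truesize of
 *
 *	SKB_TRUESIZE(100) == 100
 *		+ SKB_DATA_ALIGN(sizeof(struct sk_buff))
 *		+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 */
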
/* A. Checksumming of received packets by device.
 *
 *	NONE: device failed to checksum this packet.
 *		skb->csum is undefined.
 *
 *	UNNECESSARY: device parsed the packet and verified the checksum.
 *		skb->csum is undefined.
 *	      It is a bad option, but, unfortunately, many vendors do this.
 *	      Apparently with the secret goal of selling you a new device,
 *	      when you add a new protocol to your host. F.e. IPv6. 8)
 *
 *	COMPLETE: the most generic way. Device supplied the checksum of _all_
 *	    the packet as seen by netif_rx(), in skb->csum.
 *	    NOTE: Even if a device supports only some protocols but
 *	    is able to produce some skb->csum, it MUST use COMPLETE,
 *	    not UNNECESSARY.
 *
 *	PARTIAL: identical to the case for output below.  This may occur
 *	    on a packet received directly from another Linux OS, e.g.,
 *	    a virtualised Linux kernel on the same host.  The packet can
 *	    be treated in the same way as UNNECESSARY, except that on
 *	    output (i.e., forwarding) the checksum must be filled in
 *	    by the OS or the hardware.
 *
 * B. Checksumming on output.
 *
 *	NONE: the skb is checksummed by the protocol, or a csum is not
 *	required.
 *
 *	PARTIAL: the device is required to csum the packet as seen by
 *	hard_start_xmit() from skb->csum_start to the end, and to record
 *	the checksum at skb->csum_start + skb->csum_offset.
 *
 *	The device must show its capabilities in dev->features, set
 *	at device setup time.
 *	NETIF_F_HW_CSUM	- it is a clever device, able to checksum
 *			  everything.
 *	NETIF_F_IP_CSUM - the device is dumb, able to csum only
 *			  TCP/UDP over IPv4. Sigh. Vendors like it this
 *			  way for unknown reasons. Though, see the comment
 *			  above about CHECKSUM_UNNECESSARY. 8)
 *	NETIF_F_IPV6_CSUM - about as dumb as the last one, but does IPv6
 *			  instead.
 *
 *	UNNECESSARY: the device will do a per-protocol specific csum.
 *	Protocol drivers that do not want the stack to perform the
 *	checksum calculation should use this flag on their outgoing skbs.
 *	NETIF_F_FCOE_CRC  - this indicates the device can do FCoE FC CRC
 *			  offload. Correspondingly, the FCoE protocol driver
 *			  stack should use CHECKSUM_UNNECESSARY.
 *
 *	Any questions? No questions, good.		--ANK
 */

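/* A minimal transmit-side sketch (illustrative, not a driver API
 * contract): a NETIF_F_HW_CSUM device handed a CHECKSUM_PARTIAL skb
 * must checksum from skb->csum_start to the end of the packet and
 * store the result at skb->csum_start + skb->csum_offset; a driver
 * without the needed hardware support would instead fall back to
 * skb_checksum_help().
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 *		unsigned char *csum_at = skb->head + skb->csum_start +
 *					 skb->csum_offset;
 *		// hardware writes the 16-bit checksum at csum_at
 *	}
 */
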
struct net_device;
struct scatterlist;
struct pipe_inode_info;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
	atomic_t use;
};
#endif

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
	atomic_t		use;
	unsigned int		mask;
	struct net_device	*physindev;
	struct net_device	*physoutdev;
	unsigned long		data[32 / sizeof(unsigned long)];
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow a 64K frame to be packed as a single skb without frag_list we
 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
 * buffers which do not start on a page boundary.
 *
 * Since GRO uses frags we allocate at least 16 regardless of page
 * size.
 */
#if (65536/PAGE_SIZE + 1) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif

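/* For example (illustrative): with 4 KB pages, 65536/PAGE_SIZE + 1
 * == 17, which exceeds 16, so MAX_SKB_FRAGS is 17; with 64 KB pages
 * the ratio drops to 2 and the GRO-driven floor of 16 applies.
 */
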
typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct {
		struct page *p;
	} page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
	__u32 size;
#else
	__u16 page_offset;
	__u16 size;
#endif
};

static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->size;
}

static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->size = size;
}

static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->size += delta;
}

static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->size -= delta;
}

#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since an arbitrary point in time
 * @syststamp:	hwtstamp transformed to system time base
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp. The relation between the different kinds of time
 * stamps is as follows:
 *
 * syststamp and tstamp can be compared against each other in
 * arbitrary combinations.  The accuracy of a
 * syststamp/tstamp/"syststamp from other device" comparison is
 * limited by the accuracy of the transformation into system time
 * base. This depends on the device driver and its underlying
 * hardware.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 */
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
	ktime_t	syststamp;
};

/* Definitions for tx_flags in struct skb_shared_info */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

	/* generate software time stamp */
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

	/* device driver supports TX zero-copy buffers */
	SKBTX_DEV_ZEROCOPY = 1 << 3,

	/* generate wifi status information (where possible) */
	SKBTX_WIFI_STATUS = 1 << 4,

	/* This indicates at least one fragment might be overwritten
	 * (as in vmsplice(), sendfile() ...).
	 * If we need to compute a TX checksum, we'll need to copy
	 * all frags to avoid a possibly bad checksum.
	 */
	SKBTX_SHARED_FRAG = 1 << 5,
};

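/* A usage sketch (illustrative; assumes a timestamping-capable
 * driver): before handing an skb marked for hardware timestamping to
 * the NIC, the driver flags that a timestamp will follow:
 *
 *	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
 *		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 */
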
/*
 * The callback notifies userspace to release buffers when skb DMA is done in
 * the lower device; the skb's last reference should be 0 when calling this.
 * The zerocopy_success argument is true if zero copy transmit occurred,
 * false on data copy or an out of memory error caused by a data copy attempt.
 * The ctx field is used to track device context.
 * The desc field is used to track the userspace buffer index.
 */
struct ubuf_info {
	void (*callback)(struct ubuf_info *, bool zerocopy_success);
	void *ctx;
	unsigned long desc;
};

/* This data is invariant across clones and lives at
 * the end of the header data, i.e. at skb->end.
 */
struct skb_shared_info {
	unsigned char	nr_frags;
	__u8		tx_flags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short  gso_type;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;
	__be32          ip6_frag_id;

	/*
	 * Warning: all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until the skb destructor is called. */
	void *		destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)

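/* A worked example of the encoding (illustrative): an skb whose
 * header reference was dropped via skb_header_release() and which
 * then gained one clone could carry dataref == (1 << 16) + 2, i.e.
 * two total references in the low half and one payload-only
 * reference in the high half:
 *
 *	int dataref = atomic_read(&skb_shinfo(skb)->dataref);
 *	int total   = dataref & SKB_DATAREF_MASK;
 *	int payload = dataref >> SKB_DATAREF_SHIFT;
 */
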
enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,

	SKB_GSO_FCOE = 1 << 5,

	SKB_GSO_GRE = 1 << 6,

	SKB_GSO_UDP_TUNNEL = 1 << 7,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
    defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
#endif

/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@tstamp: Time we arrived
 *	@sk: Socket we are owned by
 *	@dev: Device we arrived on/are leaving by
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@_skb_refdst: destination entry (with norefcount bit)
 *	@sp: the security path, used for xfrm
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@hdr_len: writable header length of cloned skb
 *	@csum: Checksum (must include start/offset pair)
 *	@csum_start: Offset from skb->head where checksumming should start
 *	@csum_offset: Offset from csum_start where checksum should be stored
 *	@priority: Packet queueing priority
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@ip_summed: Driver fed us an IP checksum
 *	@nohdr: Payload reference only, must not modify header
 *	@nfctinfo: Relationship of this skb to the connection
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ipvs_property: skbuff is owned by ipvs
 *	@peeked: this packet has been seen already, so stats have been
 *		done for it, don't do them again
 *	@nf_trace: netfilter packet trace flag
 *	@protocol: Packet protocol from driver
 *	@destructor: Destruct function
 *	@nfct: Associated connection, if any
 *	@nfct_reasm: netfilter conntrack re-assembly pointer
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@skb_iif: ifindex of device we arrived on
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@rxhash: the packet hash computed on receive
 *	@queue_mapping: Queue mapping for multiqueue devices
 *	@ndisc_nodetype: router type (from link layer)
 *	@ooo_okay: allow the mapping of a socket to a queue to be changed
 *	@l4_rxhash: indicates rxhash is a canonical 4-tuple hash over transport
 *		ports.
 *	@wifi_acked_valid: wifi_acked was set
 *	@wifi_acked: whether frame was acked on wifi or not
 *	@no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
 *	@dma_cookie: a cookie to one of several possible DMA operations
 *		done by skb DMA functions
 *	@secmark: security marking
 *	@mark: Generic packet mark
 *	@dropcount: total number of sk_receive_queue overflows
 *	@vlan_proto: vlan encapsulation protocol
 *	@vlan_tci: vlan tag control information
 *	@inner_transport_header: Inner transport layer header (encapsulation)
 *	@inner_network_header: Network layer header (encapsulation)
 *	@inner_mac_header: Link layer header (encapsulation)
 *	@transport_header: Transport layer header
 *	@network_header: Network layer header
 *	@mac_header: Link layer header
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@truesize: Buffer size
 *	@users: User count - see {datagram,tcp}.c
 */

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	ktime_t			tstamp;

	struct sock		*sk;
	struct net_device	*dev;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	unsigned long		_skb_refdst;
#ifdef CONFIG_XFRM
	struct	sec_path	*sp;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;
	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	kmemcheck_bitfield_begin(flags1);
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	__u8			pkt_type:3,
				fclone:2,
				ipvs_property:1,
				peeked:1,
				nf_trace:1;
	kmemcheck_bitfield_end(flags1);
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack	*nfct;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif

	int			skb_iif;

	__u32			rxhash;

	__be16			vlan_proto;
	__u16			vlan_tci;

#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif

	__u16			queue_mapping;
	kmemcheck_bitfield_begin(flags2);
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif
	__u8			pfmemalloc:1;
	__u8			ooo_okay:1;
	__u8			l4_rxhash:1;
	__u8			wifi_acked_valid:1;
	__u8			wifi_acked:1;
	__u8			no_fcs:1;
	__u8			head_frag:1;
	/* Encapsulation protocol and NIC drivers should use
	 * this flag to indicate to each other if the skb contains
	 * an encapsulated packet or not, and maybe use the inner packet
	 * headers if needed
	 */
	__u8			encapsulation:1;
	/* 7/9 bit hole (depending on ndisc_nodetype presence) */
	kmemcheck_bitfield_end(flags2);

#ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif
	union {
		__u32		mark;
		__u32		dropcount;
		__u32		reserved_tailroom;
	};

	sk_buff_data_t		inner_transport_header;
	sk_buff_data_t		inner_network_header;
	sk_buff_data_t		inner_mac_header;
	sk_buff_data_t		transport_header;
	sk_buff_data_t		network_header;
	sk_buff_data_t		mac_header;
	/* These elements must be at the end, see alloc_skb() for details.  */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	atomic_t		users;
};

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>

#define SKB_ALLOC_FCLONE	0x01
#define SKB_ALLOC_RX		0x02

/* Returns true if the skb was allocated from PFMEMALLOC reserves */
static inline bool skb_pfmemalloc(const struct sk_buff *skb)
{
	return unlikely(skb->pfmemalloc);
}

/*
 * skb might have a dst pointer attached, refcounted or not.
 * _skb_refdst low order bit is set if the refcount was _not_ taken
 */
#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)

/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns the skb dst_entry, regardless of whether a reference was taken.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check that we are still in an
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

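/* A usage sketch (illustrative): a caller that may hold only a noref
 * dst must remain inside an RCU read-side section while using it:
 *
 *	rcu_read_lock();
 *	dst = skb_dst(skb);
 *	if (dst)
 *		// ... use dst ...
 *	rcu_read_unlock();
 */
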
/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets the skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}

extern void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
				bool force);

/**
 * skb_dst_set_noref - sets skb dst, hopefully, without taking a reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets the skb dst, assuming a reference was not taken on dst.
 * If the dst entry is cached, we do not take a reference and dst_release
 * will be avoided by refdst_drop. If the dst entry is not cached, we take
 * a reference, so that the last dst_release can destroy the dst immediately.
 */
static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
	__skb_dst_set_noref(skb, dst, false);
}

/**
 * skb_dst_set_noref_force - sets skb dst, without taking a reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets the skb dst, assuming a reference was not taken on dst.
 * No reference is taken and no dst_release will be called. While for
 * cached dsts deferred reclaim is a basic feature, for entries that are
 * not cached it is the caller's job to guarantee that the last dst_release
 * for the provided dst happens when nobody uses it, e.g. after an RCU grace
 * period.
 */
static inline void skb_dst_set_noref_force(struct sk_buff *skb,
					   struct dst_entry *dst)
{
	__skb_dst_set_noref(skb, dst, true);
}

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

extern void kfree_skb(struct sk_buff *skb);
extern void skb_tx_error(struct sk_buff *skb);
extern void consume_skb(struct sk_buff *skb);
extern void	       __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;

extern void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
			     bool *fragstolen, int *delta_truesize);

extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int flags, int node);
extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
}

extern struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
static inline struct sk_buff *alloc_skb_head(gfp_t priority)
{
	return __alloc_skb_head(priority, -1);
}

extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
extern struct sk_buff *__pskb_copy(struct sk_buff *skb,
				   int headroom, gfp_t gfp_mask);

extern int	       pskb_expand_head(struct sk_buff *skb,
					int nhead, int ntail,
					gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
					    unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				       int newheadroom, int newtailroom,
				       gfp_t priority);
extern int	       skb_to_sgvec(struct sk_buff *skb,
				    struct scatterlist *sg, int offset,
				    int len);
extern int	       skb_cow_data(struct sk_buff *skb, int tailbits,
				    struct sk_buff **trailer);
extern int	       skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	consume_skb(a)

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int getfrag(void *from, char *to, int offset,
			int len, int odd, struct sk_buff *skb),
			void *from, int length);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

extern void	      skb_prepare_seq_read(struct sk_buff *skb,
					   unsigned int from, unsigned int to,
					   struct skb_seq_state *st);
extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
				   struct skb_seq_state *st);
extern void	      skb_abort_seq_read(struct skb_seq_state *st);

extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
				    unsigned int to, struct ts_config *config,
				    struct ts_state *state);

extern void __skb_get_rxhash(struct sk_buff *skb);
static inline __u32 skb_get_rxhash(struct sk_buff *skb)
{
	if (!skb->l4_rxhash)
		__skb_get_rxhash(skb);

	return skb->rxhash;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end - skb->head;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return skb->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_first - check if skb is the first entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return skb->prev == (struct sk_buff *)list;
}

/**
 *	skb_queue_next - return the next packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the next packet in @list after @skb.  It is only valid to
 *	call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

/**
 *	skb_queue_prev - return the prev packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the prev packet in @list before @skb.  It is only valid to
 *	call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant
 * atomic changes.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);

	if (skb_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer.  This is done
 *	by acquiring a payload reference.  You must not read from the header
 *	part of skb->data after this.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	called from interrupt context or with spinlocks held, @pri must
 *	be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);

		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

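/* A receive-path sketch (illustrative): take ownership of a possibly
 * shared skb before modifying it.
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (!skb)
 *		return;		// clone failed; original already freed
 *	// skb is now safe to modify
 */
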
/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and a forwarder,
 *	and a couple of other messy cases. The normal one is tcpdumping
 *	a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);	/* Free our shared copy */
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek - peek at the head of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->next;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	skb_peek_next - peek skb following the given one from a queue
 *	@skb: skb to start from
 *	@list_: list to peek at
 *
 *	Returns %NULL when the end of the list is met or a pointer to the
 *	next element. The reference count is not incremented and the
 *	reference is therefore volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
		const struct sk_buff_head *list_)
{
	struct sk_buff *next = skb->next;

	if (next == (struct sk_buff *)list_)
		next = NULL;
	return next;
}

/**
 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->prev;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

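/* An iteration sketch (illustrative): walking a queue with the peek
 * helpers. The caller must hold the queue lock (or own the queue)
 * for the whole walk, since the buffers stay on the list.
 *
 *	struct sk_buff *skb;
 *
 *	for (skb = skb_peek(list); skb; skb = skb_peek_next(skb, list))
 *		// ... inspect skb without unlinking it ...
 */
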
/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/**
 *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length aspects of
 *	an sk_buff_head object.  This allows initializing the list
 *	aspects of an sk_buff_head without reinitializing things like
 *	the spinlock.  It can also be used for on-stack sk_buff_head
 *	objects where the spinlock is known not to be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 * This function creates a split-out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or the drivers will need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
		struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}

/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

/**
 *	skb_queue_splice - join two skb lists, this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

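/* A usage sketch (illustrative): drain a shared queue into a private
 * on-stack list with one locked splice, then process it lock-free.
 *
 *	struct sk_buff_head tmp;
 *	unsigned long flags;
 *
 *	__skb_queue_head_init(&tmp);
 *	spin_lock_irqsave(&queue->lock, flags);
 *	skb_queue_splice_init(queue, &tmp);
 *	spin_unlock_irqrestore(&queue->lock, flags);
 *	// process tmp without holding queue->lock
 */
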
/**
 *	__skb_queue_after - queue a buffer after a given buffer in the list
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		       struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				   struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
extern void	   skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

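/* A FIFO sketch (illustrative): the locked helpers skb_queue_tail()
 * and skb_dequeue() do this internally; the __-prefixed variants
 * require the caller to provide the locking.
 *
 *	spin_lock_irqsave(&q->lock, flags);
 *	__skb_queue_tail(q, skb);		// producer side
 *	spin_unlock_irqrestore(&q->lock, flags);
 *
 *	spin_lock_irqsave(&q->lock, flags);
 *	skb = __skb_dequeue(q);			// consumer side
 *	spin_unlock_irqrestore(&q->lock, flags);
 */
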
static inline bool skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
	return len + skb_headlen(skb);
}

/**
 * __skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data within @page
 * @size: the length of the data
 *
 * Initialises the @i'th fragment of @skb to point to @size bytes at
 * offset @off within @page.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
					struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	/*
	 * Propagate page->pfmemalloc to the skb if we can. The problem is
	 * that not all callers have unique ownership of the page. If
	 * pfmemalloc is set, we check the mapping as a mapping implies
	 * page->index is set (index and pfmemalloc share space).
	 * If it's a valid mapping, we cannot use page->pfmemalloc but we
	 * do not lose pfmemalloc information as the pages would not be
	 * allocated using __GFP_MEMALLOC.
	 */
	frag->page.p		  = page;
	frag->page_offset	  = off;
	skb_frag_size_set(frag, size);

	page = compound_head(page);
	if (page->pfmemalloc && !page->mapping)
		skb->pfmemalloc	= true;
}

/**
 * skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data within @page
 * @size: the length of the data
 *
 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
 * @skb to point to @size bytes at offset @off within @page. In
 * addition updates @skb such that @i is the last fragment.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	__skb_fill_page_desc(skb, i, page, off, size);
	skb_shinfo(skb)->nr_frags = i + 1;
}

extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
			    int off, int size, unsigned int truesize);

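/* A usage sketch (illustrative): attach one page of data as the
 * first fragment and account for it; the fragment keeps its own page
 * reference, hence the get_page().
 *
 *	get_page(page);
 *	skb_fill_page_desc(skb, 0, page, 0, len);
 *	skb->len	+= len;
 *	skb->data_len	+= len;
 *	skb->truesize	+= PAGE_SIZE;
 */
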
#define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_has_frag_list(skb))
#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}
#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}

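/* A parsing sketch (illustrative; struct my_hdr is hypothetical):
 * ensure hdr_len bytes are linear before dereferencing them. Note
 * that pskb_may_pull() may reallocate the header, so any pointer
 * into skb->data must be reloaded afterwards.
 *
 *	if (!pskb_may_pull(skb, hdr_len))
 *		goto drop;
 *	hdr = (struct my_hdr *)skb->data;
 */
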
/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_availroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 *	allocated by sk_stream_alloc()
 */
static inline int skb_availroom(const struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb))
		return 0;

	return skb->end - skb->tail - skb->reserved_tailroom;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

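/* A typical allocation sketch (illustrative): reserve headroom while
 * the buffer is still empty, append the payload, and push headers on
 * later.
 *
 *	skb = alloc_skb(hlen + dlen, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hlen);			// headroom for headers
 *	memcpy(skb_put(skb, dlen), data, dlen);	// payload
 *	// later: skb_push(skb, hlen) prepends the headers
 */
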
static inline void skb_reset_inner_headers(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->mac_header;
	skb->inner_network_header = skb->network_header;
	skb->inner_transport_header = skb->transport_header;
}

static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_inner_transport_header(const struct sk_buff
							*skb)
{
	return skb->head + skb->inner_transport_header;
}

static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
{
	skb->inner_transport_header = skb->data - skb->head;
}

static inline void skb_set_inner_transport_header(struct sk_buff *skb,
						   const int offset)
{
	skb_reset_inner_transport_header(skb);
	skb->inner_transport_header += offset;
}

static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_network_header;
}

static inline void skb_reset_inner_network_header(struct sk_buff *skb)
{
	skb->inner_network_header = skb->data - skb->head;
}

static inline void skb_set_inner_network_header(struct sk_buff *skb,
						const int offset)
{
	skb_reset_inner_network_header(skb);
	skb->inner_network_header += offset;
}

static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_mac_header;
}

static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->data - skb->head;
}

static inline void skb_set_inner_mac_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_inner_mac_header(skb);
	skb->inner_mac_header += offset;
}

static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
{
	return skb->transport_header != ~0U;
}

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != ~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_inner_transport_header(const struct sk_buff
							*skb)
{
	return skb->inner_transport_header;
}

static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
{
	skb->inner_transport_header = skb->data;
}

static inline void skb_set_inner_transport_header(struct sk_buff *skb,
						   const int offset)
{
	skb->inner_transport_header = skb->data + offset;
}

static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
{
	return skb->inner_network_header;
}

static inline void skb_reset_inner_network_header(struct sk_buff *skb)
{
	skb->inner_network_header = skb->data;
}

static inline void skb_set_inner_network_header(struct sk_buff *skb,
						const int offset)
{
	skb->inner_network_header = skb->data + offset;
}

static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
	return skb->inner_mac_header;
}

static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->data;
}

static inline void skb_set_inner_mac_header(struct sk_buff *skb,
						const int offset)
{
	skb->inner_mac_header = skb->data + offset;
}

static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
{
	return skb->transport_header != NULL;
}

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb->transport_header = skb->data + offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb->network_header = skb->data + offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != NULL;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb->mac_header = skb->data + offset;
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */

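/* A receive-path sketch (illustrative): how the header offsets are
 * typically maintained while walking up an Ethernet frame.
 *
 *	skb_reset_mac_header(skb);	// skb->data is at the MAC header
 *	skb_pull(skb, ETH_HLEN);
 *	skb_reset_network_header(skb);	// skb->data is now at L3
 *	skb_set_transport_header(skb, ip_hdrlen(skb));
 */
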
1747 static inline void skb_probe_transport_header(struct sk_buff *skb,
1748 					      const int offset_hint)
1749 {
1750 	struct flow_keys keys;
1751 
1752 	if (skb_transport_header_was_set(skb))
1753 		return;
1754 	else if (skb_flow_dissect(skb, &keys))
1755 		skb_set_transport_header(skb, keys.thoff);
1756 	else
1757 		skb_set_transport_header(skb, offset_hint);
1758 }
1759 
1760 static inline void skb_mac_header_rebuild(struct sk_buff *skb)
1761 {
1762 	if (skb_mac_header_was_set(skb)) {
1763 		const unsigned char *old_mac = skb_mac_header(skb);
1764 
1765 		skb_set_mac_header(skb, -skb->mac_len);
1766 		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
1767 	}
1768 }
1769 
1770 static inline int skb_checksum_start_offset(const struct sk_buff *skb)
1771 {
1772 	return skb->csum_start - skb_headroom(skb);
1773 }
1774 
1775 static inline int skb_transport_offset(const struct sk_buff *skb)
1776 {
1777 	return skb_transport_header(skb) - skb->data;
1778 }
1779 
1780 static inline u32 skb_network_header_len(const struct sk_buff *skb)
1781 {
1782 	return skb->transport_header - skb->network_header;
1783 }
1784 
1785 static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
1786 {
1787 	return skb->inner_transport_header - skb->inner_network_header;
1788 }
1789 
1790 static inline int skb_network_offset(const struct sk_buff *skb)
1791 {
1792 	return skb_network_header(skb) - skb->data;
1793 }
1794 
1795 static inline int skb_inner_network_offset(const struct sk_buff *skb)
1796 {
1797 	return skb_inner_network_header(skb) - skb->data;
1798 }
1799 
1800 static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
1801 {
1802 	return pskb_may_pull(skb, skb_network_offset(skb) + len);
1803 }
1804 
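/* A minimal usage sketch (illustrative, not taken from a real caller):
 * make sure the IPv4 header is linear before dereferencing it.
 *
 *	const struct iphdr *iph;
 *
 *	if (!pskb_network_may_pull(skb, sizeof(*iph)))
 *		goto drop;
 *	iph = ip_hdr(skb);
 */
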
1805 /*
1806  * CPUs often take a performance hit when accessing unaligned memory
1807  * locations. The actual performance hit varies, it can be small if the
1808  * hardware handles it or large if we have to take an exception and fix it
1809  * in software.
1810  *
1811  * Since an ethernet header is 14 bytes, network drivers often end up with
1812  * the IP header at an unaligned offset. The IP header can be aligned by
1813  * shifting the start of the packet by 2 bytes. Drivers should do this
1814  * with:
1815  *
1816  * skb_reserve(skb, NET_IP_ALIGN);
1817  *
1818  * The downside to this alignment of the IP header is that the DMA is now
1819  * unaligned. On some architectures the cost of an unaligned DMA is high
1820  * and this cost outweighs the gains made by aligning the IP header.
1821  *
1822  * Since this trade-off varies between architectures, we allow NET_IP_ALIGN
1823  * to be overridden.
1824  */
1825 #ifndef NET_IP_ALIGN
1826 #define NET_IP_ALIGN	2
1827 #endif
1828 
1829 /*
1830  * The networking layer reserves some headroom in skb data (via
1831  * dev_alloc_skb). This is used to avoid having to reallocate skb data when
1832  * the header has to grow. In the default case, if the header has to grow
1833  * 32 bytes or less we avoid the reallocation.
1834  *
1835  * Unfortunately this headroom changes the DMA alignment of the resulting
1836  * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
1837  * on some architectures. An architecture can override this value,
1838  * perhaps setting it to a cacheline in size (since that will maintain
1839  * cacheline alignment of the DMA). It must be a power of 2.
1840  *
1841  * Various parts of the networking layer expect at least 32 bytes of
1842  * headroom, you should not reduce this.
1843  *
1844  * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
1845  * to reduce the average number of cache lines per packet.
1846  * get_rps_cpus(), for example, only accesses one 64-byte-aligned block:
1847  * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
1848  */
1849 #ifndef NET_SKB_PAD
1850 #define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
1851 #endif
1852 
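/* Worked example of the cache line math above (assuming 64-byte cache
 * lines): reading the ports of a TCP/IPv4 packet touches
 * NET_IP_ALIGN(2) + 14 + 20 + 8 = 44 bytes, and TCP/IPv6 touches
 * 2 + 14 + 40 + 8 = 64 bytes, so either fits in one aligned 64-byte
 * block when the headroom keeps skb->data suitably aligned.
 */
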
1853 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
1854 
1855 static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
1856 {
1857 	if (unlikely(skb_is_nonlinear(skb))) {
1858 		WARN_ON(1);
1859 		return;
1860 	}
1861 	skb->len = len;
1862 	skb_set_tail_pointer(skb, len);
1863 }
1864 
1865 extern void skb_trim(struct sk_buff *skb, unsigned int len);
1866 
1867 static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
1868 {
1869 	if (skb->data_len)
1870 		return ___pskb_trim(skb, len);
1871 	__skb_trim(skb, len);
1872 	return 0;
1873 }
1874 
1875 static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
1876 {
1877 	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
1878 }
1879 
1880 /**
1881  *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
1882  *	@skb: buffer to alter
1883  *	@len: new length
1884  *
1885  *	This is identical to pskb_trim except that the caller knows that
1886  *	the skb is not cloned so we should never get an error due to out-
1887  *	of-memory.
1888  */
1889 static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
1890 {
1891 	int err = pskb_trim(skb, len);
1892 	BUG_ON(err);
1893 }
1894 
1895 /**
1896  *	skb_orphan - orphan a buffer
1897  *	@skb: buffer to orphan
1898  *
1899  *	If a buffer currently has an owner then we call the owner's
1900  *	destructor function and make the @skb unowned. The buffer continues
1901  *	to exist but is no longer charged to its former owner.
1902  */
1903 static inline void skb_orphan(struct sk_buff *skb)
1904 {
1905 	if (skb->destructor)
1906 		skb->destructor(skb);
1907 	skb->destructor = NULL;
1908 	skb->sk		= NULL;
1909 }
1910 
1911 /**
1912  *	skb_orphan_frags - orphan the frags contained in a buffer
1913  *	@skb: buffer to orphan frags from
1914  *	@gfp_mask: allocation mask for replacement pages
1915  *
1916  *	For each frag in the SKB which needs a destructor (i.e. has an
1917  *	owner) create a copy of that frag and release the original
1918  *	page by calling the destructor.
1919  */
1920 static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
1921 {
1922 	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
1923 		return 0;
1924 	return skb_copy_ubufs(skb, gfp_mask);
1925 }
1926 
1927 /**
1928  *	__skb_queue_purge - empty a list
1929  *	@list: list to empty
1930  *
1931  *	Delete all buffers on an &sk_buff list. Each buffer is removed from
1932  *	the list and one reference dropped. This function does not take the
1933  *	list lock and the caller must hold the relevant locks to use it.
1934  */
1935 extern void skb_queue_purge(struct sk_buff_head *list);
1936 static inline void __skb_queue_purge(struct sk_buff_head *list)
1937 {
1938 	struct sk_buff *skb;
1939 	while ((skb = __skb_dequeue(list)) != NULL)
1940 		kfree_skb(skb);
1941 }
1942 
1943 #define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768)
1944 #define NETDEV_FRAG_PAGE_MAX_SIZE  (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
1945 #define NETDEV_PAGECNT_MAX_BIAS	   NETDEV_FRAG_PAGE_MAX_SIZE
1946 
1947 extern void *netdev_alloc_frag(unsigned int fragsz);
1948 
1949 extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
1950 					  unsigned int length,
1951 					  gfp_t gfp_mask);
1952 
1953 /**
1954  *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
1955  *	@dev: network device to receive on
1956  *	@length: length to allocate
1957  *
1958  *	Allocate a new &sk_buff and assign it a usage count of one. The
1959  *	buffer has unspecified headroom built in. Users should allocate
1960  *	the headroom they think they need without accounting for the
1961  *	built-in space. The built-in space is used for optimisations.
1962  *
1963  *	%NULL is returned if there is no free memory. Although this function
1964  *	allocates memory it can be called from an interrupt.
1965  */
1966 static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
1967 					       unsigned int length)
1968 {
1969 	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
1970 }
1971 
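/* Illustrative receive-path sketch (a hedged example, not from a real
 * driver; "pkt_len" and "rx_buf" stand in for device-specific state).
 * The skb_reserve() aligns the IP header as described above;
 * netdev_alloc_skb_ip_align() below folds that step in.
 *
 *	struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
 *
 *	if (unlikely(!skb))
 *		return -ENOMEM;
 *	skb_reserve(skb, NET_IP_ALIGN);
 *	memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */
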
1972 /* legacy helper around __netdev_alloc_skb() */
1973 static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
1974 					      gfp_t gfp_mask)
1975 {
1976 	return __netdev_alloc_skb(NULL, length, gfp_mask);
1977 }
1978 
1979 /* legacy helper around netdev_alloc_skb() */
1980 static inline struct sk_buff *dev_alloc_skb(unsigned int length)
1981 {
1982 	return netdev_alloc_skb(NULL, length);
1983 }
1984 
1985 
1986 static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
1987 		unsigned int length, gfp_t gfp)
1988 {
1989 	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
1990 
1991 	if (NET_IP_ALIGN && skb)
1992 		skb_reserve(skb, NET_IP_ALIGN);
1993 	return skb;
1994 }
1995 
1996 static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
1997 		unsigned int length)
1998 {
1999 	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
2000 }
2001 
2002 /**
2003  *	__skb_alloc_pages - allocate pages for ps-rx on a skb and preserve pfmemalloc data
2004  *	@gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX
2005  *	@skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used
2006  *	@order: page order of the allocation
2007  *
2008  * 	Allocate a new page.
2009  *
2010  * 	%NULL is returned if there is no free memory.
2011  */
2012 static inline struct page *__skb_alloc_pages(gfp_t gfp_mask,
2013 					      struct sk_buff *skb,
2014 					      unsigned int order)
2015 {
2016 	struct page *page;
2017 
2018 	gfp_mask |= __GFP_COLD;
2019 
2020 	if (!(gfp_mask & __GFP_NOMEMALLOC))
2021 		gfp_mask |= __GFP_MEMALLOC;
2022 
2023 	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
2024 	if (skb && page && page->pfmemalloc)
2025 		skb->pfmemalloc = true;
2026 
2027 	return page;
2028 }
2029 
2030 /**
2031  *	__skb_alloc_page - allocate a page for ps-rx for a given skb and preserve pfmemalloc data
2032  *	@gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX
2033  *	@skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used
2034  *
2035  * 	Allocate a new page.
2036  *
2037  * 	%NULL is returned if there is no free memory.
2038  */
2039 static inline struct page *__skb_alloc_page(gfp_t gfp_mask,
2040 					     struct sk_buff *skb)
2041 {
2042 	return __skb_alloc_pages(gfp_mask, skb, 0);
2043 }
2044 
2045 /**
2046  *	skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
2047  *	@page: The page that was allocated from __skb_alloc_page
2048  *	@skb: The skb that may need pfmemalloc set
2049  */
2050 static inline void skb_propagate_pfmemalloc(struct page *page,
2051 					     struct sk_buff *skb)
2052 {
2053 	if (page && page->pfmemalloc)
2054 		skb->pfmemalloc = true;
2055 }
2056 
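/* A minimal sketch of the intended pairing (hypothetical driver flow):
 * at ring-fill time, before any skb exists,
 *
 *	page = __skb_alloc_page(GFP_ATOMIC, NULL);
 *
 * and at completion time, once an skb wraps the buffer,
 *
 *	skb_propagate_pfmemalloc(page, skb);
 */
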
2057 /**
2058  * skb_frag_page - retrieve the page referred to by a paged fragment
2059  * @frag: the paged fragment
2060  *
2061  * Returns the &struct page associated with @frag.
2062  */
2063 static inline struct page *skb_frag_page(const skb_frag_t *frag)
2064 {
2065 	return frag->page.p;
2066 }
2067 
2068 /**
2069  * __skb_frag_ref - take an additional reference on a paged fragment.
2070  * @frag: the paged fragment
2071  *
2072  * Takes an additional reference on the paged fragment @frag.
2073  */
2074 static inline void __skb_frag_ref(skb_frag_t *frag)
2075 {
2076 	get_page(skb_frag_page(frag));
2077 }
2078 
2079 /**
2080  * skb_frag_ref - take an additional reference on a paged fragment of an skb.
2081  * @skb: the buffer
2082  * @f: the fragment offset.
2083  *
2084  * Takes an additional reference on the @f'th paged fragment of @skb.
2085  */
2086 static inline void skb_frag_ref(struct sk_buff *skb, int f)
2087 {
2088 	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
2089 }
2090 
2091 /**
2092  * __skb_frag_unref - release a reference on a paged fragment.
2093  * @frag: the paged fragment
2094  *
2095  * Releases a reference on the paged fragment @frag.
2096  */
2097 static inline void __skb_frag_unref(skb_frag_t *frag)
2098 {
2099 	put_page(skb_frag_page(frag));
2100 }
2101 
2102 /**
2103  * skb_frag_unref - release a reference on a paged fragment of an skb.
2104  * @skb: the buffer
2105  * @f: the fragment offset
2106  *
2107  * Releases a reference on the @f'th paged fragment of @skb.
2108  */
2109 static inline void skb_frag_unref(struct sk_buff *skb, int f)
2110 {
2111 	__skb_frag_unref(&skb_shinfo(skb)->frags[f]);
2112 }
2113 
2114 /**
2115  * skb_frag_address - gets the address of the data contained in a paged fragment
2116  * @frag: the paged fragment buffer
2117  *
2118  * Returns the address of the data within @frag. The page must already
2119  * be mapped.
2120  */
2121 static inline void *skb_frag_address(const skb_frag_t *frag)
2122 {
2123 	return page_address(skb_frag_page(frag)) + frag->page_offset;
2124 }
2125 
2126 /**
2127  * skb_frag_address_safe - gets the address of the data contained in a paged fragment
2128  * @frag: the paged fragment buffer
2129  *
2130  * Returns the address of the data within @frag. Checks that the page
2131  * is mapped and returns %NULL otherwise.
2132  */
2133 static inline void *skb_frag_address_safe(const skb_frag_t *frag)
2134 {
2135 	void *ptr = page_address(skb_frag_page(frag));
2136 	if (unlikely(!ptr))
2137 		return NULL;
2138 
2139 	return ptr + frag->page_offset;
2140 }
2141 
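/* Hedged usage sketch: prefer the _safe variant when the fragment page
 * may be an unmapped highmem page ("buf" is a caller-provided buffer):
 *
 *	void *va = skb_frag_address_safe(frag);
 *
 *	if (!va)
 *		return -EFAULT;
 *	memcpy(buf, va, skb_frag_size(frag));
 */
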
2142 /**
2143  * __skb_frag_set_page - sets the page contained in a paged fragment
2144  * @frag: the paged fragment
2145  * @page: the page to set
2146  *
2147  * Sets the fragment @frag to contain @page.
2148  */
2149 static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
2150 {
2151 	frag->page.p = page;
2152 }
2153 
2154 /**
2155  * skb_frag_set_page - sets the page contained in a paged fragment of an skb
2156  * @skb: the buffer
2157  * @f: the fragment offset
2158  * @page: the page to set
2159  *
2160  * Sets the @f'th fragment of @skb to contain @page.
2161  */
2162 static inline void skb_frag_set_page(struct sk_buff *skb, int f,
2163 				     struct page *page)
2164 {
2165 	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
2166 }
2167 
2168 /**
2169  * skb_frag_dma_map - maps a paged fragment via the DMA API
2170  * @dev: the device to map the fragment to
2171  * @frag: the paged fragment to map
2172  * @offset: the offset within the fragment (starting at the
2173  *          fragment's own offset)
2174  * @size: the number of bytes to map
2175  * @dir: the direction of the mapping (%DMA_*)
2176  *
2177  * Maps the page associated with @frag to @dev.
2178  */
2179 static inline dma_addr_t skb_frag_dma_map(struct device *dev,
2180 					  const skb_frag_t *frag,
2181 					  size_t offset, size_t size,
2182 					  enum dma_data_direction dir)
2183 {
2184 	return dma_map_page(dev, skb_frag_page(frag),
2185 			    frag->page_offset + offset, size, dir);
2186 }
2187 
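/* Sketch of a transmit path mapping each fragment of a nonlinear skb;
 * the descriptor posting ("post_tx_descriptor", "ring") is placeholder
 * driver logic, not kernel API:
 *
 *	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 *		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 *		dma_addr_t dma = skb_frag_dma_map(dev, frag, 0,
 *						  skb_frag_size(frag),
 *						  DMA_TO_DEVICE);
 *		if (dma_mapping_error(dev, dma))
 *			goto unwind;
 *		post_tx_descriptor(ring, dma, skb_frag_size(frag));
 *	}
 */
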
2188 static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
2189 					gfp_t gfp_mask)
2190 {
2191 	return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
2192 }
2193 
2194 /**
2195  *	skb_clone_writable - is the header of a clone writable
2196  *	@skb: buffer to check
2197  *	@len: length up to which to write
2198  *
2199  *	Returns true if modifying the header part of the cloned buffer
2200  *	does not require the data to be copied.
2201  */
2202 static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
2203 {
2204 	return !skb_header_cloned(skb) &&
2205 	       skb_headroom(skb) + len <= skb->hdr_len;
2206 }
2207 
2208 static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
2209 			    int cloned)
2210 {
2211 	int delta = 0;
2212 
2213 	if (headroom > skb_headroom(skb))
2214 		delta = headroom - skb_headroom(skb);
2215 
2216 	if (delta || cloned)
2217 		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
2218 					GFP_ATOMIC);
2219 	return 0;
2220 }
2221 
2222 /**
2223  *	skb_cow - copy header of skb when it is required
2224  *	@skb: buffer to cow
2225  *	@headroom: needed headroom
2226  *
2227  *	If the skb passed lacks sufficient headroom or its data part
2228  *	is shared, data is reallocated. If reallocation fails, an error
2229  *	is returned and the original skb is not changed.
2230  *
2231  *	The result is an skb with a writable area from skb->head to
2232  *	skb->tail and at least @headroom bytes of space at the head.
2233  */
2234 static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
2235 {
2236 	return __skb_cow(skb, headroom, skb_cloned(skb));
2237 }
2238 
2239 /**
2240  *	skb_cow_head - skb_cow but only making the head writable
2241  *	@skb: buffer to cow
2242  *	@headroom: needed headroom
2243  *
2244  *	This function is identical to skb_cow except that we replace the
2245  *	skb_cloned check with skb_header_cloned.  Use it when you only
2246  *	need to push a header onto the skb and do not need to modify
2247  *	the data.
2248  */
2249 static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
2250 {
2251 	return __skb_cow(skb, headroom, skb_header_cloned(skb));
2252 }
2253 
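/* Typical hedged usage: make the head private before prepending a new
 * header (MY_HLEN and struct my_hdr are illustrative names):
 *
 *	if (skb_cow_head(skb, MY_HLEN))
 *		return -ENOMEM;
 *	hdr = (struct my_hdr *)skb_push(skb, MY_HLEN);
 */
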
2254 /**
2255  *	skb_padto	- pad an skbuff up to a minimal size
2256  *	@skb: buffer to pad
2257  *	@len: minimal length
2258  *
2259  *	Pads up a buffer to ensure the trailing bytes exist and are
2260  *	blanked. If the buffer already contains sufficient data it
2261  *	is untouched. Otherwise it is extended. Returns zero on
2262  *	success. The skb is freed on error.
2263  */
2264 
2265 static inline int skb_padto(struct sk_buff *skb, unsigned int len)
2266 {
2267 	unsigned int size = skb->len;
2268 	if (likely(size >= len))
2269 		return 0;
2270 	return skb_pad(skb, len - size);
2271 }
2272 
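/* Hedged transmit sketch: pad runt ethernet frames up to ETH_ZLEN.
 * Note that skb_padto() frees the skb on failure, so the error path
 * must not touch it again:
 *
 *	if (skb_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;
 */
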
2273 static inline int skb_add_data(struct sk_buff *skb,
2274 			       char __user *from, int copy)
2275 {
2276 	const int off = skb->len;
2277 
2278 	if (skb->ip_summed == CHECKSUM_NONE) {
2279 		int err = 0;
2280 		__wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
2281 							    copy, 0, &err);
2282 		if (!err) {
2283 			skb->csum = csum_block_add(skb->csum, csum, off);
2284 			return 0;
2285 		}
2286 	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
2287 		return 0;
2288 
2289 	__skb_trim(skb, off);
2290 	return -EFAULT;
2291 }
2292 
2293 static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
2294 				    const struct page *page, int off)
2295 {
2296 	if (i) {
2297 		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
2298 
2299 		return page == skb_frag_page(frag) &&
2300 		       off == frag->page_offset + skb_frag_size(frag);
2301 	}
2302 	return false;
2303 }
2304 
2305 static inline int __skb_linearize(struct sk_buff *skb)
2306 {
2307 	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
2308 }
2309 
2310 /**
2311  *	skb_linearize - convert paged skb to linear one
2312  *	@skb: buffer to linearize
2313  *
2314  *	If there is no free memory -ENOMEM is returned, otherwise zero
2315  *	is returned and the old skb data released.
2316  */
2317 static inline int skb_linearize(struct sk_buff *skb)
2318 {
2319 	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
2320 }
2321 
2322 /**
2323  * skb_has_shared_frag - can any frag be overwritten
2324  * @skb: buffer to test
2325  *
2326  * Return true if the skb has at least one frag that might be modified
2327  * by an external entity (as in vmsplice()/sendfile())
2328  */
2329 static inline bool skb_has_shared_frag(const struct sk_buff *skb)
2330 {
2331 	return skb_is_nonlinear(skb) &&
2332 	       skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
2333 }
2334 
2335 /**
2336  *	skb_linearize_cow - make sure skb is linear and writable
2337  *	@skb: buffer to process
2338  *
2339  *	If there is no free memory -ENOMEM is returned, otherwise zero
2340  *	is returned and the old skb data released.
2341  */
2342 static inline int skb_linearize_cow(struct sk_buff *skb)
2343 {
2344 	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
2345 	       __skb_linearize(skb) : 0;
2346 }
2347 
2348 /**
2349  *	skb_postpull_rcsum - update checksum for received skb after pull
2350  *	@skb: buffer to update
2351  *	@start: start of data before pull
2352  *	@len: length of data pulled
2353  *
2354  *	After doing a pull on a received packet, you need to call this to
2355  *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
2356  *	CHECKSUM_NONE so that it can be recomputed from scratch.
2357  */
2358 
2359 static inline void skb_postpull_rcsum(struct sk_buff *skb,
2360 				      const void *start, unsigned int len)
2361 {
2362 	if (skb->ip_summed == CHECKSUM_COMPLETE)
2363 		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
2364 }
2365 
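/* Sketch of the pairing described above, for stripping "hlen" bytes of
 * encapsulation from a received packet:
 *
 *	const void *start = skb->data;
 *
 *	__skb_pull(skb, hlen);
 *	skb_postpull_rcsum(skb, start, hlen);
 *
 * skb_pull_rcsum() below combines both steps.
 */
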
2366 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
2367 
2368 /**
2369  *	pskb_trim_rcsum - trim received skb and update checksum
2370  *	@skb: buffer to trim
2371  *	@len: new length
2372  *
2373  *	This is exactly the same as pskb_trim except that it ensures the
2374  *	checksum of received packets is still valid after the operation.
2375  */
2376 
2377 static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
2378 {
2379 	if (likely(len >= skb->len))
2380 		return 0;
2381 	if (skb->ip_summed == CHECKSUM_COMPLETE)
2382 		skb->ip_summed = CHECKSUM_NONE;
2383 	return __pskb_trim(skb, len);
2384 }
2385 
2386 #define skb_queue_walk(queue, skb) \
2387 		for (skb = (queue)->next;					\
2388 		     skb != (struct sk_buff *)(queue);				\
2389 		     skb = skb->next)
2390 
2391 #define skb_queue_walk_safe(queue, skb, tmp)					\
2392 		for (skb = (queue)->next, tmp = skb->next;			\
2393 		     skb != (struct sk_buff *)(queue);				\
2394 		     skb = tmp, tmp = skb->next)
2395 
2396 #define skb_queue_walk_from(queue, skb)						\
2397 		for (; skb != (struct sk_buff *)(queue);			\
2398 		     skb = skb->next)
2399 
2400 #define skb_queue_walk_from_safe(queue, skb, tmp)				\
2401 		for (tmp = skb->next;						\
2402 		     skb != (struct sk_buff *)(queue);				\
2403 		     skb = tmp, tmp = skb->next)
2404 
2405 #define skb_queue_reverse_walk(queue, skb) \
2406 		for (skb = (queue)->prev;					\
2407 		     skb != (struct sk_buff *)(queue);				\
2408 		     skb = skb->prev)
2409 
2410 #define skb_queue_reverse_walk_safe(queue, skb, tmp)				\
2411 		for (skb = (queue)->prev, tmp = skb->prev;			\
2412 		     skb != (struct sk_buff *)(queue);				\
2413 		     skb = tmp, tmp = skb->prev)
2414 
2415 #define skb_queue_reverse_walk_from_safe(queue, skb, tmp)			\
2416 		for (tmp = skb->prev;						\
2417 		     skb != (struct sk_buff *)(queue);				\
2418 		     skb = tmp, tmp = skb->prev)
2419 
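/* Illustrative sketch: the _safe variants allow unlinking the current
 * skb while walking ("expired" is a made-up predicate).  The walk
 * macros take no locks, so the caller must already hold the queue lock
 * when using __skb_unlink():
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_queue_walk_safe(queue, skb, tmp) {
 *		if (expired(skb)) {
 *			__skb_unlink(skb, queue);
 *			kfree_skb(skb);
 *		}
 *	}
 */
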
2420 static inline bool skb_has_frag_list(const struct sk_buff *skb)
2421 {
2422 	return skb_shinfo(skb)->frag_list != NULL;
2423 }
2424 
2425 static inline void skb_frag_list_init(struct sk_buff *skb)
2426 {
2427 	skb_shinfo(skb)->frag_list = NULL;
2428 }
2429 
2430 static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
2431 {
2432 	frag->next = skb_shinfo(skb)->frag_list;
2433 	skb_shinfo(skb)->frag_list = frag;
2434 }
2435 
2436 #define skb_walk_frags(skb, iter)	\
2437 	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
2438 
2439 extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
2440 					   int *peeked, int *off, int *err);
2441 extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
2442 					 int noblock, int *err);
2443 extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
2444 				     struct poll_table_struct *wait);
2445 extern int	       skb_copy_datagram_iovec(const struct sk_buff *from,
2446 					       int offset, struct iovec *to,
2447 					       int size);
2448 extern int	       skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
2449 							int hlen,
2450 							struct iovec *iov);
2451 extern int	       skb_copy_datagram_from_iovec(struct sk_buff *skb,
2452 						    int offset,
2453 						    const struct iovec *from,
2454 						    int from_offset,
2455 						    int len);
2456 extern int	       skb_copy_datagram_const_iovec(const struct sk_buff *from,
2457 						     int offset,
2458 						     const struct iovec *to,
2459 						     int to_offset,
2460 						     int size);
2461 extern void	       skb_free_datagram(struct sock *sk, struct sk_buff *skb);
2462 extern void	       skb_free_datagram_locked(struct sock *sk,
2463 						struct sk_buff *skb);
2464 extern int	       skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
2465 					 unsigned int flags);
2466 extern __wsum	       skb_checksum(const struct sk_buff *skb, int offset,
2467 				    int len, __wsum csum);
2468 extern int	       skb_copy_bits(const struct sk_buff *skb, int offset,
2469 				     void *to, int len);
2470 extern int	       skb_store_bits(struct sk_buff *skb, int offset,
2471 				      const void *from, int len);
2472 extern __wsum	       skb_copy_and_csum_bits(const struct sk_buff *skb,
2473 					      int offset, u8 *to, int len,
2474 					      __wsum csum);
2475 extern int             skb_splice_bits(struct sk_buff *skb,
2476 						unsigned int offset,
2477 						struct pipe_inode_info *pipe,
2478 						unsigned int len,
2479 						unsigned int flags);
2480 extern void	       skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
2481 extern void	       skb_split(struct sk_buff *skb,
2482 				 struct sk_buff *skb1, const u32 len);
2483 extern int	       skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
2484 				 int shiftlen);
2485 
2486 extern struct sk_buff *skb_segment(struct sk_buff *skb,
2487 				   netdev_features_t features);
2488 
2489 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
2490 				       int len, void *buffer)
2491 {
2492 	int hlen = skb_headlen(skb);
2493 
2494 	if (hlen - offset >= len)
2495 		return skb->data + offset;
2496 
2497 	if (skb_copy_bits(skb, offset, buffer, len) < 0)
2498 		return NULL;
2499 
2500 	return buffer;
2501 }
2502 
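/* Classic hedged usage: fetch a header that may span fragments into a
 * stack buffer ("thoff" is assumed to be the transport offset):
 *
 *	struct tcphdr _tcph;
 *	const struct tcphdr *th;
 *
 *	th = skb_header_pointer(skb, thoff, sizeof(_tcph), &_tcph);
 *	if (!th)
 *		return -EINVAL;
 */
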
2503 static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
2504 					     void *to,
2505 					     const unsigned int len)
2506 {
2507 	memcpy(to, skb->data, len);
2508 }
2509 
2510 static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
2511 						    const int offset, void *to,
2512 						    const unsigned int len)
2513 {
2514 	memcpy(to, skb->data + offset, len);
2515 }
2516 
2517 static inline void skb_copy_to_linear_data(struct sk_buff *skb,
2518 					   const void *from,
2519 					   const unsigned int len)
2520 {
2521 	memcpy(skb->data, from, len);
2522 }
2523 
2524 static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
2525 						  const int offset,
2526 						  const void *from,
2527 						  const unsigned int len)
2528 {
2529 	memcpy(skb->data + offset, from, len);
2530 }
2531 
2532 extern void skb_init(void);
2533 
2534 static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
2535 {
2536 	return skb->tstamp;
2537 }
2538 
2539 /**
2540  *	skb_get_timestamp - get timestamp from a skb
2541  *	@skb: skb to get stamp from
2542  *	@stamp: pointer to struct timeval to store stamp in
2543  *
2544  *	Timestamps are stored in the skb as offsets to a base timestamp.
2545  *	This function converts the offset back to a struct timeval and stores
2546  *	it in @stamp.
2547  */
2548 static inline void skb_get_timestamp(const struct sk_buff *skb,
2549 				     struct timeval *stamp)
2550 {
2551 	*stamp = ktime_to_timeval(skb->tstamp);
2552 }
2553 
2554 static inline void skb_get_timestampns(const struct sk_buff *skb,
2555 				       struct timespec *stamp)
2556 {
2557 	*stamp = ktime_to_timespec(skb->tstamp);
2558 }
2559 
2560 static inline void __net_timestamp(struct sk_buff *skb)
2561 {
2562 	skb->tstamp = ktime_get_real();
2563 }
2564 
2565 static inline ktime_t net_timedelta(ktime_t t)
2566 {
2567 	return ktime_sub(ktime_get_real(), t);
2568 }
2569 
2570 static inline ktime_t net_invalid_timestamp(void)
2571 {
2572 	return ktime_set(0, 0);
2573 }
2574 
2575 extern void skb_timestamping_init(void);
2576 
2577 #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
2578 
2579 extern void skb_clone_tx_timestamp(struct sk_buff *skb);
2580 extern bool skb_defer_rx_timestamp(struct sk_buff *skb);
2581 
2582 #else /* CONFIG_NETWORK_PHY_TIMESTAMPING */
2583 
2584 static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
2585 {
2586 }
2587 
2588 static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
2589 {
2590 	return false;
2591 }
2592 
2593 #endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */
2594 
2595 /**
2596  * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
2597  *
2598  * PHY drivers may accept clones of transmitted packets for
2599  * timestamping via their phy_driver.txtstamp method. These drivers
2600  * must call this function to return the skb back to the stack, with
2601  * or without a timestamp.
2602  *
2603  * @skb: clone of the original outgoing packet
2604  * @hwtstamps: hardware time stamps, may be NULL if not available
2605  *
2606  */
2607 void skb_complete_tx_timestamp(struct sk_buff *skb,
2608 			       struct skb_shared_hwtstamps *hwtstamps);
2609 
2610 /**
2611  * skb_tstamp_tx - queue clone of skb with send time stamps
2612  * @orig_skb:	the original outgoing packet
2613  * @hwtstamps:	hardware time stamps, may be NULL if not available
2614  *
2615  * If the skb has a socket associated, then this function clones the
2616  * skb (thus sharing the actual data and optional structures), stores
2617  *	the optional hardware time stamping information (if non-NULL) or
2618  * generates a software time stamp (otherwise), then queues the clone
2619  * to the error queue of the socket.  Errors are silently ignored.
2620  */
2621 extern void skb_tstamp_tx(struct sk_buff *orig_skb,
2622 			struct skb_shared_hwtstamps *hwtstamps);
2623 
2624 static inline void sw_tx_timestamp(struct sk_buff *skb)
2625 {
2626 	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
2627 	    !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
2628 		skb_tstamp_tx(skb, NULL);
2629 }
2630 
2631 /**
2632  * skb_tx_timestamp() - Driver hook for transmit timestamping
2633  *
2634  * Ethernet MAC drivers should call this function in their ndo_start_xmit()
2635  * function immediately before giving the sk_buff to the MAC hardware.
2636  *
2637  * @skb: A socket buffer.
2638  */
2639 static inline void skb_tx_timestamp(struct sk_buff *skb)
2640 {
2641 	skb_clone_tx_timestamp(skb);
2642 	sw_tx_timestamp(skb);
2643 }
2644 
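/* Sketch of the hook placement; everything except skb_tx_timestamp()
 * is placeholder driver logic:
 *
 *	static netdev_tx_t my_ndo_start_xmit(struct sk_buff *skb,
 *					     struct net_device *dev)
 *	{
 *		...map buffers and fill descriptors...
 *		skb_tx_timestamp(skb);
 *		...ring the doorbell; hardware now owns the skb...
 *		return NETDEV_TX_OK;
 *	}
 */
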
2645 /**
2646  * skb_complete_wifi_ack - deliver skb with wifi status
2647  *
2648  * @skb: the original outgoing packet
2649  * @acked: ack status
2650  *
2651  */
2652 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
2653 
2654 extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
2655 extern __sum16 __skb_checksum_complete(struct sk_buff *skb);
2656 
2657 static inline int skb_csum_unnecessary(const struct sk_buff *skb)
2658 {
2659 	return skb->ip_summed & CHECKSUM_UNNECESSARY;
2660 }
2661 
2662 /**
2663  *	skb_checksum_complete - Calculate checksum of an entire packet
2664  *	@skb: packet to process
2665  *
2666  *	This function calculates the checksum over the entire packet plus
2667  *	the value of skb->csum.  The latter can be used to supply the
2668  *	checksum of a pseudo header as used by TCP/UDP.  It returns the
2669  *	checksum.
2670  *
2671  *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
2672  *	this function can be used to verify the checksum of received
2673  *	packets.  In that case the function should return zero if the
2674  *	checksum is correct.  In particular, this function will return zero
2675  *	if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
2676  *	hardware has already verified the correctness of the checksum.
2677  */
2678 static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
2679 {
2680 	return skb_csum_unnecessary(skb) ?
2681 	       0 : __skb_checksum_complete(skb);
2682 }
2683 
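/* Hedged sketch of full-packet verification for a pseudo-header
 * protocol (UDP-style): seed skb->csum with the pseudo-header sum,
 * then let skb_checksum_complete() fold in the packet:
 *
 *	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
 *				       skb->len, IPPROTO_UDP, 0);
 *	if (skb_checksum_complete(skb))
 *		goto csum_error;
 */
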
2684 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2685 extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
2686 static inline void nf_conntrack_put(struct nf_conntrack *nfct)
2687 {
2688 	if (nfct && atomic_dec_and_test(&nfct->use))
2689 		nf_conntrack_destroy(nfct);
2690 }
2691 static inline void nf_conntrack_get(struct nf_conntrack *nfct)
2692 {
2693 	if (nfct)
2694 		atomic_inc(&nfct->use);
2695 }
2696 #endif
2697 #ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
2698 static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
2699 {
2700 	if (skb)
2701 		atomic_inc(&skb->users);
2702 }
2703 static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
2704 {
2705 	if (skb)
2706 		kfree_skb(skb);
2707 }
2708 #endif
2709 #ifdef CONFIG_BRIDGE_NETFILTER
2710 static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
2711 {
2712 	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
2713 		kfree(nf_bridge);
2714 }
2715 static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
2716 {
2717 	if (nf_bridge)
2718 		atomic_inc(&nf_bridge->use);
2719 }
2720 #endif /* CONFIG_BRIDGE_NETFILTER */
2721 static inline void nf_reset(struct sk_buff *skb)
2722 {
2723 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2724 	nf_conntrack_put(skb->nfct);
2725 	skb->nfct = NULL;
2726 #endif
2727 #ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
2728 	nf_conntrack_put_reasm(skb->nfct_reasm);
2729 	skb->nfct_reasm = NULL;
2730 #endif
2731 #ifdef CONFIG_BRIDGE_NETFILTER
2732 	nf_bridge_put(skb->nf_bridge);
2733 	skb->nf_bridge = NULL;
2734 #endif
2735 }
2736 
2737 static inline void nf_reset_trace(struct sk_buff *skb)
2738 {
2739 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
2740 	skb->nf_trace = 0;
2741 #endif
2742 }
2743 
2744 /* Note: This doesn't put any conntrack and bridge info in dst. */
2745 static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
2746 {
2747 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2748 	dst->nfct = src->nfct;
2749 	nf_conntrack_get(src->nfct);
2750 	dst->nfctinfo = src->nfctinfo;
2751 #endif
2752 #ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
2753 	dst->nfct_reasm = src->nfct_reasm;
2754 	nf_conntrack_get_reasm(src->nfct_reasm);
2755 #endif
2756 #ifdef CONFIG_BRIDGE_NETFILTER
2757 	dst->nf_bridge  = src->nf_bridge;
2758 	nf_bridge_get(src->nf_bridge);
2759 #endif
2760 }
2761 
2762 static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
2763 {
2764 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2765 	nf_conntrack_put(dst->nfct);
2766 #endif
2767 #ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
2768 	nf_conntrack_put_reasm(dst->nfct_reasm);
2769 #endif
2770 #ifdef CONFIG_BRIDGE_NETFILTER
2771 	nf_bridge_put(dst->nf_bridge);
2772 #endif
2773 	__nf_copy(dst, src);
2774 }
2775 
2776 #ifdef CONFIG_NETWORK_SECMARK
2777 static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
2778 {
2779 	to->secmark = from->secmark;
2780 }
2781 
2782 static inline void skb_init_secmark(struct sk_buff *skb)
2783 {
2784 	skb->secmark = 0;
2785 }
2786 #else
2787 static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
2788 { }
2789 
2790 static inline void skb_init_secmark(struct sk_buff *skb)
2791 { }
2792 #endif
2793 
2794 static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
2795 {
2796 	skb->queue_mapping = queue_mapping;
2797 }
2798 
2799 static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
2800 {
2801 	return skb->queue_mapping;
2802 }
2803 
2804 static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
2805 {
2806 	to->queue_mapping = from->queue_mapping;
2807 }
2808 
2809 static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
2810 {
2811 	skb->queue_mapping = rx_queue + 1;
2812 }
2813 
2814 static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
2815 {
2816 	return skb->queue_mapping - 1;
2817 }
2818 
2819 static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
2820 {
2821 	return skb->queue_mapping != 0;
2822 }
2823 
2824 extern u16 __skb_tx_hash(const struct net_device *dev,
2825 			 const struct sk_buff *skb,
2826 			 unsigned int num_tx_queues);
2827 
2828 #ifdef CONFIG_XFRM
2829 static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
2830 {
2831 	return skb->sp;
2832 }
2833 #else
2834 static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
2835 {
2836 	return NULL;
2837 }
2838 #endif
2839 
2840 /* Keeps track of the mac header offset relative to skb->head.
2841  * It is useful for TSO of tunneling protocols, e.g. GRE.
2842  * For a non-tunnel skb it points to skb_mac_header() and for a
2843  * tunnel skb it points to the outer mac header. */
2844 struct skb_gso_cb {
2845 	int mac_offset;
2846 };
2847 #define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb)
2848 
2849 static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
2850 {
2851 	return (skb_mac_header(inner_skb) - inner_skb->head) -
2852 		SKB_GSO_CB(inner_skb)->mac_offset;
2853 }
2854 
2855 static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
2856 {
2857 	int new_headroom, headroom;
2858 	int ret;
2859 
2860 	headroom = skb_headroom(skb);
2861 	ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
2862 	if (ret)
2863 		return ret;
2864 
2865 	new_headroom = skb_headroom(skb);
2866 	SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
2867 	return 0;
2868 }
2869 
2870 static inline bool skb_is_gso(const struct sk_buff *skb)
2871 {
2872 	return skb_shinfo(skb)->gso_size;
2873 }
2874 
2875 static inline bool skb_is_gso_v6(const struct sk_buff *skb)
2876 {
2877 	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
2878 }
2879 
2880 extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);
2881 
2882 static inline bool skb_warn_if_lro(const struct sk_buff *skb)
2883 {
2884 	/* LRO sets gso_size but not gso_type, whereas if GSO is really
2885 	 * wanted then gso_type will be set. */
2886 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
2887 
2888 	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
2889 	    unlikely(shinfo->gso_type == 0)) {
2890 		__skb_warn_lro_forwarding(skb);
2891 		return true;
2892 	}
2893 	return false;
2894 }
2895 
2896 static inline void skb_forward_csum(struct sk_buff *skb)
2897 {
2898 	/* Unfortunately we don't support this one.  Any brave souls? */
2899 	if (skb->ip_summed == CHECKSUM_COMPLETE)
2900 		skb->ip_summed = CHECKSUM_NONE;
2901 }
2902 
2903 /**
2904  * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
2905  * @skb: skb to check
2906  *
2907  * Fresh skbs have their ip_summed set to CHECKSUM_NONE.
2908  * Instead of forcing ip_summed to CHECKSUM_NONE, we can
2909  * use this helper to document places where we make this assertion.
2910  */
2911 static inline void skb_checksum_none_assert(const struct sk_buff *skb)
2912 {
2913 #ifdef DEBUG
2914 	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
2915 #endif
2916 }
2917 
2918 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
2919 
2920 u32 __skb_get_poff(const struct sk_buff *skb);
2921 
2922 /**
2923  * skb_head_is_locked - Determine if the skb->head is locked down
2924  * @skb: skb to check
2925  *
2926  * The head on skbs built around a head frag can be removed if they are
2927  * not cloned.  This function returns true if the skb head is locked down
2928  * due to either being allocated via kmalloc or being a clone with
2929  * multiple references to the head.
2930  */
2931 static inline bool skb_head_is_locked(const struct sk_buff *skb)
2932 {
2933 	return !skb->head_frag || skb_cloned(skb);
2934 }
2935 #endif	/* __KERNEL__ */
2936 #endif	/* _LINUX_SKBUFF_H */
2937