xref: /linux-6.15/include/linux/skbuff.h (revision e756bc56)
1 /*
2  *	Definitions for the 'struct sk_buff' memory handlers.
3  *
4  *	Authors:
5  *		Alan Cox, <[email protected]>
6  *		Florian La Roche, <[email protected]>
7  *
8  *	This program is free software; you can redistribute it and/or
9  *	modify it under the terms of the GNU General Public License
10  *	as published by the Free Software Foundation; either version
11  *	2 of the License, or (at your option) any later version.
12  */
13 
14 #ifndef _LINUX_SKBUFF_H
15 #define _LINUX_SKBUFF_H
16 
17 #include <linux/kernel.h>
18 #include <linux/kmemcheck.h>
19 #include <linux/compiler.h>
20 #include <linux/time.h>
21 #include <linux/bug.h>
22 #include <linux/cache.h>
23 
24 #include <linux/atomic.h>
25 #include <asm/types.h>
26 #include <linux/spinlock.h>
27 #include <linux/net.h>
28 #include <linux/textsearch.h>
29 #include <net/checksum.h>
30 #include <linux/rcupdate.h>
31 #include <linux/dmaengine.h>
32 #include <linux/hrtimer.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/netdev_features.h>
35 #include <net/flow_keys.h>
36 
37 /* Don't change this without changing skb_csum_unnecessary! */
38 #define CHECKSUM_NONE 0
39 #define CHECKSUM_UNNECESSARY 1
40 #define CHECKSUM_COMPLETE 2
41 #define CHECKSUM_PARTIAL 3
42 
43 #define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
44 				 ~(SMP_CACHE_BYTES - 1))
45 #define SKB_WITH_OVERHEAD(X)	\
46 	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
47 #define SKB_MAX_ORDER(X, ORDER) \
48 	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
49 #define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
50 #define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))
51 
52 /* return minimum truesize of one skb containing X bytes of data */
53 #define SKB_TRUESIZE(X) ((X) +						\
54 			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
55 			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
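/* For illustration only (sizes are arch- and config-dependent): assuming
 * 64-byte cache lines, sizeof(struct sk_buff) rounding up to 256 and
 * sizeof(struct skb_shared_info) rounding up to 320, a 1500 byte payload gives
 *
 *	SKB_TRUESIZE(1500) = 1500 + 256 + 320 = 2076
 *
 * which is what gets charged against the owning socket's memory accounting.
 */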
56 
57 /* A. Checksumming of received packets by device.
58  *
59  *	NONE: device failed to checksum this packet.
60  *		skb->csum is undefined.
61  *
62  *	UNNECESSARY: device parsed the packet and would have verified the
63  *		checksum. skb->csum is undefined.
64  *	      This is a poor option, but, unfortunately, many vendors do it.
65  *	      Apparently with the secret goal of selling you a new device
66  *	      when you add a new protocol to your host, e.g. IPv6. 8)
67  *
68  *	COMPLETE: the most generic way. Device supplied the checksum of the
69  *	    _entire_ packet as seen by netif_rx in skb->csum.
70  *	    NOTE: even if a device supports only some protocols but is
71  *	    able to produce some skb->csum, it MUST use COMPLETE,
72  *	    not UNNECESSARY.
73  *
74  *	PARTIAL: identical to the case for output below.  This may occur
75  *	    on a packet received directly from another Linux OS, e.g.,
76  *	    a virtualised Linux kernel on the same host.  The packet can
77  *	    be treated in the same way as UNNECESSARY except that on
78  *	    output (i.e., forwarding) the checksum must be filled in
79  *	    by the OS or the hardware.
80  *
81  * B. Checksumming on output.
82  *
83  *	NONE: skb is checksummed by protocol or csum is not required.
84  *
85  *	PARTIAL: the device is required to checksum the packet as seen by
86  *	hard_start_xmit() from skb->csum_start up to the end, and to record
87  *	the checksum at skb->csum_start + skb->csum_offset.
88  *
89  *	Device must show its capabilities in dev->features, set
90  *	at device setup time.
91  *	NETIF_F_HW_CSUM	- the device is clever; it is able to checksum
92  *			  everything.
93  *	NETIF_F_IP_CSUM - the device is dumb. It is able to checksum only
94  *			  TCP/UDP over IPv4. Sigh. Vendors like it this
95  *			  way for some unknown reason. Though, see the comment
96  *			  above about CHECKSUM_UNNECESSARY. 8)
97  *	NETIF_F_IPV6_CSUM - about as dumb as the last one, but does IPv6 instead.
98  *
99  *	UNNECESSARY: the device will do protocol-specific checksumming. Protocol
100  *	drivers that do not want the stack to perform the checksum calculation
101  *	should use this flag in their outgoing skbs.
102  *	NETIF_F_FCOE_CRC - indicates the device can offload the FCoE FC CRC.
103  *			  Correspondingly, the FCoE protocol driver stack
104  *			  should use CHECKSUM_UNNECESSARY.
105  *
106  *	Any questions? No questions, good. 		--ANK
107  */
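/* Illustrative only (hypothetical driver snippets, not kernel code): on
 * receive, a driver reports what its hardware actually did, e.g.
 *
 *	skb->csum = hw_csum;			// checksum over the whole packet
 *	skb->ip_summed = CHECKSUM_COMPLETE;
 *
 * or, if the hardware merely validated the checksum,
 *
 *	skb->ip_summed = CHECKSUM_UNNECESSARY;
 *
 * On transmit the stack may hand the driver CHECKSUM_PARTIAL, in which case
 * the driver (or its hardware) must compute the checksum from skb->csum_start
 * to the end of the packet and store it at skb->csum_start + skb->csum_offset.
 */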
108 
109 struct net_device;
110 struct scatterlist;
111 struct pipe_inode_info;
112 
113 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
114 struct nf_conntrack {
115 	atomic_t use;
116 };
117 #endif
118 
119 #ifdef CONFIG_BRIDGE_NETFILTER
120 struct nf_bridge_info {
121 	atomic_t		use;
122 	unsigned int		mask;
123 	struct net_device	*physindev;
124 	struct net_device	*physoutdev;
125 	unsigned long		data[32 / sizeof(unsigned long)];
126 };
127 #endif
128 
129 struct sk_buff_head {
130 	/* These two members must be first. */
131 	struct sk_buff	*next;
132 	struct sk_buff	*prev;
133 
134 	__u32		qlen;
135 	spinlock_t	lock;
136 };
137 
138 struct sk_buff;
139 
140 /* To allow a 64K frame to be packed as a single skb without a frag_list we
141  * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
142  * buffers which do not start on a page boundary.
143  *
144  * Since GRO uses frags we allocate at least 16 regardless of page
145  * size.
146  */
147 #if (65536/PAGE_SIZE + 1) < 16
148 #define MAX_SKB_FRAGS 16UL
149 #else
150 #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
151 #endif
152 
153 typedef struct skb_frag_struct skb_frag_t;
154 
155 struct skb_frag_struct {
156 	struct {
157 		struct page *p;
158 	} page;
159 #if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
160 	__u32 page_offset;
161 	__u32 size;
162 #else
163 	__u16 page_offset;
164 	__u16 size;
165 #endif
166 };
167 
168 static inline unsigned int skb_frag_size(const skb_frag_t *frag)
169 {
170 	return frag->size;
171 }
172 
173 static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
174 {
175 	frag->size = size;
176 }
177 
178 static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
179 {
180 	frag->size += delta;
181 }
182 
183 static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
184 {
185 	frag->size -= delta;
186 }
187 
188 #define HAVE_HW_TIME_STAMP
189 
190 /**
191  * struct skb_shared_hwtstamps - hardware time stamps
192  * @hwtstamp:	hardware time stamp transformed into duration
193  *		since arbitrary point in time
194  * @syststamp:	hwtstamp transformed to system time base
195  *
196  * Software time stamps generated by ktime_get_real() are stored in
197  * skb->tstamp. The relation between the different kinds of time
198  * stamps is as follows:
199  *
200  * syststamp and tstamp can be compared against each other in
201  * arbitrary combinations.  The accuracy of a
202  * syststamp/tstamp/"syststamp from other device" comparison is
203  * limited by the accuracy of the transformation into system time
204  * base. This depends on the device driver and its underlying
205  * hardware.
206  *
207  * hwtstamps can only be compared against other hwtstamps from
208  * the same device.
209  *
210  * This structure is attached to packets as part of the
211  * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
212  */
213 struct skb_shared_hwtstamps {
214 	ktime_t	hwtstamp;
215 	ktime_t	syststamp;
216 };
217 
218 /* Definitions for tx_flags in struct skb_shared_info */
219 enum {
220 	/* generate hardware time stamp */
221 	SKBTX_HW_TSTAMP = 1 << 0,
222 
223 	/* generate software time stamp */
224 	SKBTX_SW_TSTAMP = 1 << 1,
225 
226 	/* device driver is going to provide hardware time stamp */
227 	SKBTX_IN_PROGRESS = 1 << 2,
228 
229 	/* device driver supports TX zero-copy buffers */
230 	SKBTX_DEV_ZEROCOPY = 1 << 3,
231 
232 	/* generate wifi status information (where possible) */
233 	SKBTX_WIFI_STATUS = 1 << 4,
234 
235 	/* This indicates at least one fragment might be overwritten
236 	 * (as in vmsplice(), sendfile() ...)
237 	 * If we need to compute a TX checksum, we'll need to copy
238 	 * all frags to avoid possible bad checksum
239 	 */
240 	SKBTX_SHARED_FRAG = 1 << 5,
241 };
242 
243 /*
244  * The callback notifies userspace to release buffers when skb DMA is done in
245  * the lower device; the skb's last reference should be 0 when this is called.
246  * The zerocopy_success argument is true if zero-copy transmit occurred,
247  * false on data copy or an out-of-memory error caused by a data copy attempt.
248  * The ctx field is used to track the device context.
249  * The desc field is used to track the userspace buffer index.
250  */
251 struct ubuf_info {
252 	void (*callback)(struct ubuf_info *, bool zerocopy_success);
253 	void *ctx;
254 	unsigned long desc;
255 };
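/* A minimal sketch of a matching callback (hypothetical driver code; the
 * names my_tx_ring and my_complete_tx_desc are made up for illustration):
 *
 *	static void my_zerocopy_done(struct ubuf_info *uarg, bool success)
 *	{
 *		struct my_tx_ring *ring = uarg->ctx;
 *
 *		my_complete_tx_desc(ring, uarg->desc, success);
 *	}
 *
 * Because the callback only runs once the skb's last reference is dropped,
 * the userspace pages must stay pinned until then.
 */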
256 
257 /* This data is invariant across clones and lives at
258  * the end of the header data, ie. at skb->end.
259  */
260 struct skb_shared_info {
261 	unsigned char	nr_frags;
262 	__u8		tx_flags;
263 	unsigned short	gso_size;
264 	/* Warning: this field is not always filled in (UFO)! */
265 	unsigned short	gso_segs;
266 	unsigned short  gso_type;
267 	struct sk_buff	*frag_list;
268 	struct skb_shared_hwtstamps hwtstamps;
269 	__be32          ip6_frag_id;
270 
271 	/*
272 	 * Warning : all fields before dataref are cleared in __alloc_skb()
273 	 */
274 	atomic_t	dataref;
275 
276 	/* Intermediate layers must ensure that destructor_arg
277 	 * remains valid until skb destructor */
278 	void *		destructor_arg;
279 
280 	/* must be last field, see pskb_expand_head() */
281 	skb_frag_t	frags[MAX_SKB_FRAGS];
282 };
283 
284 /* We divide dataref into two halves.  The higher 16 bits hold references
285  * to the payload part of skb->data.  The lower 16 bits hold references to
286  * the entire skb->data.  A clone of a headerless skb holds the length of
287  * the header in skb->hdr_len.
288  *
289  * All users must obey the rule that the skb->data reference count must be
290  * greater than or equal to the payload reference count.
291  *
292  * Holding a reference to the payload part means that the user does not
293  * care about modifications to the header part of skb->data.
294  */
295 #define SKB_DATAREF_SHIFT 16
296 #define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
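/* For illustration, the two halves can be read back like this (this is
 * essentially what skb_header_cloned() below does):
 *
 *	int dataref      = atomic_read(&skb_shinfo(skb)->dataref);
 *	int payload_refs = dataref >> SKB_DATAREF_SHIFT;
 *	int total_refs   = dataref & SKB_DATAREF_MASK;	// refs to all of skb->data
 */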
297 
298 
299 enum {
300 	SKB_FCLONE_UNAVAILABLE,
301 	SKB_FCLONE_ORIG,
302 	SKB_FCLONE_CLONE,
303 };
304 
305 enum {
306 	SKB_GSO_TCPV4 = 1 << 0,
307 	SKB_GSO_UDP = 1 << 1,
308 
309 	/* This indicates the skb is from an untrusted source. */
310 	SKB_GSO_DODGY = 1 << 2,
311 
312 	/* This indicates the tcp segment has CWR set. */
313 	SKB_GSO_TCP_ECN = 1 << 3,
314 
315 	SKB_GSO_TCPV6 = 1 << 4,
316 
317 	SKB_GSO_FCOE = 1 << 5,
318 
319 	SKB_GSO_GRE = 1 << 6,
320 
321 	SKB_GSO_IPIP = 1 << 7,
322 
323 	SKB_GSO_SIT = 1 << 8,
324 
325 	SKB_GSO_UDP_TUNNEL = 1 << 9,
326 
327 	SKB_GSO_MPLS = 1 << 10,
328 };
329 
330 #if BITS_PER_LONG > 32
331 #define NET_SKBUFF_DATA_USES_OFFSET 1
332 #endif
333 
334 #ifdef NET_SKBUFF_DATA_USES_OFFSET
335 typedef unsigned int sk_buff_data_t;
336 #else
337 typedef unsigned char *sk_buff_data_t;
338 #endif
339 
340 /**
341  *	struct sk_buff - socket buffer
342  *	@next: Next buffer in list
343  *	@prev: Previous buffer in list
344  *	@tstamp: Time we arrived
345  *	@sk: Socket we are owned by
346  *	@dev: Device we arrived on/are leaving by
347  *	@cb: Control buffer. Free for use by every layer. Put private vars here
348  *	@_skb_refdst: destination entry (with norefcount bit)
349  *	@sp: the security path, used for xfrm
350  *	@len: Length of actual data
351  *	@data_len: Data length
352  *	@mac_len: Length of link layer header
353  *	@hdr_len: writable header length of cloned skb
354  *	@csum: Checksum (must include start/offset pair)
355  *	@csum_start: Offset from skb->head where checksumming should start
356  *	@csum_offset: Offset from csum_start where checksum should be stored
357  *	@priority: Packet queueing priority
358  *	@local_df: allow local fragmentation
359  *	@cloned: Head may be cloned (check refcnt to be sure)
360  *	@ip_summed: Driver fed us an IP checksum
361  *	@nohdr: Payload reference only, must not modify header
362  *	@nfctinfo: Relationship of this skb to the connection
363  *	@pkt_type: Packet class
364  *	@fclone: skbuff clone status
365  *	@ipvs_property: skbuff is owned by ipvs
366  *	@peeked: this packet has been seen already, so stats have been
367  *		done for it, don't do them again
368  *	@nf_trace: netfilter packet trace flag
369  *	@protocol: Packet protocol from driver
370  *	@destructor: Destruct function
371  *	@nfct: Associated connection, if any
372  *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
373  *	@skb_iif: ifindex of device we arrived on
374  *	@tc_index: Traffic control index
375  *	@tc_verd: traffic control verdict
376  *	@rxhash: the packet hash computed on receive
377  *	@queue_mapping: Queue mapping for multiqueue devices
378  *	@ndisc_nodetype: router type (from link layer)
379  *	@ooo_okay: allow the mapping of a socket to a queue to be changed
380  *	@l4_rxhash: indicate rxhash is a canonical 4-tuple hash over transport
381  *		ports.
382  *	@wifi_acked_valid: wifi_acked was set
383  *	@wifi_acked: whether frame was acked on wifi or not
384  *	@no_fcs:  Request NIC to treat last 4 bytes as Ethernet FCS
385  *	@dma_cookie: a cookie to one of several possible DMA operations
386  *		done by skb DMA functions
387  *	@napi_id: id of the NAPI struct this skb came from
388  *	@secmark: security marking
389  *	@mark: Generic packet mark
390  *	@dropcount: total number of sk_receive_queue overflows
391  *	@vlan_proto: vlan encapsulation protocol
392  *	@vlan_tci: vlan tag control information
393  *	@inner_protocol: Protocol (encapsulation)
394  *	@inner_transport_header: Inner transport layer header (encapsulation)
395  *	@inner_network_header: Network layer header (encapsulation)
396  *	@inner_mac_header: Link layer header (encapsulation)
397  *	@transport_header: Transport layer header
398  *	@network_header: Network layer header
399  *	@mac_header: Link layer header
400  *	@tail: Tail pointer
401  *	@end: End pointer
402  *	@head: Head of buffer
403  *	@data: Data head pointer
404  *	@truesize: Buffer size
405  *	@users: User count - see {datagram,tcp}.c
406  */
407 
408 struct sk_buff {
409 	/* These two members must be first. */
410 	struct sk_buff		*next;
411 	struct sk_buff		*prev;
412 
413 	ktime_t			tstamp;
414 
415 	struct sock		*sk;
416 	struct net_device	*dev;
417 
418 	/*
419 	 * This is the control buffer. It is free to use for every
420 	 * layer. Please put your private variables there. If you
421 	 * want to keep them across layers you have to do a skb_clone()
422 	 * first. This is owned by whoever has the skb queued ATM.
423 	 */
424 	char			cb[48] __aligned(8);
425 
426 	unsigned long		_skb_refdst;
427 #ifdef CONFIG_XFRM
428 	struct	sec_path	*sp;
429 #endif
430 	unsigned int		len,
431 				data_len;
432 	__u16			mac_len,
433 				hdr_len;
434 	union {
435 		__wsum		csum;
436 		struct {
437 			__u16	csum_start;
438 			__u16	csum_offset;
439 		};
440 	};
441 	__u32			priority;
442 	kmemcheck_bitfield_begin(flags1);
443 	__u8			local_df:1,
444 				cloned:1,
445 				ip_summed:2,
446 				nohdr:1,
447 				nfctinfo:3;
448 	__u8			pkt_type:3,
449 				fclone:2,
450 				ipvs_property:1,
451 				peeked:1,
452 				nf_trace:1;
453 	kmemcheck_bitfield_end(flags1);
454 	__be16			protocol;
455 
456 	void			(*destructor)(struct sk_buff *skb);
457 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
458 	struct nf_conntrack	*nfct;
459 #endif
460 #ifdef CONFIG_BRIDGE_NETFILTER
461 	struct nf_bridge_info	*nf_bridge;
462 #endif
463 
464 	int			skb_iif;
465 
466 	__u32			rxhash;
467 
468 	__be16			vlan_proto;
469 	__u16			vlan_tci;
470 
471 #ifdef CONFIG_NET_SCHED
472 	__u16			tc_index;	/* traffic control index */
473 #ifdef CONFIG_NET_CLS_ACT
474 	__u16			tc_verd;	/* traffic control verdict */
475 #endif
476 #endif
477 
478 	__u16			queue_mapping;
479 	kmemcheck_bitfield_begin(flags2);
480 #ifdef CONFIG_IPV6_NDISC_NODETYPE
481 	__u8			ndisc_nodetype:2;
482 #endif
483 	__u8			pfmemalloc:1;
484 	__u8			ooo_okay:1;
485 	__u8			l4_rxhash:1;
486 	__u8			wifi_acked_valid:1;
487 	__u8			wifi_acked:1;
488 	__u8			no_fcs:1;
489 	__u8			head_frag:1;
490 	/* Encapsulation protocol and NIC drivers should use
491 	 * this flag to indicate to each other if the skb contains
492 	 * encapsulated packet or not and maybe use the inner packet
493 	 * headers if needed
494 	 */
495 	__u8			encapsulation:1;
496 	/* 6/8 bit hole (depending on ndisc_nodetype presence) */
497 	kmemcheck_bitfield_end(flags2);
498 
499 #if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
500 	union {
501 		unsigned int	napi_id;
502 		dma_cookie_t	dma_cookie;
503 	};
504 #endif
505 #ifdef CONFIG_NETWORK_SECMARK
506 	__u32			secmark;
507 #endif
508 	union {
509 		__u32		mark;
510 		__u32		dropcount;
511 		__u32		reserved_tailroom;
512 	};
513 
514 	__be16			inner_protocol;
515 	__u16			inner_transport_header;
516 	__u16			inner_network_header;
517 	__u16			inner_mac_header;
518 	__u16			transport_header;
519 	__u16			network_header;
520 	__u16			mac_header;
521 	/* These elements must be at the end, see alloc_skb() for details.  */
522 	sk_buff_data_t		tail;
523 	sk_buff_data_t		end;
524 	unsigned char		*head,
525 				*data;
526 	unsigned int		truesize;
527 	atomic_t		users;
528 };
529 
530 #ifdef __KERNEL__
531 /*
532  *	Handling routines are only of interest to the kernel
533  */
534 #include <linux/slab.h>
535 
536 
537 #define SKB_ALLOC_FCLONE	0x01
538 #define SKB_ALLOC_RX		0x02
539 
540 /* Returns true if the skb was allocated from PFMEMALLOC reserves */
541 static inline bool skb_pfmemalloc(const struct sk_buff *skb)
542 {
543 	return unlikely(skb->pfmemalloc);
544 }
545 
546 /*
547  * skb might have a dst pointer attached, refcounted or not.
548  * _skb_refdst low order bit is set if refcount was _not_ taken
549  */
550 #define SKB_DST_NOREF	1UL
551 #define SKB_DST_PTRMASK	~(SKB_DST_NOREF)
552 
553 /**
554  * skb_dst - returns skb dst_entry
555  * @skb: buffer
556  *
557  * Returns the skb dst_entry, regardless of whether a reference was taken.
558  */
559 static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
560 {
561 	/* If refdst was not refcounted, check we still are in a
562 	 * rcu_read_lock section
563 	 */
564 	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
565 		!rcu_read_lock_held() &&
566 		!rcu_read_lock_bh_held());
567 	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
568 }
569 
570 /**
571  * skb_dst_set - sets skb dst
572  * @skb: buffer
573  * @dst: dst entry
574  *
575  * Sets skb dst, assuming a reference was taken on dst and should
576  * be released by skb_dst_drop()
577  */
578 static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
579 {
580 	skb->_skb_refdst = (unsigned long)dst;
581 }
582 
583 void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
584 			 bool force);
585 
586 /**
587  * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
588  * @skb: buffer
589  * @dst: dst entry
590  *
591  * Sets skb dst, assuming a reference was not taken on dst.
592  * If dst entry is cached, we do not take reference and dst_release
593  * will be avoided by refdst_drop. If dst entry is not cached, we take
594  * reference, so that last dst_release can destroy the dst immediately.
595  */
596 static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
597 {
598 	__skb_dst_set_noref(skb, dst, false);
599 }
600 
601 /**
602  * skb_dst_set_noref_force - sets skb dst, without taking reference
603  * @skb: buffer
604  * @dst: dst entry
605  *
606  * Sets skb dst, assuming a reference was not taken on dst.
607  * No reference is taken and no dst_release will be called. While for
608  * cached dsts deferred reclaim is a basic feature, for entries that are
609  * not cached it is the caller's job to guarantee that the last dst_release
610  * for the provided dst happens when nobody uses it, e.g. after an RCU grace period.
611  */
612 static inline void skb_dst_set_noref_force(struct sk_buff *skb,
613 					   struct dst_entry *dst)
614 {
615 	__skb_dst_set_noref(skb, dst, true);
616 }
617 
618 /**
619  * skb_dst_is_noref - Test if skb dst isn't refcounted
620  * @skb: buffer
621  */
622 static inline bool skb_dst_is_noref(const struct sk_buff *skb)
623 {
624 	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
625 }
626 
627 static inline struct rtable *skb_rtable(const struct sk_buff *skb)
628 {
629 	return (struct rtable *)skb_dst(skb);
630 }
631 
632 void kfree_skb(struct sk_buff *skb);
633 void kfree_skb_list(struct sk_buff *segs);
634 void skb_tx_error(struct sk_buff *skb);
635 void consume_skb(struct sk_buff *skb);
636 void  __kfree_skb(struct sk_buff *skb);
637 extern struct kmem_cache *skbuff_head_cache;
638 
639 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
640 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
641 		      bool *fragstolen, int *delta_truesize);
642 
643 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
644 			    int node);
645 struct sk_buff *build_skb(void *data, unsigned int frag_size);
646 static inline struct sk_buff *alloc_skb(unsigned int size,
647 					gfp_t priority)
648 {
649 	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
650 }
651 
652 static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
653 					       gfp_t priority)
654 {
655 	return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
656 }
657 
658 struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
659 static inline struct sk_buff *alloc_skb_head(gfp_t priority)
660 {
661 	return __alloc_skb_head(priority, -1);
662 }
663 
664 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
665 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
666 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
667 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
668 struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask);
669 
670 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
671 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
672 				     unsigned int headroom);
673 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
674 				int newtailroom, gfp_t priority);
675 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
676 		 int len);
677 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
678 int skb_pad(struct sk_buff *skb, int pad);
679 #define dev_kfree_skb(a)	consume_skb(a)
680 
681 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
682 			    int getfrag(void *from, char *to, int offset,
683 					int len, int odd, struct sk_buff *skb),
684 			    void *from, int length);
685 
686 struct skb_seq_state {
687 	__u32		lower_offset;
688 	__u32		upper_offset;
689 	__u32		frag_idx;
690 	__u32		stepped_offset;
691 	struct sk_buff	*root_skb;
692 	struct sk_buff	*cur_skb;
693 	__u8		*frag_data;
694 };
695 
696 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
697 			  unsigned int to, struct skb_seq_state *st);
698 unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
699 			  struct skb_seq_state *st);
700 void skb_abort_seq_read(struct skb_seq_state *st);
701 
702 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
703 			   unsigned int to, struct ts_config *config,
704 			   struct ts_state *state);
705 
706 void __skb_get_rxhash(struct sk_buff *skb);
707 static inline __u32 skb_get_rxhash(struct sk_buff *skb)
708 {
709 	if (!skb->l4_rxhash)
710 		__skb_get_rxhash(skb);
711 
712 	return skb->rxhash;
713 }
714 
715 #ifdef NET_SKBUFF_DATA_USES_OFFSET
716 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
717 {
718 	return skb->head + skb->end;
719 }
720 
721 static inline unsigned int skb_end_offset(const struct sk_buff *skb)
722 {
723 	return skb->end;
724 }
725 #else
726 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
727 {
728 	return skb->end;
729 }
730 
731 static inline unsigned int skb_end_offset(const struct sk_buff *skb)
732 {
733 	return skb->end - skb->head;
734 }
735 #endif
736 
737 /* Internal */
738 #define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))
739 
740 static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
741 {
742 	return &skb_shinfo(skb)->hwtstamps;
743 }
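/* For illustration (hypothetical driver code): a driver that read a raw
 * hardware timestamp in nanoseconds from its NIC might report it as
 *
 *	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(hw_ns);
 *
 * before handing the skb up the stack.
 */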
744 
745 /**
746  *	skb_queue_empty - check if a queue is empty
747  *	@list: queue head
748  *
749  *	Returns true if the queue is empty, false otherwise.
750  */
751 static inline int skb_queue_empty(const struct sk_buff_head *list)
752 {
753 	return list->next == (struct sk_buff *)list;
754 }
755 
756 /**
757  *	skb_queue_is_last - check if skb is the last entry in the queue
758  *	@list: queue head
759  *	@skb: buffer
760  *
761  *	Returns true if @skb is the last buffer on the list.
762  */
763 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
764 				     const struct sk_buff *skb)
765 {
766 	return skb->next == (struct sk_buff *)list;
767 }
768 
769 /**
770  *	skb_queue_is_first - check if skb is the first entry in the queue
771  *	@list: queue head
772  *	@skb: buffer
773  *
774  *	Returns true if @skb is the first buffer on the list.
775  */
776 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
777 				      const struct sk_buff *skb)
778 {
779 	return skb->prev == (struct sk_buff *)list;
780 }
781 
782 /**
783  *	skb_queue_next - return the next packet in the queue
784  *	@list: queue head
785  *	@skb: current buffer
786  *
787  *	Return the next packet in @list after @skb.  It is only valid to
788  *	call this if skb_queue_is_last() evaluates to false.
789  */
790 static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
791 					     const struct sk_buff *skb)
792 {
793 	/* This BUG_ON may seem severe, but if we just return then we
794 	 * are going to dereference garbage.
795 	 */
796 	BUG_ON(skb_queue_is_last(list, skb));
797 	return skb->next;
798 }
799 
800 /**
801  *	skb_queue_prev - return the prev packet in the queue
802  *	@list: queue head
803  *	@skb: current buffer
804  *
805  *	Return the prev packet in @list before @skb.  It is only valid to
806  *	call this if skb_queue_is_first() evaluates to false.
807  */
808 static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
809 					     const struct sk_buff *skb)
810 {
811 	/* This BUG_ON may seem severe, but if we just return then we
812 	 * are going to dereference garbage.
813 	 */
814 	BUG_ON(skb_queue_is_first(list, skb));
815 	return skb->prev;
816 }
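/* A minimal sketch of walking a queue with the helpers above (the caller is
 * assumed to hold whatever lock protects @list; process() is hypothetical):
 *
 *	struct sk_buff *skb = skb_peek(list);
 *
 *	while (skb) {
 *		process(skb);
 *		if (skb_queue_is_last(list, skb))
 *			break;
 *		skb = skb_queue_next(list, skb);
 *	}
 */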
817 
818 /**
819  *	skb_get - reference buffer
820  *	@skb: buffer to reference
821  *
822  *	Makes another reference to a socket buffer and returns a pointer
823  *	to the buffer.
824  */
825 static inline struct sk_buff *skb_get(struct sk_buff *skb)
826 {
827 	atomic_inc(&skb->users);
828 	return skb;
829 }
830 
831 /*
832  * If users == 1, we are the only owner and can avoid redundant
833  * atomic changes.
834  */
835 
836 /**
837  *	skb_cloned - is the buffer a clone
838  *	@skb: buffer to check
839  *
840  *	Returns true if the buffer was generated with skb_clone() and is
841  *	one of multiple shared copies of the buffer. Cloned buffers are
842  *	shared data so must not be written to under normal circumstances.
843  */
844 static inline int skb_cloned(const struct sk_buff *skb)
845 {
846 	return skb->cloned &&
847 	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
848 }
849 
850 static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
851 {
852 	might_sleep_if(pri & __GFP_WAIT);
853 
854 	if (skb_cloned(skb))
855 		return pskb_expand_head(skb, 0, 0, pri);
856 
857 	return 0;
858 }
859 
860 /**
861  *	skb_header_cloned - is the header a clone
862  *	@skb: buffer to check
863  *
864  *	Returns true if modifying the header part of the buffer requires
865  *	the data to be copied.
866  */
867 static inline int skb_header_cloned(const struct sk_buff *skb)
868 {
869 	int dataref;
870 
871 	if (!skb->cloned)
872 		return 0;
873 
874 	dataref = atomic_read(&skb_shinfo(skb)->dataref);
875 	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
876 	return dataref != 1;
877 }
878 
879 /**
880  *	skb_header_release - release reference to header
881  *	@skb: buffer to operate on
882  *
883  *	Drop a reference to the header part of the buffer.  This is done
884  *	by acquiring a payload reference.  You must not read from the header
885  *	part of skb->data after this.
886  */
887 static inline void skb_header_release(struct sk_buff *skb)
888 {
889 	BUG_ON(skb->nohdr);
890 	skb->nohdr = 1;
891 	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
892 }
893 
894 /**
895  *	skb_shared - is the buffer shared
896  *	@skb: buffer to check
897  *
898  *	Returns true if more than one person has a reference to this
899  *	buffer.
900  */
901 static inline int skb_shared(const struct sk_buff *skb)
902 {
903 	return atomic_read(&skb->users) != 1;
904 }
905 
906 /**
907  *	skb_share_check - check if buffer is shared and if so clone it
908  *	@skb: buffer to check
909  *	@pri: priority for memory allocation
910  *
911  *	If the buffer is shared the buffer is cloned and the old copy
912  *	drops a reference. A new clone with a single reference is returned.
913  *	If the buffer is not shared the original buffer is returned. When
914  *	called from interrupt context or with spinlocks held, @pri must
915  *	be %GFP_ATOMIC.
916  *
917  *	NULL is returned on a memory allocation failure.
918  */
919 static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
920 {
921 	might_sleep_if(pri & __GFP_WAIT);
922 	if (skb_shared(skb)) {
923 		struct sk_buff *nskb = skb_clone(skb, pri);
924 
925 		if (likely(nskb))
926 			consume_skb(skb);
927 		else
928 			kfree_skb(skb);
929 		skb = nskb;
930 	}
931 	return skb;
932 }
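/* Typical use in a packet handler (illustrative sketch): take ownership of a
 * possibly shared skb before modifying it, dropping it on allocation failure.
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (!skb)
 *		return NET_RX_DROP;
 */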
933 
934 /*
935  *	Copy shared buffers into a new sk_buff. We effectively do COW on
936  *	packets to handle cases where we have a local reader and a forwarder,
937  *	and a couple of other messy ones. The normal one is tcpdumping
938  *	a packet that's being forwarded.
939  */
940 
941 /**
942  *	skb_unshare - make a copy of a shared buffer
943  *	@skb: buffer to check
944  *	@pri: priority for memory allocation
945  *
946  *	If the socket buffer is a clone then this function creates a new
947  *	copy of the data, drops a reference count on the old copy and returns
948  *	the new copy with the reference count at 1. If the buffer is not a clone
949  *	the original buffer is returned. When called with a spinlock held or
950  *	from interrupt context, @pri must be %GFP_ATOMIC.
951  *
952  *	%NULL is returned on a memory allocation failure.
953  */
954 static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
955 					  gfp_t pri)
956 {
957 	might_sleep_if(pri & __GFP_WAIT);
958 	if (skb_cloned(skb)) {
959 		struct sk_buff *nskb = skb_copy(skb, pri);
960 		kfree_skb(skb);	/* Free our shared copy */
961 		skb = nskb;
962 	}
963 	return skb;
964 }
965 
966 /**
967  *	skb_peek - peek at the head of an &sk_buff_head
968  *	@list_: list to peek at
969  *
970  *	Peek an &sk_buff. Unlike most other operations you _MUST_
971  *	be careful with this one. A peek leaves the buffer on the
972  *	list and someone else may run off with it. You must hold
973  *	the appropriate locks or have a private queue to do this.
974  *
975  *	Returns %NULL for an empty list or a pointer to the head element.
976  *	The reference count is not incremented and the reference is therefore
977  *	volatile. Use with caution.
978  */
979 static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
980 {
981 	struct sk_buff *skb = list_->next;
982 
983 	if (skb == (struct sk_buff *)list_)
984 		skb = NULL;
985 	return skb;
986 }
987 
988 /**
989  *	skb_peek_next - peek skb following the given one from a queue
990  *	@skb: skb to start from
991  *	@list_: list to peek at
992  *
993  *	Returns %NULL when the end of the list is met or a pointer to the
994  *	next element. The reference count is not incremented and the
995  *	reference is therefore volatile. Use with caution.
996  */
997 static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
998 		const struct sk_buff_head *list_)
999 {
1000 	struct sk_buff *next = skb->next;
1001 
1002 	if (next == (struct sk_buff *)list_)
1003 		next = NULL;
1004 	return next;
1005 }
1006 
1007 /**
1008  *	skb_peek_tail - peek at the tail of an &sk_buff_head
1009  *	@list_: list to peek at
1010  *
1011  *	Peek an &sk_buff. Unlike most other operations you _MUST_
1012  *	be careful with this one. A peek leaves the buffer on the
1013  *	list and someone else may run off with it. You must hold
1014  *	the appropriate locks or have a private queue to do this.
1015  *
1016  *	Returns %NULL for an empty list or a pointer to the tail element.
1017  *	The reference count is not incremented and the reference is therefore
1018  *	volatile. Use with caution.
1019  */
1020 static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
1021 {
1022 	struct sk_buff *skb = list_->prev;
1023 
1024 	if (skb == (struct sk_buff *)list_)
1025 		skb = NULL;
1026 	return skb;
1027 
1028 }
1029 
1030 /**
1031  *	skb_queue_len	- get queue length
1032  *	@list_: list to measure
1033  *
1034  *	Return the length of an &sk_buff queue.
1035  */
1036 static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
1037 {
1038 	return list_->qlen;
1039 }
1040 
1041 /**
1042  *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
1043  *	@list: queue to initialize
1044  *
1045  *	This initializes only the list and queue length aspects of
1046  *	an sk_buff_head object.  This allows the list aspects of an
1047  *	sk_buff_head to be initialized without reinitializing things like
1048  *	the spinlock.  It can also be used for on-stack sk_buff_head
1049  *	objects where the spinlock is known to not be used.
1050  */
1051 static inline void __skb_queue_head_init(struct sk_buff_head *list)
1052 {
1053 	list->prev = list->next = (struct sk_buff *)list;
1054 	list->qlen = 0;
1055 }
1056 
1057 /*
1058  * This function creates a split out lock class for each invocation;
1059  * this is needed for now since a whole lot of users of the skb-queue
1060  * infrastructure in drivers have different locking usage (in hardirq)
1061  * than the networking core (in softirq only). In the long run either the
1062  * network layer or the drivers should gain annotations to consolidate the
1063  * main types of usage into 3 classes.
1064  */
1065 static inline void skb_queue_head_init(struct sk_buff_head *list)
1066 {
1067 	spin_lock_init(&list->lock);
1068 	__skb_queue_head_init(list);
1069 }
1070 
1071 static inline void skb_queue_head_init_class(struct sk_buff_head *list,
1072 		struct lock_class_key *class)
1073 {
1074 	skb_queue_head_init(list);
1075 	lockdep_set_class(&list->lock, class);
1076 }
1077 
1078 /*
1079  *	Insert an sk_buff on a list.
1080  *
1081  *	The "__skb_xxxx()" functions are the non-atomic ones that
1082  *	can only be called with interrupts disabled.
1083  */
1084 void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
1085 		struct sk_buff_head *list);
1086 static inline void __skb_insert(struct sk_buff *newsk,
1087 				struct sk_buff *prev, struct sk_buff *next,
1088 				struct sk_buff_head *list)
1089 {
1090 	newsk->next = next;
1091 	newsk->prev = prev;
1092 	next->prev  = prev->next = newsk;
1093 	list->qlen++;
1094 }
1095 
1096 static inline void __skb_queue_splice(const struct sk_buff_head *list,
1097 				      struct sk_buff *prev,
1098 				      struct sk_buff *next)
1099 {
1100 	struct sk_buff *first = list->next;
1101 	struct sk_buff *last = list->prev;
1102 
1103 	first->prev = prev;
1104 	prev->next = first;
1105 
1106 	last->next = next;
1107 	next->prev = last;
1108 }
1109 
1110 /**
1111  *	skb_queue_splice - join two skb lists, this is designed for stacks
1112  *	@list: the new list to add
1113  *	@head: the place to add it in the first list
1114  */
1115 static inline void skb_queue_splice(const struct sk_buff_head *list,
1116 				    struct sk_buff_head *head)
1117 {
1118 	if (!skb_queue_empty(list)) {
1119 		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
1120 		head->qlen += list->qlen;
1121 	}
1122 }
1123 
1124 /**
1125  *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
1126  *	@list: the new list to add
1127  *	@head: the place to add it in the first list
1128  *
1129  *	The list at @list is reinitialised
1130  */
1131 static inline void skb_queue_splice_init(struct sk_buff_head *list,
1132 					 struct sk_buff_head *head)
1133 {
1134 	if (!skb_queue_empty(list)) {
1135 		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
1136 		head->qlen += list->qlen;
1137 		__skb_queue_head_init(list);
1138 	}
1139 }
1140 
1141 /**
1142  *	skb_queue_splice_tail - join two skb lists, each list being a queue
1143  *	@list: the new list to add
1144  *	@head: the place to add it in the first list
1145  */
1146 static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
1147 					 struct sk_buff_head *head)
1148 {
1149 	if (!skb_queue_empty(list)) {
1150 		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
1151 		head->qlen += list->qlen;
1152 	}
1153 }
1154 
1155 /**
1156  *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
1157  *	@list: the new list to add
1158  *	@head: the place to add it in the first list
1159  *
1160  *	Each of the lists is a queue.
1161  *	The list at @list is reinitialised
1162  */
1163 static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
1164 					      struct sk_buff_head *head)
1165 {
1166 	if (!skb_queue_empty(list)) {
1167 		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
1168 		head->qlen += list->qlen;
1169 		__skb_queue_head_init(list);
1170 	}
1171 }
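/* A common pattern (illustrative sketch): atomically move everything queued
 * on a locked, shared queue onto a private on-stack list for later
 * processing outside the lock. shared_queue is a hypothetical sk_buff_head.
 *
 *	struct sk_buff_head tmp;
 *	unsigned long flags;
 *
 *	__skb_queue_head_init(&tmp);
 *	spin_lock_irqsave(&shared_queue.lock, flags);
 *	skb_queue_splice_tail_init(&shared_queue, &tmp);
 *	spin_unlock_irqrestore(&shared_queue.lock, flags);
 */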
1172 
1173 /**
1174  *	__skb_queue_after - queue a buffer after a given buffer on the list
1175  *	@list: list to use
1176  *	@prev: place after this buffer
1177  *	@newsk: buffer to queue
1178  *
1179  *	Queue a buffer in the middle of a list. This function takes no locks
1180  *	and you must therefore hold required locks before calling it.
1181  *
1182  *	A buffer cannot be placed on two lists at the same time.
1183  */
1184 static inline void __skb_queue_after(struct sk_buff_head *list,
1185 				     struct sk_buff *prev,
1186 				     struct sk_buff *newsk)
1187 {
1188 	__skb_insert(newsk, prev, prev->next, list);
1189 }
1190 
1191 void skb_append(struct sk_buff *old, struct sk_buff *newsk,
1192 		struct sk_buff_head *list);
1193 
1194 static inline void __skb_queue_before(struct sk_buff_head *list,
1195 				      struct sk_buff *next,
1196 				      struct sk_buff *newsk)
1197 {
1198 	__skb_insert(newsk, next->prev, next, list);
1199 }
1200 
1201 /**
1202  *	__skb_queue_head - queue a buffer at the list head
1203  *	@list: list to use
1204  *	@newsk: buffer to queue
1205  *
1206  *	Queue a buffer at the start of a list. This function takes no locks
1207  *	and you must therefore hold required locks before calling it.
1208  *
1209  *	A buffer cannot be placed on two lists at the same time.
1210  */
1211 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
1212 static inline void __skb_queue_head(struct sk_buff_head *list,
1213 				    struct sk_buff *newsk)
1214 {
1215 	__skb_queue_after(list, (struct sk_buff *)list, newsk);
1216 }
1217 
1218 /**
1219  *	__skb_queue_tail - queue a buffer at the list tail
1220  *	@list: list to use
1221  *	@newsk: buffer to queue
1222  *
1223  *	Queue a buffer at the end of a list. This function takes no locks
1224  *	and you must therefore hold required locks before calling it.
1225  *
1226  *	A buffer cannot be placed on two lists at the same time.
1227  */
1228 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
1229 static inline void __skb_queue_tail(struct sk_buff_head *list,
1230 				   struct sk_buff *newsk)
1231 {
1232 	__skb_queue_before(list, (struct sk_buff *)list, newsk);
1233 }
1234 
1235 /*
1236  * remove sk_buff from list. _Must_ be called atomically, and with
1237  * the list known.
1238  */
1239 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
1240 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
1241 {
1242 	struct sk_buff *next, *prev;
1243 
1244 	list->qlen--;
1245 	next	   = skb->next;
1246 	prev	   = skb->prev;
1247 	skb->next  = skb->prev = NULL;
1248 	next->prev = prev;
1249 	prev->next = next;
1250 }
1251 
1252 /**
1253  *	__skb_dequeue - remove from the head of the queue
1254  *	@list: list to dequeue from
1255  *
1256  *	Remove the head of the list. This function does not take any locks
1257  *	so must be used with appropriate locks held only. The head item is
1258  *	returned or %NULL if the list is empty.
1259  */
1260 struct sk_buff *skb_dequeue(struct sk_buff_head *list);
1261 static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
1262 {
1263 	struct sk_buff *skb = skb_peek(list);
1264 	if (skb)
1265 		__skb_unlink(skb, list);
1266 	return skb;
1267 }
1268 
1269 /**
1270  *	__skb_dequeue_tail - remove from the tail of the queue
1271  *	@list: list to dequeue from
1272  *
1273  *	Remove the tail of the list. This function does not take any locks
1274  *	so must be used with appropriate locks held only. The tail item is
1275  *	returned or %NULL if the list is empty.
1276  */
1277 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
1278 static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
1279 {
1280 	struct sk_buff *skb = skb_peek_tail(list);
1281 	if (skb)
1282 		__skb_unlink(skb, list);
1283 	return skb;
1284 }
1285 
1286 
1287 static inline bool skb_is_nonlinear(const struct sk_buff *skb)
1288 {
1289 	return skb->data_len;
1290 }
1291 
1292 static inline unsigned int skb_headlen(const struct sk_buff *skb)
1293 {
1294 	return skb->len - skb->data_len;
1295 }
1296 
1297 static inline int skb_pagelen(const struct sk_buff *skb)
1298 {
1299 	int i, len = 0;
1300 
1301 	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
1302 		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
1303 	return len + skb_headlen(skb);
1304 }
1305 
1306 /**
1307  * __skb_fill_page_desc - initialise a paged fragment in an skb
1308  * @skb: buffer containing fragment to be initialised
1309  * @i: paged fragment index to initialise
1310  * @page: the page to use for this fragment
1311  * @off: the offset to the data within @page
1312  * @size: the length of the data
1313  *
1314  * Initialises the @i'th fragment of @skb to point to @size bytes at
1315  * offset @off within @page.
1316  *
1317  * Does not take any additional reference on the fragment.
1318  */
1319 static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
1320 					struct page *page, int off, int size)
1321 {
1322 	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1323 
1324 	/*
1325 	 * Propagate page->pfmemalloc to the skb if we can. The problem is
1326 	 * that not all callers have unique ownership of the page. If
1327 	 * pfmemalloc is set, we check the mapping as a mapping implies
1328 	 * page->index is set (index and pfmemalloc share space).
1329 	 * If it's a valid mapping, we cannot use page->pfmemalloc but we
1330 	 * do not lose pfmemalloc information as the pages would not be
1331 	 * allocated using __GFP_MEMALLOC.
1332 	 */
1333 	frag->page.p		  = page;
1334 	frag->page_offset	  = off;
1335 	skb_frag_size_set(frag, size);
1336 
1337 	page = compound_head(page);
1338 	if (page->pfmemalloc && !page->mapping)
1339 		skb->pfmemalloc	= true;
1340 }
1341 
1342 /**
1343  * skb_fill_page_desc - initialise a paged fragment in an skb
1344  * @skb: buffer containing fragment to be initialised
1345  * @i: paged fragment index to initialise
1346  * @page: the page to use for this fragment
1347  * @off: the offset to the data within @page
1348  * @size: the length of the data
1349  *
1350  * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
1351  * @skb to point to @size bytes at offset @off within @page. In
1352  * addition updates @skb such that @i is the last fragment.
1353  *
1354  * Does not take any additional reference on the fragment.
1355  */
1356 static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
1357 				      struct page *page, int off, int size)
1358 {
1359 	__skb_fill_page_desc(skb, i, page, off, size);
1360 	skb_shinfo(skb)->nr_frags = i + 1;
1361 }
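/* For illustration (hypothetical receive path): attach one freshly DMA'd page
 * as the first fragment of an skb; page and frag_len come from the driver's
 * RX ring.
 *
 *	skb_fill_page_desc(skb, 0, page, 0, frag_len);
 *	skb->len      += frag_len;
 *	skb->data_len += frag_len;
 *	skb->truesize += PAGE_SIZE;
 *
 * Real drivers often use skb_add_rx_frag(), declared below, which does the
 * len/data_len/truesize accounting itself.
 */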
1362 
1363 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
1364 		     int size, unsigned int truesize);
1365 
1366 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
1367 			  unsigned int truesize);
1368 
1369 #define SKB_PAGE_ASSERT(skb) 	BUG_ON(skb_shinfo(skb)->nr_frags)
1370 #define SKB_FRAG_ASSERT(skb) 	BUG_ON(skb_has_frag_list(skb))
1371 #define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))
1372 
1373 #ifdef NET_SKBUFF_DATA_USES_OFFSET
1374 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
1375 {
1376 	return skb->head + skb->tail;
1377 }
1378 
1379 static inline void skb_reset_tail_pointer(struct sk_buff *skb)
1380 {
1381 	skb->tail = skb->data - skb->head;
1382 }
1383 
1384 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
1385 {
1386 	skb_reset_tail_pointer(skb);
1387 	skb->tail += offset;
1388 }
1389 
1390 #else /* NET_SKBUFF_DATA_USES_OFFSET */
1391 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
1392 {
1393 	return skb->tail;
1394 }
1395 
1396 static inline void skb_reset_tail_pointer(struct sk_buff *skb)
1397 {
1398 	skb->tail = skb->data;
1399 }
1400 
1401 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
1402 {
1403 	skb->tail = skb->data + offset;
1404 }
1405 
1406 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
1407 
1408 /*
1409  *	Add data to an sk_buff
1410  */
1411 unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
1412 unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
1413 static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
1414 {
1415 	unsigned char *tmp = skb_tail_pointer(skb);
1416 	SKB_LINEAR_ASSERT(skb);
1417 	skb->tail += len;
1418 	skb->len  += len;
1419 	return tmp;
1420 }
1421 
1422 unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
1423 static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
1424 {
1425 	skb->data -= len;
1426 	skb->len  += len;
1427 	return skb->data;
1428 }
1429 
1430 unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
1431 static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
1432 {
1433 	skb->len -= len;
1434 	BUG_ON(skb->len < skb->data_len);
1435 	return skb->data += len;
1436 }
1437 
1438 static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
1439 {
1440 	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
1441 }
1442 
1443 unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
1444 
1445 static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
1446 {
1447 	if (len > skb_headlen(skb) &&
1448 	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
1449 		return NULL;
1450 	skb->len -= len;
1451 	return skb->data += len;
1452 }
1453 
1454 static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
1455 {
1456 	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
1457 }
1458 
1459 static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
1460 {
1461 	if (likely(len <= skb_headlen(skb)))
1462 		return 1;
1463 	if (unlikely(len > skb->len))
1464 		return 0;
1465 	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
1466 }
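/* Typical header-parsing pattern (illustrative sketch): make sure the bytes
 * about to be dereferenced are present in the linear area before touching
 * them. struct iphdr comes from <linux/ip.h>.
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;
 *	iph = (struct iphdr *)skb->data;
 *
 * Note that __pskb_pull_tail() may reallocate the header, so pointers into
 * skb->data cached before the call must be recomputed afterwards.
 */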
1467 
1468 /**
1469  *	skb_headroom - bytes at buffer head
1470  *	@skb: buffer to check
1471  *
1472  *	Return the number of bytes of free space at the head of an &sk_buff.
1473  */
1474 static inline unsigned int skb_headroom(const struct sk_buff *skb)
1475 {
1476 	return skb->data - skb->head;
1477 }
1478 
1479 /**
1480  *	skb_tailroom - bytes at buffer end
1481  *	@skb: buffer to check
1482  *
1483  *	Return the number of bytes of free space at the tail of an sk_buff
1484  */
1485 static inline int skb_tailroom(const struct sk_buff *skb)
1486 {
1487 	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
1488 }
1489 
1490 /**
1491  *	skb_availroom - bytes at buffer end
1492  *	@skb: buffer to check
1493  *
1494  *	Return the number of bytes of free space at the tail of an sk_buff
1495  *	allocated by sk_stream_alloc()
1496  */
1497 static inline int skb_availroom(const struct sk_buff *skb)
1498 {
1499 	if (skb_is_nonlinear(skb))
1500 		return 0;
1501 
1502 	return skb->end - skb->tail - skb->reserved_tailroom;
1503 }
1504 
1505 /**
1506  *	skb_reserve - adjust headroom
1507  *	@skb: buffer to alter
1508  *	@len: bytes to move
1509  *
1510  *	Increase the headroom of an empty &sk_buff by reducing the tail
1511  *	room. This is only allowed for an empty buffer.
1512  */
1513 static inline void skb_reserve(struct sk_buff *skb, int len)
1514 {
1515 	skb->data += len;
1516 	skb->tail += len;
1517 }
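/* A minimal sketch of building a packet with the primitives above (hlen,
 * dlen and data are illustrative):
 *
 *	skb = alloc_skb(hlen + dlen, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hlen);			// headroom for lower layers
 *	memcpy(skb_put(skb, dlen), data, dlen);	// append dlen bytes of payload
 *
 * Protocol headers are later prepended into the reserved headroom with
 * skb_push().
 */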
1518 
1519 static inline void skb_reset_inner_headers(struct sk_buff *skb)
1520 {
1521 	skb->inner_mac_header = skb->mac_header;
1522 	skb->inner_network_header = skb->network_header;
1523 	skb->inner_transport_header = skb->transport_header;
1524 }
1525 
1526 static inline void skb_reset_mac_len(struct sk_buff *skb)
1527 {
1528 	skb->mac_len = skb->network_header - skb->mac_header;
1529 }
1530 
1531 static inline unsigned char *skb_inner_transport_header(const struct sk_buff
1532 							*skb)
1533 {
1534 	return skb->head + skb->inner_transport_header;
1535 }
1536 
1537 static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
1538 {
1539 	skb->inner_transport_header = skb->data - skb->head;
1540 }
1541 
1542 static inline void skb_set_inner_transport_header(struct sk_buff *skb,
1543 						   const int offset)
1544 {
1545 	skb_reset_inner_transport_header(skb);
1546 	skb->inner_transport_header += offset;
1547 }
1548 
1549 static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
1550 {
1551 	return skb->head + skb->inner_network_header;
1552 }
1553 
1554 static inline void skb_reset_inner_network_header(struct sk_buff *skb)
1555 {
1556 	skb->inner_network_header = skb->data - skb->head;
1557 }
1558 
1559 static inline void skb_set_inner_network_header(struct sk_buff *skb,
1560 						const int offset)
1561 {
1562 	skb_reset_inner_network_header(skb);
1563 	skb->inner_network_header += offset;
1564 }
1565 
1566 static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
1567 {
1568 	return skb->head + skb->inner_mac_header;
1569 }
1570 
1571 static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
1572 {
1573 	skb->inner_mac_header = skb->data - skb->head;
1574 }
1575 
1576 static inline void skb_set_inner_mac_header(struct sk_buff *skb,
1577 					    const int offset)
1578 {
1579 	skb_reset_inner_mac_header(skb);
1580 	skb->inner_mac_header += offset;
1581 }
1582 static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
1583 {
1584 	return skb->transport_header != (typeof(skb->transport_header))~0U;
1585 }
1586 
1587 static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
1588 {
1589 	return skb->head + skb->transport_header;
1590 }
1591 
1592 static inline void skb_reset_transport_header(struct sk_buff *skb)
1593 {
1594 	skb->transport_header = skb->data - skb->head;
1595 }
1596 
1597 static inline void skb_set_transport_header(struct sk_buff *skb,
1598 					    const int offset)
1599 {
1600 	skb_reset_transport_header(skb);
1601 	skb->transport_header += offset;
1602 }
1603 
1604 static inline unsigned char *skb_network_header(const struct sk_buff *skb)
1605 {
1606 	return skb->head + skb->network_header;
1607 }
1608 
1609 static inline void skb_reset_network_header(struct sk_buff *skb)
1610 {
1611 	skb->network_header = skb->data - skb->head;
1612 }
1613 
1614 static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
1615 {
1616 	skb_reset_network_header(skb);
1617 	skb->network_header += offset;
1618 }
1619 
1620 static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
1621 {
1622 	return skb->head + skb->mac_header;
1623 }
1624 
1625 static inline int skb_mac_header_was_set(const struct sk_buff *skb)
1626 {
1627 	return skb->mac_header != (typeof(skb->mac_header))~0U;
1628 }
1629 
1630 static inline void skb_reset_mac_header(struct sk_buff *skb)
1631 {
1632 	skb->mac_header = skb->data - skb->head;
1633 }
1634 
1635 static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
1636 {
1637 	skb_reset_mac_header(skb);
1638 	skb->mac_header += offset;
1639 }
1640 
1641 static inline void skb_probe_transport_header(struct sk_buff *skb,
1642 					      const int offset_hint)
1643 {
1644 	struct flow_keys keys;
1645 
1646 	if (skb_transport_header_was_set(skb))
1647 		return;
1648 	else if (skb_flow_dissect(skb, &keys))
1649 		skb_set_transport_header(skb, keys.thoff);
1650 	else
1651 		skb_set_transport_header(skb, offset_hint);
1652 }
1653 
1654 static inline void skb_mac_header_rebuild(struct sk_buff *skb)
1655 {
1656 	if (skb_mac_header_was_set(skb)) {
1657 		const unsigned char *old_mac = skb_mac_header(skb);
1658 
1659 		skb_set_mac_header(skb, -skb->mac_len);
1660 		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
1661 	}
1662 }
1663 
1664 static inline int skb_checksum_start_offset(const struct sk_buff *skb)
1665 {
1666 	return skb->csum_start - skb_headroom(skb);
1667 }
1668 
1669 static inline int skb_transport_offset(const struct sk_buff *skb)
1670 {
1671 	return skb_transport_header(skb) - skb->data;
1672 }
1673 
1674 static inline u32 skb_network_header_len(const struct sk_buff *skb)
1675 {
1676 	return skb->transport_header - skb->network_header;
1677 }
1678 
1679 static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
1680 {
1681 	return skb->inner_transport_header - skb->inner_network_header;
1682 }
1683 
1684 static inline int skb_network_offset(const struct sk_buff *skb)
1685 {
1686 	return skb_network_header(skb) - skb->data;
1687 }
1688 
1689 static inline int skb_inner_network_offset(const struct sk_buff *skb)
1690 {
1691 	return skb_inner_network_header(skb) - skb->data;
1692 }
1693 
1694 static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
1695 {
1696 	return pskb_may_pull(skb, skb_network_offset(skb) + len);
1697 }
1698 
1699 /*
1700  * CPUs often take a performance hit when accessing unaligned memory
1701  * locations. The actual performance hit varies; it can be small if the
1702  * hardware handles it or large if we have to take an exception and fix it
1703  * in software.
1704  *
1705  * Since an ethernet header is 14 bytes network drivers often end up with
1706  * the IP header at an unaligned offset. The IP header can be aligned by
1707  * shifting the start of the packet by 2 bytes. Drivers should do this
1708  * with:
1709  *
1710  * skb_reserve(skb, NET_IP_ALIGN);
1711  *
1712  * The downside to this alignment of the IP header is that the DMA is now
1713  * unaligned. On some architectures the cost of an unaligned DMA is high
1714  * and this cost outweighs the gains made by aligning the IP header.
1715  *
1716  * Since this trade-off varies between architectures, we allow NET_IP_ALIGN
1717  * to be overridden.
1718  */
1719 #ifndef NET_IP_ALIGN
1720 #define NET_IP_ALIGN	2
1721 #endif
1722 
1723 /*
1724  * The networking layer reserves some headroom in skb data (via
1725  * dev_alloc_skb). This is used to avoid having to reallocate skb data when
1726  * the header has to grow. In the default case, if the header has to grow
1727  * 32 bytes or less we avoid the reallocation.
1728  *
1729  * Unfortunately this headroom changes the DMA alignment of the resulting
1730  * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
1731  * on some architectures. An architecture can override this value,
1732  * perhaps setting it to a cacheline in size (since that will maintain
1733  * cacheline alignment of the DMA). It must be a power of 2.
1734  *
1735  * Various parts of the networking layer expect at least 32 bytes of
1736  * headroom; you should not reduce this.
1737  *
1738  * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
1739  * to reduce the average number of cache lines per packet.
1740  * get_rps_cpus(), for example, only accesses one 64-byte aligned block:
1741  * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
1742  */
1743 #ifndef NET_SKB_PAD
1744 #define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
1745 #endif
1746 
1747 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
1748 
1749 static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
1750 {
1751 	if (unlikely(skb_is_nonlinear(skb))) {
1752 		WARN_ON(1);
1753 		return;
1754 	}
1755 	skb->len = len;
1756 	skb_set_tail_pointer(skb, len);
1757 }
1758 
1759 void skb_trim(struct sk_buff *skb, unsigned int len);
1760 
1761 static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
1762 {
1763 	if (skb->data_len)
1764 		return ___pskb_trim(skb, len);
1765 	__skb_trim(skb, len);
1766 	return 0;
1767 }
1768 
1769 static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
1770 {
1771 	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
1772 }
1773 
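/* Illustrative sketch (not part of this header): trimming trailing bytes,
 * for example hardware padding reported beyond the real frame length.
 * 'real_len' and the drop label are hypothetical.
 *
 *	if (pskb_trim(skb, real_len))
 *		goto drop;
 */
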
1774 /**
1775  *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
1776  *	@skb: buffer to alter
1777  *	@len: new length
1778  *
1779  *	This is identical to pskb_trim except that the caller knows that
1780  *	the skb is not cloned so we should never get an error due to out-
1781  *	of-memory.
1782  */
1783 static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
1784 {
1785 	int err = pskb_trim(skb, len);
1786 	BUG_ON(err);
1787 }
1788 
1789 /**
1790  *	skb_orphan - orphan a buffer
1791  *	@skb: buffer to orphan
1792  *
1793  *	If a buffer currently has an owner then we call the owner's
1794  *	destructor function and make the @skb unowned. The buffer continues
1795  *	to exist but is no longer charged to its former owner.
1796  */
1797 static inline void skb_orphan(struct sk_buff *skb)
1798 {
1799 	if (skb->destructor) {
1800 		skb->destructor(skb);
1801 		skb->destructor = NULL;
1802 		skb->sk		= NULL;
1803 	} else {
1804 		BUG_ON(skb->sk);
1805 	}
1806 }
1807 
1808 /**
1809  *	skb_orphan_frags - orphan the frags contained in a buffer
1810  *	@skb: buffer to orphan frags from
1811  *	@gfp_mask: allocation mask for replacement pages
1812  *
1813  *	For each frag in the SKB which needs a destructor (i.e. has an
1814  *	owner) create a copy of that frag and release the original
1815  *	page by calling the destructor.
1816  */
1817 static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
1818 {
1819 	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
1820 		return 0;
1821 	return skb_copy_ubufs(skb, gfp_mask);
1822 }
1823 
1824 /**
1825  *	__skb_queue_purge - empty a list
1826  *	@list: list to empty
1827  *
1828  *	Delete all buffers on an &sk_buff list. Each buffer is removed from
1829  *	the list and one reference dropped. This function does not take the
1830  *	list lock and the caller must hold the relevant locks to use it.
1831  */
1832 void skb_queue_purge(struct sk_buff_head *list);
1833 static inline void __skb_queue_purge(struct sk_buff_head *list)
1834 {
1835 	struct sk_buff *skb;
1836 	while ((skb = __skb_dequeue(list)) != NULL)
1837 		kfree_skb(skb);
1838 }
1839 
1840 #define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768)
1841 #define NETDEV_FRAG_PAGE_MAX_SIZE  (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
1842 #define NETDEV_PAGECNT_MAX_BIAS	   NETDEV_FRAG_PAGE_MAX_SIZE
1843 
1844 void *netdev_alloc_frag(unsigned int fragsz);
1845 
1846 struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
1847 				   gfp_t gfp_mask);
1848 
1849 /**
1850  *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
1851  *	@dev: network device to receive on
1852  *	@length: length to allocate
1853  *
1854  *	Allocate a new &sk_buff and assign it a usage count of one. The
1855  *	buffer has unspecified headroom built in. Users should allocate
1856  *	the headroom they think they need without accounting for the
1857  *	built-in space. The built-in space is used for optimisations.
1858  *
1859  *	%NULL is returned if there is no free memory. Although this function
1860  *	allocates memory, it can be called from an interrupt.
1861  */
1862 static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
1863 					       unsigned int length)
1864 {
1865 	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
1866 }
1867 
1868 /* legacy helper around __netdev_alloc_skb() */
1869 static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
1870 					      gfp_t gfp_mask)
1871 {
1872 	return __netdev_alloc_skb(NULL, length, gfp_mask);
1873 }
1874 
1875 /* legacy helper around netdev_alloc_skb() */
1876 static inline struct sk_buff *dev_alloc_skb(unsigned int length)
1877 {
1878 	return netdev_alloc_skb(NULL, length);
1879 }
1880 
1881 
1882 static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
1883 		unsigned int length, gfp_t gfp)
1884 {
1885 	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
1886 
1887 	if (NET_IP_ALIGN && skb)
1888 		skb_reserve(skb, NET_IP_ALIGN);
1889 	return skb;
1890 }
1891 
1892 static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
1893 		unsigned int length)
1894 {
1895 	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
1896 }
1897 
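/* Illustrative sketch (not part of this header): a typical driver receive
 * path allocates an IP-aligned skb, copies or DMAs the frame in, and sets
 * the final length. 'dev' and 'pkt_len' are hypothetical.
 *
 *	skb = netdev_alloc_skb_ip_align(dev, pkt_len);
 *	if (unlikely(!skb))
 *		return NULL;
 *	skb_put(skb, pkt_len);
 */
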
1898 /**
1899  *	__skb_alloc_pages - allocate pages for ps-rx on a skb and preserve pfmemalloc data
1900  *	@gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX
1901  *	@skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used
1902  *	@order: size of the allocation
1903  *
1904  * 	Allocate a new page.
1905  *
1906  * 	%NULL is returned if there is no free memory.
1907  */
1908 static inline struct page *__skb_alloc_pages(gfp_t gfp_mask,
1909 					      struct sk_buff *skb,
1910 					      unsigned int order)
1911 {
1912 	struct page *page;
1913 
1914 	gfp_mask |= __GFP_COLD;
1915 
1916 	if (!(gfp_mask & __GFP_NOMEMALLOC))
1917 		gfp_mask |= __GFP_MEMALLOC;
1918 
1919 	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
1920 	if (skb && page && page->pfmemalloc)
1921 		skb->pfmemalloc = true;
1922 
1923 	return page;
1924 }
1925 
1926 /**
1927  *	__skb_alloc_page - allocate a page for ps-rx for a given skb and preserve pfmemalloc data
1928  *	@gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX
1929  *	@skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used
1930  *
1931  * 	Allocate a new page.
1932  *
1933  * 	%NULL is returned if there is no free memory.
1934  */
1935 static inline struct page *__skb_alloc_page(gfp_t gfp_mask,
1936 					     struct sk_buff *skb)
1937 {
1938 	return __skb_alloc_pages(gfp_mask, skb, 0);
1939 }
1940 
1941 /**
1942  *	skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
1943  *	@page: The page that was allocated from __skb_alloc_page
1944  *	@skb: The skb that may need pfmemalloc set
1945  */
1946 static inline void skb_propagate_pfmemalloc(struct page *page,
1947 					     struct sk_buff *skb)
1948 {
1949 	if (page && page->pfmemalloc)
1950 		skb->pfmemalloc = true;
1951 }
1952 
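/* Illustrative sketch (not part of this header): an RX path that allocates
 * the page before the skb can carry the pfmemalloc state over once the skb
 * exists. The variable names are hypothetical.
 *
 *	page = __skb_alloc_page(GFP_ATOMIC, NULL);
 *
 * and later, once the skb covering that page has been allocated:
 *
 *	skb_propagate_pfmemalloc(page, skb);
 */
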
1953 /**
1954  * skb_frag_page - retrieve the page referred to by a paged fragment
1955  * @frag: the paged fragment
1956  *
1957  * Returns the &struct page associated with @frag.
1958  */
1959 static inline struct page *skb_frag_page(const skb_frag_t *frag)
1960 {
1961 	return frag->page.p;
1962 }
1963 
1964 /**
1965  * __skb_frag_ref - take an additional reference on a paged fragment.
1966  * @frag: the paged fragment
1967  *
1968  * Takes an additional reference on the paged fragment @frag.
1969  */
1970 static inline void __skb_frag_ref(skb_frag_t *frag)
1971 {
1972 	get_page(skb_frag_page(frag));
1973 }
1974 
1975 /**
1976  * skb_frag_ref - take an additional reference on a paged fragment of an skb.
1977  * @skb: the buffer
1978  * @f: the fragment offset.
1979  *
1980  * Takes an additional reference on the @f'th paged fragment of @skb.
1981  */
1982 static inline void skb_frag_ref(struct sk_buff *skb, int f)
1983 {
1984 	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
1985 }
1986 
1987 /**
1988  * __skb_frag_unref - release a reference on a paged fragment.
1989  * @frag: the paged fragment
1990  *
1991  * Releases a reference on the paged fragment @frag.
1992  */
1993 static inline void __skb_frag_unref(skb_frag_t *frag)
1994 {
1995 	put_page(skb_frag_page(frag));
1996 }
1997 
1998 /**
1999  * skb_frag_unref - release a reference on a paged fragment of an skb.
2000  * @skb: the buffer
2001  * @f: the fragment offset
2002  *
2003  * Releases a reference on the @f'th paged fragment of @skb.
2004  */
2005 static inline void skb_frag_unref(struct sk_buff *skb, int f)
2006 {
2007 	__skb_frag_unref(&skb_shinfo(skb)->frags[f]);
2008 }
2009 
2010 /**
2011  * skb_frag_address - gets the address of the data contained in a paged fragment
2012  * @frag: the paged fragment buffer
2013  *
2014  * Returns the address of the data within @frag. The page must already
2015  * be mapped.
2016  */
2017 static inline void *skb_frag_address(const skb_frag_t *frag)
2018 {
2019 	return page_address(skb_frag_page(frag)) + frag->page_offset;
2020 }
2021 
2022 /**
2023  * skb_frag_address_safe - gets the address of the data contained in a paged fragment
2024  * @frag: the paged fragment buffer
2025  *
2026  * Returns the address of the data within @frag. Checks that the page
2027  * is mapped and returns %NULL otherwise.
2028  */
2029 static inline void *skb_frag_address_safe(const skb_frag_t *frag)
2030 {
2031 	void *ptr = page_address(skb_frag_page(frag));
2032 	if (unlikely(!ptr))
2033 		return NULL;
2034 
2035 	return ptr + frag->page_offset;
2036 }
2037 
2038 /**
2039  * __skb_frag_set_page - sets the page contained in a paged fragment
2040  * @frag: the paged fragment
2041  * @page: the page to set
2042  *
2043  * Sets the fragment @frag to contain @page.
2044  */
2045 static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
2046 {
2047 	frag->page.p = page;
2048 }
2049 
2050 /**
2051  * skb_frag_set_page - sets the page contained in a paged fragment of an skb
2052  * @skb: the buffer
2053  * @f: the fragment offset
2054  * @page: the page to set
2055  *
2056  * Sets the @f'th fragment of @skb to contain @page.
2057  */
2058 static inline void skb_frag_set_page(struct sk_buff *skb, int f,
2059 				     struct page *page)
2060 {
2061 	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
2062 }
2063 
2064 bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
2065 
2066 /**
2067  * skb_frag_dma_map - maps a paged fragment via the DMA API
2068  * @dev: the device to map the fragment to
2069  * @frag: the paged fragment to map
2070  * @offset: the offset within the fragment (starting at the
2071  *          fragment's own offset)
2072  * @size: the number of bytes to map
2073  * @dir: the direction of the mapping (%DMA_*)
2074  *
2075  * Maps the page associated with @frag to @dev.
2076  */
2077 static inline dma_addr_t skb_frag_dma_map(struct device *dev,
2078 					  const skb_frag_t *frag,
2079 					  size_t offset, size_t size,
2080 					  enum dma_data_direction dir)
2081 {
2082 	return dma_map_page(dev, skb_frag_page(frag),
2083 			    frag->page_offset + offset, size, dir);
2084 }
2085 
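/* Illustrative sketch (not part of this header): a transmit path mapping
 * every paged fragment for DMA. 'dev' is the driver's struct device; the
 * loop index and the unmap label are hypothetical.
 *
 *	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 *		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 *		dma_addr_t dma;
 *
 *		dma = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
 *				       DMA_TO_DEVICE);
 *		if (dma_mapping_error(dev, dma))
 *			goto unmap;
 *	}
 */
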
2086 static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
2087 					gfp_t gfp_mask)
2088 {
2089 	return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
2090 }
2091 
2092 /**
2093  *	skb_clone_writable - is the header of a clone writable
2094  *	@skb: buffer to check
2095  *	@len: length up to which to write
2096  *
2097  *	Returns true if modifying the header part of the cloned buffer
2098  *	does not require the data to be copied.
2099  */
2100 static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
2101 {
2102 	return !skb_header_cloned(skb) &&
2103 	       skb_headroom(skb) + len <= skb->hdr_len;
2104 }
2105 
2106 static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
2107 			    int cloned)
2108 {
2109 	int delta = 0;
2110 
2111 	if (headroom > skb_headroom(skb))
2112 		delta = headroom - skb_headroom(skb);
2113 
2114 	if (delta || cloned)
2115 		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
2116 					GFP_ATOMIC);
2117 	return 0;
2118 }
2119 
2120 /**
2121  *	skb_cow - copy header of skb when it is required
2122  *	@skb: buffer to cow
2123  *	@headroom: needed headroom
2124  *
2125  *	If the skb passed lacks sufficient headroom or its data part
2126  *	is shared, data is reallocated. If reallocation fails, an error
2127  *	is returned and the original skb is not changed.
2128  *
2129  *	The result is skb with writable area skb->head...skb->tail
2130  *	and at least @headroom of space at head.
2131  */
2132 static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
2133 {
2134 	return __skb_cow(skb, headroom, skb_cloned(skb));
2135 }
2136 
2137 /**
2138  *	skb_cow_head - skb_cow but only making the head writable
2139  *	@skb: buffer to cow
2140  *	@headroom: needed headroom
2141  *
2142  *	This function is identical to skb_cow except that we replace the
2143  *	skb_cloned check by skb_header_cloned.  It should be used when
2144  *	you only need to push on some header and do not need to modify
2145  *	the data.
2146  */
2147 static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
2148 {
2149 	return __skb_cow(skb, headroom, skb_header_cloned(skb));
2150 }
2151 
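/* Illustrative sketch (not part of this header): before pushing a new
 * header in front of the packet, make sure the head is writable and large
 * enough. 'hdr_len' and the drop label are hypothetical.
 *
 *	if (skb_cow_head(skb, hdr_len))
 *		goto drop;
 *	skb_push(skb, hdr_len);
 */
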
2152 /**
2153  *	skb_padto	- pad an skbuff up to a minimal size
2154  *	@skb: buffer to pad
2155  *	@len: minimal length
2156  *
2157  *	Pads up a buffer to ensure the trailing bytes exist and are
2158  *	blanked. If the buffer already contains sufficient data it
2159  *	is untouched. Otherwise it is extended. Returns zero on
2160  *	success. The skb is freed on error.
2161  */
2162 
2163 static inline int skb_padto(struct sk_buff *skb, unsigned int len)
2164 {
2165 	unsigned int size = skb->len;
2166 	if (likely(size >= len))
2167 		return 0;
2168 	return skb_pad(skb, len - size);
2169 }
2170 
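/* Illustrative sketch (not part of this header): many Ethernet drivers pad
 * outgoing frames to the 60-byte minimum before handing them to hardware.
 * ETH_ZLEN and NETDEV_TX_OK come from other headers; remember that
 * skb_padto() has already freed the skb when it fails.
 *
 *	if (skb_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;
 */
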
2171 static inline int skb_add_data(struct sk_buff *skb,
2172 			       char __user *from, int copy)
2173 {
2174 	const int off = skb->len;
2175 
2176 	if (skb->ip_summed == CHECKSUM_NONE) {
2177 		int err = 0;
2178 		__wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
2179 							    copy, 0, &err);
2180 		if (!err) {
2181 			skb->csum = csum_block_add(skb->csum, csum, off);
2182 			return 0;
2183 		}
2184 	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
2185 		return 0;
2186 
2187 	__skb_trim(skb, off);
2188 	return -EFAULT;
2189 }
2190 
2191 static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
2192 				    const struct page *page, int off)
2193 {
2194 	if (i) {
2195 		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
2196 
2197 		return page == skb_frag_page(frag) &&
2198 		       off == frag->page_offset + skb_frag_size(frag);
2199 	}
2200 	return false;
2201 }
2202 
2203 static inline int __skb_linearize(struct sk_buff *skb)
2204 {
2205 	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
2206 }
2207 
2208 /**
2209  *	skb_linearize - convert paged skb to linear one
2210  *	@skb: buffer to linearize
2211  *
2212  *	If there is no free memory -ENOMEM is returned, otherwise zero
2213  *	is returned and the old skb data released.
2214  */
2215 static inline int skb_linearize(struct sk_buff *skb)
2216 {
2217 	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
2218 }
2219 
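/* Illustrative sketch (not part of this header): code that needs the whole
 * payload contiguous in the linear area (for example before parsing it in
 * place) can linearize first. The drop label is hypothetical.
 *
 *	if (skb_linearize(skb))
 *		goto drop;
 */
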
2220 /**
2221  * skb_has_shared_frag - can any frag be overwritten
2222  * @skb: buffer to test
2223  *
2224  * Return true if the skb has at least one frag that might be modified
2225  * by an external entity (as in vmsplice()/sendfile())
2226  */
2227 static inline bool skb_has_shared_frag(const struct sk_buff *skb)
2228 {
2229 	return skb_is_nonlinear(skb) &&
2230 	       skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
2231 }
2232 
2233 /**
2234  *	skb_linearize_cow - make sure skb is linear and writable
2235  *	@skb: buffer to process
2236  *
2237  *	If there is no free memory -ENOMEM is returned, otherwise zero
2238  *	is returned and the old skb data released.
2239  */
2240 static inline int skb_linearize_cow(struct sk_buff *skb)
2241 {
2242 	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
2243 	       __skb_linearize(skb) : 0;
2244 }
2245 
2246 /**
2247  *	skb_postpull_rcsum - update checksum for received skb after pull
2248  *	@skb: buffer to update
2249  *	@start: start of data before pull
2250  *	@len: length of data pulled
2251  *
2252  *	After doing a pull on a received packet, you need to call this to
2253  *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
2254  *	CHECKSUM_NONE so that it can be recomputed from scratch.
2255  */
2256 
2257 static inline void skb_postpull_rcsum(struct sk_buff *skb,
2258 				      const void *start, unsigned int len)
2259 {
2260 	if (skb->ip_summed == CHECKSUM_COMPLETE)
2261 		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
2262 }
2263 
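/* Illustrative sketch (not part of this header): after pulling a header
 * with __skb_pull(), keep a CHECKSUM_COMPLETE value consistent. 'hlen' is
 * hypothetical; skb_pull_rcsum() below combines both steps.
 *
 *	start = skb->data;
 *	__skb_pull(skb, hlen);
 *	skb_postpull_rcsum(skb, start, hlen);
 */
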
2264 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
2265 
2266 #define skb_queue_walk(queue, skb) \
2267 		for (skb = (queue)->next;					\
2268 		     skb != (struct sk_buff *)(queue);				\
2269 		     skb = skb->next)
2270 
2271 #define skb_queue_walk_safe(queue, skb, tmp)					\
2272 		for (skb = (queue)->next, tmp = skb->next;			\
2273 		     skb != (struct sk_buff *)(queue);				\
2274 		     skb = tmp, tmp = skb->next)
2275 
2276 #define skb_queue_walk_from(queue, skb)						\
2277 		for (; skb != (struct sk_buff *)(queue);			\
2278 		     skb = skb->next)
2279 
2280 #define skb_queue_walk_from_safe(queue, skb, tmp)				\
2281 		for (tmp = skb->next;						\
2282 		     skb != (struct sk_buff *)(queue);				\
2283 		     skb = tmp, tmp = skb->next)
2284 
2285 #define skb_queue_reverse_walk(queue, skb) \
2286 		for (skb = (queue)->prev;					\
2287 		     skb != (struct sk_buff *)(queue);				\
2288 		     skb = skb->prev)
2289 
2290 #define skb_queue_reverse_walk_safe(queue, skb, tmp)				\
2291 		for (skb = (queue)->prev, tmp = skb->prev;			\
2292 		     skb != (struct sk_buff *)(queue);				\
2293 		     skb = tmp, tmp = skb->prev)
2294 
2295 #define skb_queue_reverse_walk_from_safe(queue, skb, tmp)			\
2296 		for (tmp = skb->prev;						\
2297 		     skb != (struct sk_buff *)(queue);				\
2298 		     skb = tmp, tmp = skb->prev)
2299 
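/* Illustrative sketch (not part of this header): walking a queue and
 * unlinking entries on the way, which requires the _safe variant. 'list'
 * and expired() are hypothetical; the caller must hold the queue lock
 * since __skb_unlink() does not take it.
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_queue_walk_safe(list, skb, tmp) {
 *		if (expired(skb)) {
 *			__skb_unlink(skb, list);
 *			kfree_skb(skb);
 *		}
 *	}
 */
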
2300 static inline bool skb_has_frag_list(const struct sk_buff *skb)
2301 {
2302 	return skb_shinfo(skb)->frag_list != NULL;
2303 }
2304 
2305 static inline void skb_frag_list_init(struct sk_buff *skb)
2306 {
2307 	skb_shinfo(skb)->frag_list = NULL;
2308 }
2309 
2310 static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
2311 {
2312 	frag->next = skb_shinfo(skb)->frag_list;
2313 	skb_shinfo(skb)->frag_list = frag;
2314 }
2315 
2316 #define skb_walk_frags(skb, iter)	\
2317 	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
2318 
2319 struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
2320 				    int *peeked, int *off, int *err);
2321 struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
2322 				  int *err);
2323 unsigned int datagram_poll(struct file *file, struct socket *sock,
2324 			   struct poll_table_struct *wait);
2325 int skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
2326 			    struct iovec *to, int size);
2327 int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
2328 				     struct iovec *iov);
2329 int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
2330 				 const struct iovec *from, int from_offset,
2331 				 int len);
2332 int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *frm,
2333 			   int offset, size_t count);
2334 int skb_copy_datagram_const_iovec(const struct sk_buff *from, int offset,
2335 				  const struct iovec *to, int to_offset,
2336 				  int size);
2337 void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
2338 void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb);
2339 int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
2340 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
2341 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
2342 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
2343 			      int len, __wsum csum);
2344 int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
2345 		    struct pipe_inode_info *pipe, unsigned int len,
2346 		    unsigned int flags);
2347 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
2348 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
2349 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
2350 void skb_scrub_packet(struct sk_buff *skb, bool xnet);
2351 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
2352 
2353 struct skb_checksum_ops {
2354 	__wsum (*update)(const void *mem, int len, __wsum wsum);
2355 	__wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
2356 };
2357 
2358 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
2359 		      __wsum csum, const struct skb_checksum_ops *ops);
2360 __wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
2361 		    __wsum csum);
2362 
2363 /**
2364  *	pskb_trim_rcsum - trim received skb and update checksum
2365  *	@skb: buffer to trim
2366  *	@len: new length
2367  *
2368  *	This is exactly the same as pskb_trim except that it ensures the
2369  *	checksum of received packets is still valid after the operation.
2370  */
2371 
2372 static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
2373 {
2374 	if (likely(len >= skb->len))
2375 		return 0;
2376 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
2377 		__wsum adj = skb_checksum(skb, len, skb->len - len, 0);
2378 
2379 		skb->csum = csum_sub(skb->csum, adj);
2380 	}
2381 	return __pskb_trim(skb, len);
2382 }
2383 
2384 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
2385 				       int len, void *buffer)
2386 {
2387 	int hlen = skb_headlen(skb);
2388 
2389 	if (hlen - offset >= len)
2390 		return skb->data + offset;
2391 
2392 	if (skb_copy_bits(skb, offset, buffer, len) < 0)
2393 		return NULL;
2394 
2395 	return buffer;
2396 }
2397 
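/* Illustrative sketch (not part of this header): reading a transport header
 * that may or may not sit in the linear area. The stack buffer is only used
 * when the requested bytes are paged. struct tcphdr comes from another
 * header; the drop label is hypothetical.
 *
 *	struct tcphdr _tcph;
 *	const struct tcphdr *th;
 *
 *	th = skb_header_pointer(skb, skb_transport_offset(skb),
 *				sizeof(_tcph), &_tcph);
 *	if (!th)
 *		goto drop;
 */
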
2398 static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
2399 					     void *to,
2400 					     const unsigned int len)
2401 {
2402 	memcpy(to, skb->data, len);
2403 }
2404 
2405 static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
2406 						    const int offset, void *to,
2407 						    const unsigned int len)
2408 {
2409 	memcpy(to, skb->data + offset, len);
2410 }
2411 
2412 static inline void skb_copy_to_linear_data(struct sk_buff *skb,
2413 					   const void *from,
2414 					   const unsigned int len)
2415 {
2416 	memcpy(skb->data, from, len);
2417 }
2418 
2419 static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
2420 						  const int offset,
2421 						  const void *from,
2422 						  const unsigned int len)
2423 {
2424 	memcpy(skb->data + offset, from, len);
2425 }
2426 
2427 void skb_init(void);
2428 
2429 static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
2430 {
2431 	return skb->tstamp;
2432 }
2433 
2434 /**
2435  *	skb_get_timestamp - get timestamp from a skb
2436  *	@skb: skb to get stamp from
2437  *	@stamp: pointer to struct timeval to store stamp in
2438  *
2439  *	Timestamps are stored in the skb as offsets to a base timestamp.
2440  *	This function converts the offset back to a struct timeval and stores
2441  *	it in stamp.
2442  */
2443 static inline void skb_get_timestamp(const struct sk_buff *skb,
2444 				     struct timeval *stamp)
2445 {
2446 	*stamp = ktime_to_timeval(skb->tstamp);
2447 }
2448 
2449 static inline void skb_get_timestampns(const struct sk_buff *skb,
2450 				       struct timespec *stamp)
2451 {
2452 	*stamp = ktime_to_timespec(skb->tstamp);
2453 }
2454 
2455 static inline void __net_timestamp(struct sk_buff *skb)
2456 {
2457 	skb->tstamp = ktime_get_real();
2458 }
2459 
2460 static inline ktime_t net_timedelta(ktime_t t)
2461 {
2462 	return ktime_sub(ktime_get_real(), t);
2463 }
2464 
2465 static inline ktime_t net_invalid_timestamp(void)
2466 {
2467 	return ktime_set(0, 0);
2468 }
2469 
2470 void skb_timestamping_init(void);
2471 
2472 #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
2473 
2474 void skb_clone_tx_timestamp(struct sk_buff *skb);
2475 bool skb_defer_rx_timestamp(struct sk_buff *skb);
2476 
2477 #else /* CONFIG_NETWORK_PHY_TIMESTAMPING */
2478 
2479 static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
2480 {
2481 }
2482 
2483 static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
2484 {
2485 	return false;
2486 }
2487 
2488 #endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */
2489 
2490 /**
2491  * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
2492  *
2493  * PHY drivers may accept clones of transmitted packets for
2494  * timestamping via their phy_driver.txtstamp method. These drivers
2495  * must call this function to return the skb back to the stack, with
2496  * or without a timestamp.
2497  *
2498  * @skb: clone of the original outgoing packet
2499  * @hwtstamps: hardware time stamps, may be NULL if not available
2500  *
2501  */
2502 void skb_complete_tx_timestamp(struct sk_buff *skb,
2503 			       struct skb_shared_hwtstamps *hwtstamps);
2504 
2505 /**
2506  * skb_tstamp_tx - queue clone of skb with send time stamps
2507  * @orig_skb:	the original outgoing packet
2508  * @hwtstamps:	hardware time stamps, may be NULL if not available
2509  *
2510  * If the skb has a socket associated, then this function clones the
2511  * skb (thus sharing the actual data and optional structures), stores
2512  * the optional hardware time stamping information (if non NULL) or
2513  * generates a software time stamp (otherwise), then queues the clone
2514  * to the error queue of the socket.  Errors are silently ignored.
2515  */
2516 void skb_tstamp_tx(struct sk_buff *orig_skb,
2517 		   struct skb_shared_hwtstamps *hwtstamps);
2518 
2519 static inline void sw_tx_timestamp(struct sk_buff *skb)
2520 {
2521 	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
2522 	    !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
2523 		skb_tstamp_tx(skb, NULL);
2524 }
2525 
2526 /**
2527  * skb_tx_timestamp() - Driver hook for transmit timestamping
2528  *
2529  * Ethernet MAC Drivers should call this function in their hard_xmit()
2530  * function immediately before giving the sk_buff to the MAC hardware.
2531  *
2532  * @skb: A socket buffer.
2533  */
2534 static inline void skb_tx_timestamp(struct sk_buff *skb)
2535 {
2536 	skb_clone_tx_timestamp(skb);
2537 	sw_tx_timestamp(skb);
2538 }
2539 
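/* Illustrative sketch (not part of this header): a driver calls
 * skb_tx_timestamp() in its ndo_start_xmit routine once the descriptor for
 * the skb is filled in and immediately before telling the hardware to send
 * it. The doorbell write is hypothetical and stands for whatever mechanism
 * the device uses to start transmission.
 *
 *	skb_tx_timestamp(skb);
 *	writel(ring->tail, ring->doorbell);
 */
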
2540 /**
2541  * skb_complete_wifi_ack - deliver skb with wifi status
2542  *
2543  * @skb: the original outgoing packet
2544  * @acked: ack status
2545  *
2546  */
2547 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
2548 
2549 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
2550 __sum16 __skb_checksum_complete(struct sk_buff *skb);
2551 
2552 static inline int skb_csum_unnecessary(const struct sk_buff *skb)
2553 {
2554 	return skb->ip_summed & CHECKSUM_UNNECESSARY;
2555 }
2556 
2557 /**
2558  *	skb_checksum_complete - Calculate checksum of an entire packet
2559  *	@skb: packet to process
2560  *
2561  *	This function calculates the checksum over the entire packet plus
2562  *	the value of skb->csum.  The latter can be used to supply the
2563  *	checksum of a pseudo header as used by TCP/UDP.  It returns the
2564  *	checksum.
2565  *
2566  *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
2567  *	this function can be used to verify the checksum on received
2568  *	packets.  In that case the function should return zero if the
2569  *	checksum is correct.  In particular, this function will return zero
2570  *	if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
2571  *	hardware has already verified the correctness of the checksum.
2572  */
2573 static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
2574 {
2575 	return skb_csum_unnecessary(skb) ?
2576 	       0 : __skb_checksum_complete(skb);
2577 }
2578 
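/* Illustrative sketch (not part of this header): a protocol receive path
 * whose packets carry a complete checksum can verify them in one call;
 * zero means the checksum is correct or was already verified by hardware.
 * The csum_error label is hypothetical.
 *
 *	if (skb_checksum_complete(skb))
 *		goto csum_error;
 */
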
2579 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2580 void nf_conntrack_destroy(struct nf_conntrack *nfct);
2581 static inline void nf_conntrack_put(struct nf_conntrack *nfct)
2582 {
2583 	if (nfct && atomic_dec_and_test(&nfct->use))
2584 		nf_conntrack_destroy(nfct);
2585 }
2586 static inline void nf_conntrack_get(struct nf_conntrack *nfct)
2587 {
2588 	if (nfct)
2589 		atomic_inc(&nfct->use);
2590 }
2591 #endif
2592 #ifdef CONFIG_BRIDGE_NETFILTER
2593 static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
2594 {
2595 	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
2596 		kfree(nf_bridge);
2597 }
2598 static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
2599 {
2600 	if (nf_bridge)
2601 		atomic_inc(&nf_bridge->use);
2602 }
2603 #endif /* CONFIG_BRIDGE_NETFILTER */
2604 static inline void nf_reset(struct sk_buff *skb)
2605 {
2606 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2607 	nf_conntrack_put(skb->nfct);
2608 	skb->nfct = NULL;
2609 #endif
2610 #ifdef CONFIG_BRIDGE_NETFILTER
2611 	nf_bridge_put(skb->nf_bridge);
2612 	skb->nf_bridge = NULL;
2613 #endif
2614 }
2615 
2616 static inline void nf_reset_trace(struct sk_buff *skb)
2617 {
2618 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
2619 	skb->nf_trace = 0;
2620 #endif
2621 }
2622 
2623 /* Note: This doesn't put any conntrack and bridge info in dst. */
2624 static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
2625 {
2626 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2627 	dst->nfct = src->nfct;
2628 	nf_conntrack_get(src->nfct);
2629 	dst->nfctinfo = src->nfctinfo;
2630 #endif
2631 #ifdef CONFIG_BRIDGE_NETFILTER
2632 	dst->nf_bridge  = src->nf_bridge;
2633 	nf_bridge_get(src->nf_bridge);
2634 #endif
2635 }
2636 
2637 static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
2638 {
2639 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2640 	nf_conntrack_put(dst->nfct);
2641 #endif
2642 #ifdef CONFIG_BRIDGE_NETFILTER
2643 	nf_bridge_put(dst->nf_bridge);
2644 #endif
2645 	__nf_copy(dst, src);
2646 }
2647 
2648 #ifdef CONFIG_NETWORK_SECMARK
2649 static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
2650 {
2651 	to->secmark = from->secmark;
2652 }
2653 
2654 static inline void skb_init_secmark(struct sk_buff *skb)
2655 {
2656 	skb->secmark = 0;
2657 }
2658 #else
2659 static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
2660 { }
2661 
2662 static inline void skb_init_secmark(struct sk_buff *skb)
2663 { }
2664 #endif
2665 
2666 static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
2667 {
2668 	skb->queue_mapping = queue_mapping;
2669 }
2670 
2671 static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
2672 {
2673 	return skb->queue_mapping;
2674 }
2675 
2676 static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
2677 {
2678 	to->queue_mapping = from->queue_mapping;
2679 }
2680 
2681 static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
2682 {
2683 	skb->queue_mapping = rx_queue + 1;
2684 }
2685 
2686 static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
2687 {
2688 	return skb->queue_mapping - 1;
2689 }
2690 
2691 static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
2692 {
2693 	return skb->queue_mapping != 0;
2694 }
2695 
2696 u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
2697 		  unsigned int num_tx_queues);
2698 
2699 static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
2700 {
2701 #ifdef CONFIG_XFRM
2702 	return skb->sp;
2703 #else
2704 	return NULL;
2705 #endif
2706 }
2707 
2708 /* Keeps track of mac header offset relative to skb->head.
2709  * It is useful for TSO of tunneling protocols, e.g. GRE.
2710  * For a non-tunnel skb it points to skb_mac_header() and for a
2711  * tunnel skb it points to the outer mac header.
2712  * It also keeps track of the level of encapsulation of network headers.
2713  */
2714 struct skb_gso_cb {
2715 	int	mac_offset;
2716 	int	encap_level;
2717 };
2718 #define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb)
2719 
2720 static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
2721 {
2722 	return (skb_mac_header(inner_skb) - inner_skb->head) -
2723 		SKB_GSO_CB(inner_skb)->mac_offset;
2724 }
2725 
2726 static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
2727 {
2728 	int new_headroom, headroom;
2729 	int ret;
2730 
2731 	headroom = skb_headroom(skb);
2732 	ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
2733 	if (ret)
2734 		return ret;
2735 
2736 	new_headroom = skb_headroom(skb);
2737 	SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
2738 	return 0;
2739 }
2740 
2741 static inline bool skb_is_gso(const struct sk_buff *skb)
2742 {
2743 	return skb_shinfo(skb)->gso_size;
2744 }
2745 
2746 /* Note: Should be called only if skb_is_gso(skb) is true */
2747 static inline bool skb_is_gso_v6(const struct sk_buff *skb)
2748 {
2749 	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
2750 }
2751 
2752 void __skb_warn_lro_forwarding(const struct sk_buff *skb);
2753 
2754 static inline bool skb_warn_if_lro(const struct sk_buff *skb)
2755 {
2756 	/* LRO sets gso_size but not gso_type, whereas if GSO is really
2757 	 * wanted then gso_type will be set. */
2758 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
2759 
2760 	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
2761 	    unlikely(shinfo->gso_type == 0)) {
2762 		__skb_warn_lro_forwarding(skb);
2763 		return true;
2764 	}
2765 	return false;
2766 }
2767 
2768 static inline void skb_forward_csum(struct sk_buff *skb)
2769 {
2770 	/* Unfortunately we don't support this one.  Any brave souls? */
2771 	if (skb->ip_summed == CHECKSUM_COMPLETE)
2772 		skb->ip_summed = CHECKSUM_NONE;
2773 }
2774 
2775 /**
2776  * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
2777  * @skb: skb to check
2778  *
2779  * Fresh skbs have their ip_summed set to CHECKSUM_NONE.
2780  * Instead of forcing ip_summed to CHECKSUM_NONE, we can
2781  * use this helper to document places where we make this assertion.
2782  */
2783 static inline void skb_checksum_none_assert(const struct sk_buff *skb)
2784 {
2785 #ifdef DEBUG
2786 	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
2787 #endif
2788 }
2789 
2790 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
2791 
2792 u32 __skb_get_poff(const struct sk_buff *skb);
2793 
2794 /**
2795  * skb_head_is_locked - Determine if the skb->head is locked down
2796  * @skb: skb to check
2797  *
2798  * The head of skbs built around a head frag can be removed if they are
2799  * not cloned.  This function returns true if the skb head is locked down
2800  * due to either being allocated via kmalloc, or by being a clone with
2801  * multiple references to the head.
2802  */
2803 static inline bool skb_head_is_locked(const struct sk_buff *skb)
2804 {
2805 	return !skb->head_frag || skb_cloned(skb);
2806 }
2807 #endif	/* __KERNEL__ */
2808 #endif	/* _LINUX_SKBUFF_H */
2809