/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <[email protected]>
 *		Florian La Roche, <[email protected]>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/cache.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>

/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE 0
#define CHECKSUM_UNNECESSARY 1
#define CHECKSUM_COMPLETE 2
#define CHECKSUM_PARTIAL 3

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* return minimum truesize of one skb containing X bytes of data */
#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

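/*
 * Worked example (illustrative, assuming SMP_CACHE_BYTES == 64 and
 * PAGE_SIZE == 4096; both vary by architecture):
 *
 *	SKB_DATA_ALIGN(100)	== 128, i.e. rounded up to a cache line
 *	SKB_WITH_OVERHEAD(4096)	== 4096 minus the cache-aligned size of
 *				   struct skb_shared_info, i.e. the payload
 *				   that fits in one page next to the
 *				   trailing shared info
 *	SKB_MAX_HEAD(0)		is therefore the largest linear data area
 *				   that still fits in an order-0 page
 */
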
/* A. Checksumming of received packets by device.
 *
 *	NONE: device failed to checksum this packet.
 *		skb->csum is undefined.
 *
 *	UNNECESSARY: device parsed the packet and verified the checksum.
 *		skb->csum is undefined.
 *	      This is a bad option, but, unfortunately, many vendors do this.
 *	      Apparently with the secret goal of selling you a new device,
 *	      when you add a new protocol to your host, e.g. IPv6. 8)
 *
 *	COMPLETE: the most generic way. Device supplied the checksum of the
 *	    _whole_ packet as seen by netif_rx in skb->csum.
 *	    NOTE: Even if a device supports only some protocols but is
 *	    able to produce some skb->csum, it MUST use COMPLETE,
 *	    not UNNECESSARY.
 *
 *	PARTIAL: identical to the case for output below.  This may occur
 *	    on a packet received directly from another Linux OS, e.g.,
 *	    a virtualised Linux kernel on the same host.  The packet can
 *	    be treated in the same way as UNNECESSARY except that on
 *	    output (i.e., forwarding) the checksum must be filled in
 *	    by the OS or the hardware.
 *
 * B. Checksumming on output.
 *
 *	NONE: skb is checksummed by protocol or csum is not required.
 *
 *	PARTIAL: device is required to csum packet as seen by hard_start_xmit
 *	from skb->csum_start to the end and to record the checksum
 *	at skb->csum_start + skb->csum_offset.
 *
 *	Device must show its capabilities in dev->features, set
 *	at device setup time.
 *	NETIF_F_HW_CSUM	- it is a clever device, able to checksum
 *			  everything.
 *	NETIF_F_NO_CSUM - loopback or reliable single hop media.
 *	NETIF_F_IP_CSUM - device is dumb. It is able to csum only
 *			  TCP/UDP over IPv4. Sigh. Vendors like it this
 *			  way for an unknown reason. Though, see the comment
 *			  above about CHECKSUM_UNNECESSARY. 8)
 *	NETIF_F_IPV6_CSUM - about as dumb as the last one, but does IPv6 instead.
 *
 *	Any questions? No questions, good.		--ANK
 */

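/*
 * Illustrative sketch (not part of this header; the function name is
 * hypothetical, only the CHECKSUM_* values come from above): how a
 * receive path typically consumes ip_summed.
 *
 *	static void example_rx_checksum(struct sk_buff *skb)
 *	{
 *		switch (skb->ip_summed) {
 *		case CHECKSUM_UNNECESSARY:
 *			break;		// device already verified it
 *		case CHECKSUM_COMPLETE:
 *			// skb->csum holds the sum over the whole packet;
 *			// the stack folds and verifies it itself
 *			break;
 *		case CHECKSUM_NONE:
 *		default:
 *			// stack must checksum in software
 *			break;
 *		}
 *	}
 */
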
struct net_device;
struct scatterlist;
struct pipe_inode_info;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
	atomic_t use;
};
#endif

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
	atomic_t use;
	struct net_device *physindev;
	struct net_device *physoutdev;
	unsigned int mask;
	unsigned long data[32 / sizeof(unsigned long)];
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;
/* To allow a 64K frame to be packed as a single skb without frag_list. Since
 * GRO uses frags we allocate at least 16 regardless of page size.
 */
#if (65536/PAGE_SIZE + 2) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)
#endif

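/*
 * Worked example (illustrative): with 4K pages, 65536/PAGE_SIZE + 2 == 18,
 * so MAX_SKB_FRAGS is 18; with 64K pages the formula gives 3, so the
 * GRO-driven floor of 16 applies instead.
 */
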
typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct {
		struct page *p;
	} page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
	__u32 size;
#else
	__u16 page_offset;
	__u16 size;
#endif
};

static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->size;
}

static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->size = size;
}

static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->size += delta;
}

static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->size -= delta;
}

#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since an arbitrary point in time
 * @syststamp:	hwtstamp transformed to system time base
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp. The relation between the different kinds of time
 * stamps is as follows:
 *
 * syststamp and tstamp can be compared against each other in
 * arbitrary combinations.  The accuracy of a
 * syststamp/tstamp/"syststamp from other device" comparison is
 * limited by the accuracy of the transformation into system time
 * base. This depends on the device driver and its underlying
 * hardware.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 */
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
	ktime_t	syststamp;
};

/* Definitions for tx_flags in struct skb_shared_info */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

	/* generate software time stamp */
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

	/* ensure the originating sk reference is available on driver level */
	SKBTX_DRV_NEEDS_SK_REF = 1 << 3,

	/* device driver supports TX zero-copy buffers */
	SKBTX_DEV_ZEROCOPY = 1 << 4,
};

/*
 * The callback notifies userspace to release buffers when skb DMA is done in
 * the lower device; the skb's last reference should be 0 when this is called.
 * The desc field is used to track the userspace buffer index.
 */
struct ubuf_info {
	void (*callback)(void *);
	void *arg;
	unsigned long desc;
};

/* This data is invariant across clones and lives at
 * the end of the header data, i.e. at skb->end.
 */
struct skb_shared_info {
	unsigned short	nr_frags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short  gso_type;
	__be32          ip6_frag_id;
	__u8		tx_flags;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;

	/*
	 * Warning: all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor */
	void *		destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)

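/*
 * Worked example (illustrative): dataref == (1 << SKB_DATAREF_SHIFT) | 2
 * encodes one payload-only reference in the upper 16 bits and two
 * references to the entire skb->data in the lower 16 bits.
 * skb_header_cloned() below derives the number of header-capable
 * references as (dataref & SKB_DATAREF_MASK) minus
 * (dataref >> SKB_DATAREF_SHIFT).
 */
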

enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,

	SKB_GSO_FCOE = 1 << 5,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
    defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
#endif

/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@tstamp: Time we arrived
 *	@sk: Socket we are owned by
 *	@dev: Device we arrived on/are leaving by
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@_skb_refdst: destination entry (with norefcount bit)
 *	@sp: the security path, used for xfrm
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@hdr_len: writable header length of cloned skb
 *	@csum: Checksum (must include start/offset pair)
 *	@csum_start: Offset from skb->head where checksumming should start
 *	@csum_offset: Offset from csum_start where checksum should be stored
 *	@priority: Packet queueing priority
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@ip_summed: Driver fed us an IP checksum
 *	@nohdr: Payload reference only, must not modify header
 *	@nfctinfo: Relationship of this skb to the connection
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ipvs_property: skbuff is owned by ipvs
 *	@peeked: this packet has been seen already, so stats have been
 *		done for it, don't do them again
 *	@nf_trace: netfilter packet trace flag
 *	@protocol: Packet protocol from driver
 *	@destructor: Destruct function
 *	@nfct: Associated connection, if any
 *	@nfct_reasm: netfilter conntrack re-assembly pointer
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@skb_iif: ifindex of device we arrived on
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@rxhash: the packet hash computed on receive
 *	@queue_mapping: Queue mapping for multiqueue devices
 *	@ndisc_nodetype: router type (from link layer)
 *	@ooo_okay: allow the mapping of a socket to a queue to be changed
 *	@l4_rxhash: indicate rxhash is a canonical 4-tuple hash over transport
 *		ports.
 *	@dma_cookie: a cookie to one of several possible DMA operations
 *		done by skb DMA functions
 *	@secmark: security marking
 *	@mark: Generic packet mark
 *	@dropcount: total number of sk_receive_queue overflows
 *	@vlan_tci: vlan tag control information
 *	@transport_header: Transport layer header
 *	@network_header: Network layer header
 *	@mac_header: Link layer header
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@truesize: Buffer size
 *	@users: User count - see {datagram,tcp}.c
 */

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	ktime_t			tstamp;

	struct sock		*sk;
	struct net_device	*dev;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	unsigned long		_skb_refdst;
#ifdef CONFIG_XFRM
	struct	sec_path	*sp;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;
	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	kmemcheck_bitfield_begin(flags1);
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	__u8			pkt_type:3,
				fclone:2,
				ipvs_property:1,
				peeked:1,
				nf_trace:1;
	kmemcheck_bitfield_end(flags1);
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack	*nfct;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif

	int			skb_iif;
#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif

	__u32			rxhash;

	__u16			queue_mapping;
	kmemcheck_bitfield_begin(flags2);
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif
	__u8			ooo_okay:1;
	__u8			l4_rxhash:1;
	kmemcheck_bitfield_end(flags2);

	/* 0/13 bit hole */

#ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif
	union {
		__u32		mark;
		__u32		dropcount;
	};

	__u16			vlan_tci;

	sk_buff_data_t		transport_header;
	sk_buff_data_t		network_header;
	sk_buff_data_t		mac_header;
	/* These elements must be at the end, see alloc_skb() for details.  */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	atomic_t		users;
};

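/*
 * Illustrative sketch (hypothetical names, not part of this header): a
 * layer typically overlays its own structure on skb->cb, much as TCP does
 * with its control block, and must keep it within the 48 bytes:
 *
 *	struct example_cb {
 *		__u32 seq;
 *		__u32 flags;
 *	};
 *	#define EXAMPLE_CB(skb) ((struct example_cb *)&((skb)->cb[0]))
 *
 *	BUILD_BUG_ON(sizeof(struct example_cb) >
 *		     sizeof(((struct sk_buff *)0)->cb));
 *	EXAMPLE_CB(skb)->seq = 1;
 */
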
#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>

#include <asm/system.h>

/*
 * skb might have a dst pointer attached, refcounted or not.
 * _skb_refdst low order bit is set if refcount was _not_ taken
 */
#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)

/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of reference taken or not.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check we still are in a
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}

extern void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst);

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

extern void kfree_skb(struct sk_buff *skb);
extern void consume_skb(struct sk_buff *skb);
extern void	       __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int fclone, int node);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, 1, NUMA_NO_NODE);
}

extern void skb_recycle(struct sk_buff *skb);
extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);

extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
extern struct sk_buff *pskb_copy(struct sk_buff *skb,
				 gfp_t gfp_mask);
extern int	       pskb_expand_head(struct sk_buff *skb,
					int nhead, int ntail,
					gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
					    unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				       int newheadroom, int newtailroom,
				       gfp_t priority);
extern int	       skb_to_sgvec(struct sk_buff *skb,
				    struct scatterlist *sg, int offset,
				    int len);
extern int	       skb_cow_data(struct sk_buff *skb, int tailbits,
				    struct sk_buff **trailer);
extern int	       skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	consume_skb(a)

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int getfrag(void *from, char *to, int offset,
			int len, int odd, struct sk_buff *skb),
			void *from, int length);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

extern void	      skb_prepare_seq_read(struct sk_buff *skb,
					   unsigned int from, unsigned int to,
					   struct skb_seq_state *st);
extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
				   struct skb_seq_state *st);
extern void	      skb_abort_seq_read(struct skb_seq_state *st);

extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
				    unsigned int to, struct ts_config *config,
				    struct ts_state *state);

extern void __skb_get_rxhash(struct sk_buff *skb);
static inline __u32 skb_get_rxhash(struct sk_buff *skb)
{
	if (!skb->rxhash)
		__skb_get_rxhash(skb);

	return skb->rxhash;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return skb->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_first - check if skb is the first entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return skb->prev == (struct sk_buff *)list;
}

/**
 *	skb_queue_next - return the next packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the next packet in @list after @skb.  It is only valid to
 *	call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

/**
 *	skb_queue_prev - return the prev packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the prev packet in @list before @skb.  It is only valid to
 *	call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant
 * atomic changes.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer.  This is done
 *	by acquiring a payload reference.  You must not read from the header
 *	part of skb->data after this.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	called from interrupt context or with spinlocks held, @pri must
 *	be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
					      gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);
		kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

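/*
 * Illustrative usage (hypothetical receive handler, not part of this
 * header): a handler that may modify the skb first makes sure it owns it.
 * On failure the original skb has already been freed by skb_share_check().
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (!skb)
 *		return NET_RX_DROP;
 *	// safe to modify skb metadata from here on
 */
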
/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt context, @pri must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);	/* Free our shared copy */
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek - peek at the head of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
	struct sk_buff *list = ((const struct sk_buff *)list_)->next;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
	struct sk_buff *list = ((const struct sk_buff *)list_)->prev;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/**
 *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length aspects of
 *	an sk_buff_head object.  This allows initializing the list
 *	aspects of an sk_buff_head without reinitializing things like
 *	the spinlock.  It can also be used for on-stack sk_buff_head
 *	objects where the spinlock is known not to be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers should need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
		struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}

/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

/**
 *	skb_queue_splice - join two skb lists, this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	__skb_queue_after - queue a buffer after a given buffer in a list
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		       struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				   struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
extern void	   skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

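/*
 * Illustrative sketch (hypothetical driver-private queue, not part of this
 * header): the non-atomic __skb_queue_*() helpers may be used on an
 * on-stack or otherwise private list where no locking is needed.
 *
 *	struct sk_buff_head tmp;
 *	struct sk_buff *skb;
 *
 *	__skb_queue_head_init(&tmp);
 *	__skb_queue_tail(&tmp, some_skb);
 *	while ((skb = __skb_dequeue(&tmp)) != NULL)
 *		kfree_skb(skb);
 */
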

static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
	return len + skb_headlen(skb);
}

/**
 * __skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data within @page
 * @size: the length of the data
 *
 * Initialises the @i'th fragment of @skb to point to @size bytes at
 * offset @off within @page.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
					struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page.p		  = page;
	frag->page_offset	  = off;
	skb_frag_size_set(frag, size);
}

/**
 * skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data within @page
 * @size: the length of the data
 *
 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
 * @skb to point to @size bytes at offset @off within @page. In
 * addition updates @skb such that @i is the last fragment.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	__skb_fill_page_desc(skb, i, page, off, size);
	skb_shinfo(skb)->nr_frags = i + 1;
}

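/*
 * Illustrative sketch (hypothetical values, not part of this header):
 * attaching one page of data as the first paged fragment. The caller is
 * assumed to hand over a page reference, and when using this helper
 * directly (rather than skb_add_rx_frag()) must account for the new data
 * itself:
 *
 *	skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
 *	skb->len      += PAGE_SIZE;
 *	skb->data_len += PAGE_SIZE;
 *	skb->truesize += PAGE_SIZE;
 */
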
extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
			    int off, int size);

#define SKB_PAGE_ASSERT(skb) 	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb) 	BUG_ON(skb_has_frag_list(skb))
#define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}
#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}

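/*
 * Illustrative sketch (classic header-parsing pattern, not part of this
 * header): make sure the bytes about to be read are in the linear area
 * before touching them.
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;	// malformed or too short
 *	iph = ip_hdr(skb);	// now safe to dereference the IPv4 header
 */
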
/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

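/*
 * Illustrative sketch (hypothetical sizes, not part of this header): the
 * usual transmit-side construction pattern built from the helpers above -
 * reserve headroom first, fill payload with skb_put(), then prepend the
 * header with skb_push():
 *
 *	skb = alloc_skb(hdr_len + data_len, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hdr_len);	// headroom for the header
 *	memcpy(skb_put(skb, data_len), data, data_len);
 *	skb_push(skb, hdr_len);		// now prepend the header
 */
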
static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != ~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb->transport_header = skb->data + offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb->network_header = skb->data + offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != NULL;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb->mac_header = skb->data + offset;
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */

static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
	return skb->csum_start - skb_headroom(skb);
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
	return pskb_may_pull(skb, skb_network_offset(skb) + len);
}

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 32 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom, you should not reduce this.
 *
 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
 * to reduce the average number of cache lines per packet.
 * get_rps_cpus(), for example, only accesses one 64-byte-aligned block:
 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
#endif

extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb_is_nonlinear(skb))) {
		WARN_ON(1);
		return;
	}
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

extern void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to out-
 *	of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk		= NULL;
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

/**
 *	__dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
	if (likely(skb))
		skb_reserve(skb, NET_SKB_PAD);
	return skb;
}

extern struct sk_buff *dev_alloc_skb(unsigned int length);

extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
		unsigned int length, gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length, gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}

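/*
 * Illustrative sketch (hypothetical driver rx refill loop, not part of
 * this header): allocating a receive buffer whose IP header will land on
 * a 4-byte boundary thanks to the NET_IP_ALIGN reservation done by the
 * helper above.
 *
 *	skb = netdev_alloc_skb_ip_align(dev, rx_buf_len);
 *	if (!skb)
 *		break;		// retry the refill later
 *	// hand skb->data to the hardware as the DMA target
 */
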
/**
 *	__netdev_alloc_page - allocate a page for ps-rx on a specific device
 *	@dev: network device to receive on
 *	@gfp_mask: alloc_pages_node mask
 *
 * 	Allocate a new page. @dev is currently unused.
 *
 * 	%NULL is returned if there is no free memory.
 */
static inline struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
{
	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, 0);
}

/**
 *	netdev_alloc_page - allocate a page for ps-rx on a specific device
 *	@dev: network device to receive on
 *
 * 	Allocate a new page. @dev is currently unused.
 *
 * 	%NULL is returned if there is no free memory.
 */
static inline struct page *netdev_alloc_page(struct net_device *dev)
{
	return __netdev_alloc_page(dev, GFP_ATOMIC);
}

static inline void netdev_free_page(struct net_device *dev, struct page *page)
{
	__free_page(page);
}

/**
 * skb_frag_page - retrieve the page referred to by a paged fragment
 * @frag: the paged fragment
 *
 * Returns the &struct page associated with @frag.
 */
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
	return frag->page.p;
}

/**
 * __skb_frag_ref - take an additional reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Takes an additional reference on the paged fragment @frag.
 */
static inline void __skb_frag_ref(skb_frag_t *frag)
{
	get_page(skb_frag_page(frag));
}

/**
 * skb_frag_ref - take an additional reference on a paged fragment of an skb.
1720  * @skb: the buffer
1721  * @f: the fragment offset.
1722  *
1723  * Takes an additional reference on the @f'th paged fragment of @skb.
1724  */
1725 static inline void skb_frag_ref(struct sk_buff *skb, int f)
1726 {
1727 	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
1728 }
1729 
1730 /**
1731  * __skb_frag_unref - release a reference on a paged fragment.
1732  * @frag: the paged fragment
1733  *
1734  * Releases a reference on the paged fragment @frag.
1735  */
1736 static inline void __skb_frag_unref(skb_frag_t *frag)
1737 {
1738 	put_page(skb_frag_page(frag));
1739 }
1740 
1741 /**
1742  * skb_frag_unref - release a reference on a paged fragment of an skb.
1743  * @skb: the buffer
1744  * @f: the fragment offset
1745  *
1746  * Releases a reference on the @f'th paged fragment of @skb.
1747  */
1748 static inline void skb_frag_unref(struct sk_buff *skb, int f)
1749 {
1750 	__skb_frag_unref(&skb_shinfo(skb)->frags[f]);
1751 }
1752 
1753 /**
1754  * skb_frag_address - gets the address of the data contained in a paged fragment
1755  * @frag: the paged fragment buffer
1756  *
1757  * Returns the address of the data within @frag. The page must already
1758  * be mapped.
1759  */
1760 static inline void *skb_frag_address(const skb_frag_t *frag)
1761 {
1762 	return page_address(skb_frag_page(frag)) + frag->page_offset;
1763 }
1764 
1765 /**
1766  * skb_frag_address_safe - gets the address of the data contained in a paged fragment
1767  * @frag: the paged fragment buffer
1768  *
1769  * Returns the address of the data within @frag. Checks that the page
1770  * is mapped and returns %NULL otherwise.
1771  */
1772 static inline void *skb_frag_address_safe(const skb_frag_t *frag)
1773 {
1774 	void *ptr = page_address(skb_frag_page(frag));
1775 	if (unlikely(!ptr))
1776 		return NULL;
1777 
1778 	return ptr + frag->page_offset;
1779 }
1780 
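/*
 * Example (illustrative sketch, hypothetical helper): copying the first
 * @len bytes out of fragment 0 of an skb.  The _safe accessor tolerates
 * pages without a kernel mapping (e.g. highmem).
 */
static inline int example_peek_frag0(const struct sk_buff *skb,
				     void *dst, unsigned int len)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	void *src = skb_frag_address_safe(frag);

	if (!src || len > skb_frag_size(frag))
		return -EFAULT;
	memcpy(dst, src, len);
	return 0;
}
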
1781 /**
1782  * __skb_frag_set_page - sets the page contained in a paged fragment
1783  * @frag: the paged fragment
1784  * @page: the page to set
1785  *
1786  * Sets the fragment @frag to contain @page.
1787  */
1788 static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
1789 {
1790 	frag->page.p = page;
1791 }
1792 
1793 /**
1794  * skb_frag_set_page - sets the page contained in a paged fragment of an skb
1795  * @skb: the buffer
1796  * @f: the fragment offset
1797  * @page: the page to set
1798  *
1799  * Sets the @f'th fragment of @skb to contain @page.
1800  */
1801 static inline void skb_frag_set_page(struct sk_buff *skb, int f,
1802 				     struct page *page)
1803 {
1804 	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
1805 }
1806 
1807 /**
1808  * skb_frag_dma_map - maps a paged fragment via the DMA API
1809  * @dev: the device to map the fragment to
1810  * @frag: the paged fragment to map
1811  * @offset: the offset within the fragment (starting at the
1812  *          fragment's own offset)
1813  * @size: the number of bytes to map
1814  * @dir: the direction of the mapping (%DMA_*)
1815  *
1816  * Maps the page associated with @frag to @dev.
1817  */
1818 static inline dma_addr_t skb_frag_dma_map(struct device *dev,
1819 					  const skb_frag_t *frag,
1820 					  size_t offset, size_t size,
1821 					  enum dma_data_direction dir)
1822 {
1823 	return dma_map_page(dev, skb_frag_page(frag),
1824 			    frag->page_offset + offset, size, dir);
1825 }
1826 
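/*
 * Example (illustrative sketch, hypothetical driver code): mapping every
 * paged fragment of an skb for transmit DMA.  Unmapping of the earlier
 * fragments on failure is omitted for brevity; a real driver must unwind.
 */
static inline int example_map_frags(struct device *dev,
				    const struct sk_buff *skb,
				    dma_addr_t *addrs)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		addrs[i] = skb_frag_dma_map(dev, frag, 0,
					    skb_frag_size(frag),
					    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, addrs[i]))
			return -ENOMEM;
	}
	return 0;
}
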
1827 /**
1828  *	skb_clone_writable - is the header of a clone writable
1829  *	@skb: buffer to check
1830  *	@len: length up to which to write
1831  *
1832  *	Returns true if modifying the header part of the cloned buffer
1833  *	does not require the data to be copied.
1834  */
1835 static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
1836 {
1837 	return !skb_header_cloned(skb) &&
1838 	       skb_headroom(skb) + len <= skb->hdr_len;
1839 }
1840 
1841 static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
1842 			    int cloned)
1843 {
1844 	int delta = 0;
1845 
1846 	if (headroom < NET_SKB_PAD)
1847 		headroom = NET_SKB_PAD;
1848 	if (headroom > skb_headroom(skb))
1849 		delta = headroom - skb_headroom(skb);
1850 
1851 	if (delta || cloned)
1852 		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
1853 					GFP_ATOMIC);
1854 	return 0;
1855 }
1856 
1857 /**
1858  *	skb_cow - copy header of skb when it is required
1859  *	@skb: buffer to cow
1860  *	@headroom: needed headroom
1861  *
1862  *	If the skb passed lacks sufficient headroom or its data part
1863  *	is shared, data is reallocated. If reallocation fails, an error
1864  *	is returned and the original skb is not changed.
1865  *
1866  *	The result is skb with writable area skb->head...skb->tail
1867  *	and at least @headroom of space at head.
1868  */
1869 static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
1870 {
1871 	return __skb_cow(skb, headroom, skb_cloned(skb));
1872 }
1873 
1874 /**
1875  *	skb_cow_head - skb_cow but only making the head writable
1876  *	@skb: buffer to cow
1877  *	@headroom: needed headroom
1878  *
1879  *	This function is identical to skb_cow except that we replace the
1880  *	skb_cloned check with skb_header_cloned.  It should be used when
1881  *	you only need to push on some header and do not need to modify
1882  *	the data.
1883  */
1884 static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
1885 {
1886 	return __skb_cow(skb, headroom, skb_header_cloned(skb));
1887 }
1888 
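/*
 * Example (illustrative sketch, hypothetical helper): ensuring writable
 * headroom before pushing a 4-byte encapsulation header.  The header
 * size is an assumption made for the example.
 */
static inline int example_push_encap(struct sk_buff *skb)
{
	int err = skb_cow_head(skb, 4);

	if (err)
		return err;	/* skb is unchanged on failure */

	__skb_push(skb, 4);	/* headroom is now private and writable */
	return 0;
}
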
1889 /**
1890  *	skb_padto	- pad an skbuff up to a minimal size
1891  *	@skb: buffer to pad
1892  *	@len: minimal length
1893  *
1894  *	Pads up a buffer to ensure the trailing bytes exist and are
1895  *	blanked. If the buffer already contains sufficient data it
1896  *	is untouched. Otherwise it is extended. Returns zero on
1897  *	success. The skb is freed on error.
1898  */
1899 
1900 static inline int skb_padto(struct sk_buff *skb, unsigned int len)
1901 {
1902 	unsigned int size = skb->len;
1903 	if (likely(size >= len))
1904 		return 0;
1905 	return skb_pad(skb, len - size);
1906 }
1907 
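/*
 * Example (illustrative sketch, hypothetical driver code): padding a
 * frame up to the 60-byte Ethernet minimum before transmission.  Note
 * that skb_padto() has already freed the skb when it fails.
 */
static inline int example_tx_pad(struct sk_buff *skb)
{
	if (skb_padto(skb, 60))		/* 60 == ETH_ZLEN */
		return -ENOMEM;		/* skb already freed; just count a drop */
	return 0;
}
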
1908 static inline int skb_add_data(struct sk_buff *skb,
1909 			       char __user *from, int copy)
1910 {
1911 	const int off = skb->len;
1912 
1913 	if (skb->ip_summed == CHECKSUM_NONE) {
1914 		int err = 0;
1915 		__wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
1916 							    copy, 0, &err);
1917 		if (!err) {
1918 			skb->csum = csum_block_add(skb->csum, csum, off);
1919 			return 0;
1920 		}
1921 	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
1922 		return 0;
1923 
1924 	__skb_trim(skb, off);
1925 	return -EFAULT;
1926 }
1927 
1928 static inline int skb_can_coalesce(struct sk_buff *skb, int i,
1929 				   const struct page *page, int off)
1930 {
1931 	if (i) {
1932 		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1933 
1934 		return page == skb_frag_page(frag) &&
1935 		       off == frag->page_offset + skb_frag_size(frag);
1936 	}
1937 	return 0;
1938 }
1939 
1940 static inline int __skb_linearize(struct sk_buff *skb)
1941 {
1942 	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
1943 }
1944 
1945 /**
1946  *	skb_linearize - convert paged skb to linear one
1947  *	@skb: buffer to linearize
1948  *
1949  *	If there is no free memory, -ENOMEM is returned; otherwise zero
1950  *	is returned and the old skb data released.
1951  */
1952 static inline int skb_linearize(struct sk_buff *skb)
1953 {
1954 	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
1955 }
1956 
1957 /**
1958  *	skb_linearize_cow - make sure skb is linear and writable
1959  *	@skb: buffer to process
1960  *
1961  *	If there is no free memory, -ENOMEM is returned; otherwise zero
1962  *	is returned and the old skb data released.
1963  */
1964 static inline int skb_linearize_cow(struct sk_buff *skb)
1965 {
1966 	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
1967 	       __skb_linearize(skb) : 0;
1968 }
1969 
1970 /**
1971  *	skb_postpull_rcsum - update checksum for received skb after pull
1972  *	@skb: buffer to update
1973  *	@start: start of data before pull
1974  *	@len: length of data pulled
1975  *
1976  *	After doing a pull on a received packet, you need to call this to
1977  *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
1978  *	CHECKSUM_NONE so that it can be recomputed from scratch.
1979  */
1980 
1981 static inline void skb_postpull_rcsum(struct sk_buff *skb,
1982 				      const void *start, unsigned int len)
1983 {
1984 	if (skb->ip_summed == CHECKSUM_COMPLETE)
1985 		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
1986 }
1987 
1988 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
1989 
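/*
 * Example (illustrative sketch, hypothetical helper): stripping a 4-byte
 * tag from the front of a received packet while keeping a
 * CHECKSUM_COMPLETE value usable.  This is the open-coded form of what
 * skb_pull_rcsum() does.
 */
static inline void example_strip_tag(struct sk_buff *skb)
{
	const void *start = skb->data;

	__skb_pull(skb, 4);
	skb_postpull_rcsum(skb, start, 4);
}
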
1990 /**
1991  *	pskb_trim_rcsum - trim received skb and update checksum
1992  *	@skb: buffer to trim
1993  *	@len: new length
1994  *
1995  *	This is exactly the same as pskb_trim except that it ensures the
1996  *	checksum of received packets is still valid after the operation.
1997  */
1998 
1999 static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
2000 {
2001 	if (likely(len >= skb->len))
2002 		return 0;
2003 	if (skb->ip_summed == CHECKSUM_COMPLETE)
2004 		skb->ip_summed = CHECKSUM_NONE;
2005 	return __pskb_trim(skb, len);
2006 }
2007 
2008 #define skb_queue_walk(queue, skb) \
2009 		for (skb = (queue)->next;					\
2010 		     skb != (struct sk_buff *)(queue);				\
2011 		     skb = skb->next)
2012 
2013 #define skb_queue_walk_safe(queue, skb, tmp)					\
2014 		for (skb = (queue)->next, tmp = skb->next;			\
2015 		     skb != (struct sk_buff *)(queue);				\
2016 		     skb = tmp, tmp = skb->next)
2017 
2018 #define skb_queue_walk_from(queue, skb)						\
2019 		for (; skb != (struct sk_buff *)(queue);			\
2020 		     skb = skb->next)
2021 
2022 #define skb_queue_walk_from_safe(queue, skb, tmp)				\
2023 		for (tmp = skb->next;						\
2024 		     skb != (struct sk_buff *)(queue);				\
2025 		     skb = tmp, tmp = skb->next)
2026 
2027 #define skb_queue_reverse_walk(queue, skb) \
2028 		for (skb = (queue)->prev;					\
2029 		     skb != (struct sk_buff *)(queue);				\
2030 		     skb = skb->prev)
2031 
2032 #define skb_queue_reverse_walk_safe(queue, skb, tmp)				\
2033 		for (skb = (queue)->prev, tmp = skb->prev;			\
2034 		     skb != (struct sk_buff *)(queue);				\
2035 		     skb = tmp, tmp = skb->prev)
2036 
2037 #define skb_queue_reverse_walk_from_safe(queue, skb, tmp)			\
2038 		for (tmp = skb->prev;						\
2039 		     skb != (struct sk_buff *)(queue);				\
2040 		     skb = tmp, tmp = skb->prev)
2041 
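/*
 * Example (illustrative sketch, hypothetical helper): the _safe walker
 * allows entries to be unlinked while iterating.  The caller is assumed
 * to hold the appropriate queue lock.
 */
static inline void example_drop_empty(struct sk_buff_head *queue)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(queue, skb, tmp) {
		if (!skb->len) {
			__skb_unlink(skb, queue);
			kfree_skb(skb);
		}
	}
}
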
2042 static inline bool skb_has_frag_list(const struct sk_buff *skb)
2043 {
2044 	return skb_shinfo(skb)->frag_list != NULL;
2045 }
2046 
2047 static inline void skb_frag_list_init(struct sk_buff *skb)
2048 {
2049 	skb_shinfo(skb)->frag_list = NULL;
2050 }
2051 
2052 static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
2053 {
2054 	frag->next = skb_shinfo(skb)->frag_list;
2055 	skb_shinfo(skb)->frag_list = frag;
2056 }
2057 
2058 #define skb_walk_frags(skb, iter)	\
2059 	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
2060 
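/*
 * Example (illustrative sketch, hypothetical helper): summing the
 * lengths of all skbs chained on the frag_list.
 */
static inline unsigned int example_frag_list_len(const struct sk_buff *skb)
{
	struct sk_buff *iter;
	unsigned int len = 0;

	skb_walk_frags(skb, iter)
		len += iter->len;
	return len;
}
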
2061 extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
2062 					   int *peeked, int *err);
2063 extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
2064 					 int noblock, int *err);
2065 extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
2066 				     struct poll_table_struct *wait);
2067 extern int	       skb_copy_datagram_iovec(const struct sk_buff *from,
2068 					       int offset, struct iovec *to,
2069 					       int size);
2070 extern int	       skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
2071 							int hlen,
2072 							struct iovec *iov);
2073 extern int	       skb_copy_datagram_from_iovec(struct sk_buff *skb,
2074 						    int offset,
2075 						    const struct iovec *from,
2076 						    int from_offset,
2077 						    int len);
2078 extern int	       skb_copy_datagram_const_iovec(const struct sk_buff *from,
2079 						     int offset,
2080 						     const struct iovec *to,
2081 						     int to_offset,
2082 						     int size);
2083 extern void	       skb_free_datagram(struct sock *sk, struct sk_buff *skb);
2084 extern void	       skb_free_datagram_locked(struct sock *sk,
2085 						struct sk_buff *skb);
2086 extern int	       skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
2087 					 unsigned int flags);
2088 extern __wsum	       skb_checksum(const struct sk_buff *skb, int offset,
2089 				    int len, __wsum csum);
2090 extern int	       skb_copy_bits(const struct sk_buff *skb, int offset,
2091 				     void *to, int len);
2092 extern int	       skb_store_bits(struct sk_buff *skb, int offset,
2093 				      const void *from, int len);
2094 extern __wsum	       skb_copy_and_csum_bits(const struct sk_buff *skb,
2095 					      int offset, u8 *to, int len,
2096 					      __wsum csum);
2097 extern int             skb_splice_bits(struct sk_buff *skb,
2098 						unsigned int offset,
2099 						struct pipe_inode_info *pipe,
2100 						unsigned int len,
2101 						unsigned int flags);
2102 extern void	       skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
2103 extern void	       skb_split(struct sk_buff *skb,
2104 				 struct sk_buff *skb1, const u32 len);
2105 extern int	       skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
2106 				 int shiftlen);
2107 
2108 extern struct sk_buff *skb_segment(struct sk_buff *skb, u32 features);
2109 
2110 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
2111 				       int len, void *buffer)
2112 {
2113 	int hlen = skb_headlen(skb);
2114 
2115 	if (hlen - offset >= len)
2116 		return skb->data + offset;
2117 
2118 	if (skb_copy_bits(skb, offset, buffer, len) < 0)
2119 		return NULL;
2120 
2121 	return buffer;
2122 }
2123 
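/*
 * Example (illustrative sketch, hypothetical helper): fetching the
 * 4-byte port pair of a transport header that may be split across paged
 * fragments.  The stack buffer is only used when the requested bytes
 * are not in the linear area.
 */
static inline int example_peek_ports(const struct sk_buff *skb, int thoff,
				     __be16 *sport, __be16 *dport)
{
	__be16 _ports[2];
	const __be16 *ports;

	ports = skb_header_pointer(skb, thoff, sizeof(_ports), _ports);
	if (!ports)
		return -EINVAL;

	*sport = ports[0];
	*dport = ports[1];
	return 0;
}
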
2124 static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
2125 					     void *to,
2126 					     const unsigned int len)
2127 {
2128 	memcpy(to, skb->data, len);
2129 }
2130 
2131 static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
2132 						    const int offset, void *to,
2133 						    const unsigned int len)
2134 {
2135 	memcpy(to, skb->data + offset, len);
2136 }
2137 
2138 static inline void skb_copy_to_linear_data(struct sk_buff *skb,
2139 					   const void *from,
2140 					   const unsigned int len)
2141 {
2142 	memcpy(skb->data, from, len);
2143 }
2144 
2145 static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
2146 						  const int offset,
2147 						  const void *from,
2148 						  const unsigned int len)
2149 {
2150 	memcpy(skb->data + offset, from, len);
2151 }
2152 
2153 extern void skb_init(void);
2154 
2155 static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
2156 {
2157 	return skb->tstamp;
2158 }
2159 
2160 /**
2161  *	skb_get_timestamp - get timestamp from a skb
2162  *	@skb: skb to get stamp from
2163  *	@stamp: pointer to struct timeval to store stamp in
2164  *
2165  *	Timestamps are stored in the skb as offsets to a base timestamp.
2166  *	This function converts the offset back to a struct timeval and stores
2167  *	it in stamp.
2168  */
2169 static inline void skb_get_timestamp(const struct sk_buff *skb,
2170 				     struct timeval *stamp)
2171 {
2172 	*stamp = ktime_to_timeval(skb->tstamp);
2173 }
2174 
2175 static inline void skb_get_timestampns(const struct sk_buff *skb,
2176 				       struct timespec *stamp)
2177 {
2178 	*stamp = ktime_to_timespec(skb->tstamp);
2179 }
2180 
2181 static inline void __net_timestamp(struct sk_buff *skb)
2182 {
2183 	skb->tstamp = ktime_get_real();
2184 }
2185 
2186 static inline ktime_t net_timedelta(ktime_t t)
2187 {
2188 	return ktime_sub(ktime_get_real(), t);
2189 }
2190 
2191 static inline ktime_t net_invalid_timestamp(void)
2192 {
2193 	return ktime_set(0, 0);
2194 }
2195 
2196 extern void skb_timestamping_init(void);
2197 
2198 #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
2199 
2200 extern void skb_clone_tx_timestamp(struct sk_buff *skb);
2201 extern bool skb_defer_rx_timestamp(struct sk_buff *skb);
2202 
2203 #else /* CONFIG_NETWORK_PHY_TIMESTAMPING */
2204 
2205 static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
2206 {
2207 }
2208 
2209 static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
2210 {
2211 	return false;
2212 }
2213 
2214 #endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */
2215 
2216 /**
2217  * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
2218  *
2219  * PHY drivers may accept clones of transmitted packets for
2220  * timestamping via their phy_driver.txtstamp method. These drivers
2221  * must call this function to return the skb back to the stack, with
2222  * or without a timestamp.
2223  *
2224  * @skb: clone of the original outgoing packet
2225  * @hwtstamps: hardware time stamps, may be NULL if not available
2226  *
2227  */
2228 void skb_complete_tx_timestamp(struct sk_buff *skb,
2229 			       struct skb_shared_hwtstamps *hwtstamps);
2230 
2231 /**
2232  * skb_tstamp_tx - queue clone of skb with send time stamps
2233  * @orig_skb:	the original outgoing packet
2234  * @hwtstamps:	hardware time stamps, may be NULL if not available
2235  *
2236  * If the skb has a socket associated, then this function clones the
2237  * skb (thus sharing the actual data and optional structures), stores
2238  * the optional hardware time stamping information (if non-NULL) or
2239  * generates a software time stamp (otherwise), then queues the clone
2240  * to the error queue of the socket.  Errors are silently ignored.
2241  */
2242 extern void skb_tstamp_tx(struct sk_buff *orig_skb,
2243 			struct skb_shared_hwtstamps *hwtstamps);
2244 
2245 static inline void sw_tx_timestamp(struct sk_buff *skb)
2246 {
2247 	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
2248 	    !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
2249 		skb_tstamp_tx(skb, NULL);
2250 }
2251 
2252 /**
2253  * skb_tx_timestamp() - Driver hook for transmit timestamping
2254  *
2255  * Ethernet MAC Drivers should call this function in their hard_xmit()
2256  * function immediately before giving the sk_buff to the MAC hardware.
2257  *
2258  * @skb: A socket buffer.
2259  */
2260 static inline void skb_tx_timestamp(struct sk_buff *skb)
2261 {
2262 	skb_clone_tx_timestamp(skb);
2263 	sw_tx_timestamp(skb);
2264 }
2265 
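/*
 * Example (illustrative sketch, hypothetical driver code): where the
 * hook sits in a transmit routine, after the descriptors are built and
 * immediately before the doorbell write hands the buffer to the MAC.
 */
static inline void example_hw_xmit(struct sk_buff *skb)
{
	/* ... set up TX descriptors for skb here ... */
	skb_tx_timestamp(skb);
	/* ... ring the hardware doorbell ... */
}
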
2266 extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
2267 extern __sum16 __skb_checksum_complete(struct sk_buff *skb);
2268 
2269 static inline int skb_csum_unnecessary(const struct sk_buff *skb)
2270 {
2271 	return skb->ip_summed & CHECKSUM_UNNECESSARY;
2272 }
2273 
2274 /**
2275  *	skb_checksum_complete - Calculate checksum of an entire packet
2276  *	@skb: packet to process
2277  *
2278  *	This function calculates the checksum over the entire packet plus
2279  *	the value of skb->csum.  The latter can be used to supply the
2280  *	checksum of a pseudo header as used by TCP/UDP.  It returns the
2281  *	checksum.
2282  *
2283  *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
2284  *	this function can be used to verify the checksum on received
2285  *	packets.  In that case the function should return zero if the
2286  *	checksum is correct.  In particular, this function will return zero
2287  *	if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
2288  *	hardware has already verified the correctness of the checksum.
2289  */
2290 static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
2291 {
2292 	return skb_csum_unnecessary(skb) ?
2293 	       0 : __skb_checksum_complete(skb);
2294 }
2295 
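/*
 * Example (illustrative sketch, hypothetical helper): validating a
 * received packet whose protocol carries a complete checksum.  It is
 * assumed that skb->csum already includes the pseudo-header sum.
 */
static inline int example_verify_csum(struct sk_buff *skb)
{
	if (skb_checksum_complete(skb))
		return -EINVAL;		/* checksum failed: drop the packet */
	return 0;
}
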
2296 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2297 extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
2298 static inline void nf_conntrack_put(struct nf_conntrack *nfct)
2299 {
2300 	if (nfct && atomic_dec_and_test(&nfct->use))
2301 		nf_conntrack_destroy(nfct);
2302 }
2303 static inline void nf_conntrack_get(struct nf_conntrack *nfct)
2304 {
2305 	if (nfct)
2306 		atomic_inc(&nfct->use);
2307 }
2308 #endif
2309 #ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
2310 static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
2311 {
2312 	if (skb)
2313 		atomic_inc(&skb->users);
2314 }
2315 static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
2316 {
2317 	if (skb)
2318 		kfree_skb(skb);
2319 }
2320 #endif
2321 #ifdef CONFIG_BRIDGE_NETFILTER
2322 static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
2323 {
2324 	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
2325 		kfree(nf_bridge);
2326 }
2327 static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
2328 {
2329 	if (nf_bridge)
2330 		atomic_inc(&nf_bridge->use);
2331 }
2332 #endif /* CONFIG_BRIDGE_NETFILTER */
2333 static inline void nf_reset(struct sk_buff *skb)
2334 {
2335 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2336 	nf_conntrack_put(skb->nfct);
2337 	skb->nfct = NULL;
2338 #endif
2339 #ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
2340 	nf_conntrack_put_reasm(skb->nfct_reasm);
2341 	skb->nfct_reasm = NULL;
2342 #endif
2343 #ifdef CONFIG_BRIDGE_NETFILTER
2344 	nf_bridge_put(skb->nf_bridge);
2345 	skb->nf_bridge = NULL;
2346 #endif
2347 }
2348 
2349 /* Note: This doesn't put any conntrack and bridge info in dst. */
2350 static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
2351 {
2352 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2353 	dst->nfct = src->nfct;
2354 	nf_conntrack_get(src->nfct);
2355 	dst->nfctinfo = src->nfctinfo;
2356 #endif
2357 #ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
2358 	dst->nfct_reasm = src->nfct_reasm;
2359 	nf_conntrack_get_reasm(src->nfct_reasm);
2360 #endif
2361 #ifdef CONFIG_BRIDGE_NETFILTER
2362 	dst->nf_bridge  = src->nf_bridge;
2363 	nf_bridge_get(src->nf_bridge);
2364 #endif
2365 }
2366 
2367 static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
2368 {
2369 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2370 	nf_conntrack_put(dst->nfct);
2371 #endif
2372 #ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
2373 	nf_conntrack_put_reasm(dst->nfct_reasm);
2374 #endif
2375 #ifdef CONFIG_BRIDGE_NETFILTER
2376 	nf_bridge_put(dst->nf_bridge);
2377 #endif
2378 	__nf_copy(dst, src);
2379 }
2380 
2381 #ifdef CONFIG_NETWORK_SECMARK
2382 static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
2383 {
2384 	to->secmark = from->secmark;
2385 }
2386 
2387 static inline void skb_init_secmark(struct sk_buff *skb)
2388 {
2389 	skb->secmark = 0;
2390 }
2391 #else
2392 static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
2393 { }
2394 
2395 static inline void skb_init_secmark(struct sk_buff *skb)
2396 { }
2397 #endif
2398 
2399 static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
2400 {
2401 	skb->queue_mapping = queue_mapping;
2402 }
2403 
2404 static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
2405 {
2406 	return skb->queue_mapping;
2407 }
2408 
2409 static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
2410 {
2411 	to->queue_mapping = from->queue_mapping;
2412 }
2413 
2414 static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
2415 {
2416 	skb->queue_mapping = rx_queue + 1;
2417 }
2418 
2419 static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
2420 {
2421 	return skb->queue_mapping - 1;
2422 }
2423 
2424 static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
2425 {
2426 	return skb->queue_mapping != 0;
2427 }
2428 
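/*
 * Example (illustrative sketch, hypothetical helper): reading back a
 * recorded RX queue.  queue_mapping stores the queue number biased by
 * one so that zero can mean "not recorded".
 */
static inline int example_rx_queue(const struct sk_buff *skb)
{
	if (!skb_rx_queue_recorded(skb))
		return -1;		/* nothing was recorded */
	return skb_get_rx_queue(skb);
}
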
2429 extern u16 __skb_tx_hash(const struct net_device *dev,
2430 			 const struct sk_buff *skb,
2431 			 unsigned int num_tx_queues);
2432 
2433 #ifdef CONFIG_XFRM
2434 static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
2435 {
2436 	return skb->sp;
2437 }
2438 #else
2439 static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
2440 {
2441 	return NULL;
2442 }
2443 #endif
2444 
2445 static inline int skb_is_gso(const struct sk_buff *skb)
2446 {
2447 	return skb_shinfo(skb)->gso_size;
2448 }
2449 
2450 static inline int skb_is_gso_v6(const struct sk_buff *skb)
2451 {
2452 	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
2453 }
2454 
2455 extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);
2456 
2457 static inline bool skb_warn_if_lro(const struct sk_buff *skb)
2458 {
2459 	/* LRO sets gso_size but not gso_type, whereas if GSO is really
2460 	 * wanted then gso_type will be set. */
2461 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
2462 
2463 	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
2464 	    unlikely(shinfo->gso_type == 0)) {
2465 		__skb_warn_lro_forwarding(skb);
2466 		return true;
2467 	}
2468 	return false;
2469 }
2470 
2471 static inline void skb_forward_csum(struct sk_buff *skb)
2472 {
2473 	/* Unfortunately we don't support this one.  Any brave souls? */
2474 	if (skb->ip_summed == CHECKSUM_COMPLETE)
2475 		skb->ip_summed = CHECKSUM_NONE;
2476 }
2477 
2478 /**
2479  * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
2480  * @skb: skb to check
2481  *
2482  * Fresh skbs have their ip_summed set to CHECKSUM_NONE.
2483  * Instead of forcing ip_summed to CHECKSUM_NONE, we can
2484  * use this helper to document places where we make this assertion.
2485  */
2486 static inline void skb_checksum_none_assert(const struct sk_buff *skb)
2487 {
2488 #ifdef DEBUG
2489 	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
2490 #endif
2491 }
2492 
2493 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
2494 
2495 static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
2496 {
2497 	if (irqs_disabled())
2498 		return false;
2499 
2500 	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
2501 		return false;
2502 
2503 	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
2504 		return false;
2505 
2506 	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
2507 	if (skb_end_pointer(skb) - skb->head < skb_size)
2508 		return false;
2509 
2510 	if (skb_shared(skb) || skb_cloned(skb))
2511 		return false;
2512 
2513 	return true;
2514 }
2515 #endif	/* __KERNEL__ */
2516 #endif	/* _LINUX_SKBUFF_H */
2517