xref: /linux-6.15/include/linux/skbuff.h (revision 384740dc)
1 /*
2  *	Definitions for the 'struct sk_buff' memory handlers.
3  *
4  *	Authors:
5  *		Alan Cox, <[email protected]>
6  *		Florian La Roche, <[email protected]>
7  *
8  *	This program is free software; you can redistribute it and/or
9  *	modify it under the terms of the GNU General Public License
10  *	as published by the Free Software Foundation; either version
11  *	2 of the License, or (at your option) any later version.
12  */
13 
14 #ifndef _LINUX_SKBUFF_H
15 #define _LINUX_SKBUFF_H
16 
17 #include <linux/kernel.h>
18 #include <linux/compiler.h>
19 #include <linux/time.h>
20 #include <linux/cache.h>
21 
22 #include <asm/atomic.h>
23 #include <asm/types.h>
24 #include <linux/spinlock.h>
25 #include <linux/net.h>
26 #include <linux/textsearch.h>
27 #include <net/checksum.h>
28 #include <linux/rcupdate.h>
29 #include <linux/dmaengine.h>
30 #include <linux/hrtimer.h>
31 
32 #define HAVE_ALLOC_SKB		/* For the drivers to know */
33 #define HAVE_ALIGNABLE_SKB	/* Ditto 8)		   */
34 
35 /* Don't change this without changing skb_csum_unnecessary! */
36 #define CHECKSUM_NONE 0
37 #define CHECKSUM_UNNECESSARY 1
38 #define CHECKSUM_COMPLETE 2
39 #define CHECKSUM_PARTIAL 3
40 
41 #define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
42 				 ~(SMP_CACHE_BYTES - 1))
43 #define SKB_WITH_OVERHEAD(X)	\
44 	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
45 #define SKB_MAX_ORDER(X, ORDER) \
46 	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
47 #define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
48 #define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))
49 
50 /* A. Checksumming of received packets by device.
51  *
52  *	NONE: device failed to checksum this packet.
53  *		skb->csum is undefined.
54  *
55  *	UNNECESSARY: the device parsed the packet and verified the checksum.
56  *		skb->csum is undefined.
57  *	      This is a poor option, but unfortunately many vendors do it,
58  *	      apparently with the secret goal of selling you a new device
59  *	      when you add a new protocol to your host, e.g. IPv6. 8)
60  *
61  *	COMPLETE: the most generic way. The device supplied the checksum of
62  *	    the _entire_ packet as seen by netif_rx in skb->csum.
63  *	    NOTE: even if a device supports only some protocols but is
64  *	    able to produce some skb->csum, it MUST use COMPLETE,
65  *	    not UNNECESSARY.
66  *
67  *	PARTIAL: identical to the case for output below.  This may occur
68  *	    on a packet received directly from another Linux OS, e.g.,
69  *	    a virtualised Linux kernel on the same host.  The packet can
70  *	    be treated in the same way as UNNECESSARY except that on
71  *	    output (i.e., forwarding) the checksum must be filled in
72  *	    by the OS or the hardware.
73  *
74  * B. Checksumming on output.
75  *
76  *	NONE: skb is checksummed by protocol or csum is not required.
77  *
78  *	PARTIAL: device is required to csum packet as seen by hard_start_xmit
79  *	from skb->csum_start to the end and to record the checksum
80  *	at skb->csum_start + skb->csum_offset.
81  *
82  *	Device must show its capabilities in dev->features, set
83  *	at device setup time.
84  *	NETIF_F_HW_CSUM	- a clever device that is able to checksum
85  *			  everything.
86  *	NETIF_F_NO_CSUM - loopback or reliable single-hop media.
87  *	NETIF_F_IP_CSUM - the device is dumb and can only checksum
88  *			  TCP/UDP over IPv4. Sigh. Vendors prefer it this
89  *			  way for unknown reasons. Though, see the comment
90  *			  above about CHECKSUM_UNNECESSARY. 8)
91  *	NETIF_F_IPV6_CSUM - about as dumb as the last one, but does IPv6.
92  *
93  *	Any questions? No questions, good. 		--ANK
94  */
95 
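/*
 * Usage sketch (illustrative, not part of the original header): a
 * protocol requesting hardware checksumming on transmit.  Assumes the
 * transport header is already set and <linux/udp.h> is included; the
 * offsets follow the definitions of csum_start/csum_offset below.
 *
 *	skb->ip_summed   = CHECKSUM_PARTIAL;
 *	skb->csum_start  = skb_transport_header(skb) - skb->head;
 *	skb->csum_offset = offsetof(struct udphdr, check);
 *
 * On receive, a driver that verified the checksum in hardware would
 * instead set skb->ip_summed = CHECKSUM_UNNECESSARY before netif_rx().
 */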
96 struct net_device;
97 struct scatterlist;
98 struct pipe_inode_info;
99 
100 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
101 struct nf_conntrack {
102 	atomic_t use;
103 };
104 #endif
105 
106 #ifdef CONFIG_BRIDGE_NETFILTER
107 struct nf_bridge_info {
108 	atomic_t use;
109 	struct net_device *physindev;
110 	struct net_device *physoutdev;
111 	unsigned int mask;
112 	unsigned long data[32 / sizeof(unsigned long)];
113 };
114 #endif
115 
116 struct sk_buff_head {
117 	/* These two members must be first. */
118 	struct sk_buff	*next;
119 	struct sk_buff	*prev;
120 
121 	__u32		qlen;
122 	spinlock_t	lock;
123 };
124 
125 struct sk_buff;
126 
127 /* To allow a 64K frame to be packed as a single skb without a frag_list */
128 #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)
129 
130 typedef struct skb_frag_struct skb_frag_t;
131 
132 struct skb_frag_struct {
133 	struct page *page;
134 	__u32 page_offset;
135 	__u32 size;
136 };
137 
138 /* This data is invariant across clones and lives at
139  * the end of the header data, i.e. at skb->end.
140  */
141 struct skb_shared_info {
142 	atomic_t	dataref;
143 	unsigned short	nr_frags;
144 	unsigned short	gso_size;
145 	/* Warning: this field is not always filled in (UFO)! */
146 	unsigned short	gso_segs;
147 	unsigned short  gso_type;
148 	__be32          ip6_frag_id;
149 	struct sk_buff	*frag_list;
150 	skb_frag_t	frags[MAX_SKB_FRAGS];
151 };
152 
153 /* We divide dataref into two halves.  The higher 16 bits hold references
154  * to the payload part of skb->data.  The lower 16 bits hold references to
155  * the entire skb->data.  A clone of a headerless skb holds the length of
156  * the header in skb->hdr_len.
157  *
158  * All users must obey the rule that the skb->data reference count must be
159  * greater than or equal to the payload reference count.
160  *
161  * Holding a reference to the payload part means that the user does not
162  * care about modifications to the header part of skb->data.
163  */
164 #define SKB_DATAREF_SHIFT 16
165 #define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
166 
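/*
 * Illustrative sketch of decoding the two halves (skb_header_cloned()
 * below performs the same computation; the local names are hypothetical):
 *
 *	int dataref  = atomic_read(&skb_shinfo(skb)->dataref);
 *	int all_refs = dataref & SKB_DATAREF_MASK;	(refs on skb->data)
 *	int payload  = dataref >> SKB_DATAREF_SHIFT;	(payload-only refs)
 */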
167 
168 enum {
169 	SKB_FCLONE_UNAVAILABLE,
170 	SKB_FCLONE_ORIG,
171 	SKB_FCLONE_CLONE,
172 };
173 
174 enum {
175 	SKB_GSO_TCPV4 = 1 << 0,
176 	SKB_GSO_UDP = 1 << 1,
177 
178 	/* This indicates the skb is from an untrusted source. */
179 	SKB_GSO_DODGY = 1 << 2,
180 
181 	/* This indicates the tcp segment has CWR set. */
182 	SKB_GSO_TCP_ECN = 1 << 3,
183 
184 	SKB_GSO_TCPV6 = 1 << 4,
185 };
186 
187 #if BITS_PER_LONG > 32
188 #define NET_SKBUFF_DATA_USES_OFFSET 1
189 #endif
190 
191 #ifdef NET_SKBUFF_DATA_USES_OFFSET
192 typedef unsigned int sk_buff_data_t;
193 #else
194 typedef unsigned char *sk_buff_data_t;
195 #endif
196 
197 /**
198  *	struct sk_buff - socket buffer
199  *	@next: Next buffer in list
200  *	@prev: Previous buffer in list
201  *	@sk: Socket we are owned by
202  *	@tstamp: Time we arrived
203  *	@dev: Device we arrived on/are leaving by
204  *	@transport_header: Transport layer header
205  *	@network_header: Network layer header
206  *	@mac_header: Link layer header
207  *	@dst: destination entry
208  *	@sp: the security path, used for xfrm
209  *	@cb: Control buffer. Free for use by every layer. Put private vars here
210  *	@len: Length of actual data
211  *	@data_len: Data length
212  *	@mac_len: Length of link layer header
213  *	@hdr_len: writable header length of cloned skb
214  *	@csum: Checksum (must include start/offset pair)
215  *	@csum_start: Offset from skb->head where checksumming should start
216  *	@csum_offset: Offset from csum_start where checksum should be stored
217  *	@local_df: allow local fragmentation
218  *	@cloned: Head may be cloned (check refcnt to be sure)
219  *	@nohdr: Payload reference only, must not modify header
220  *	@pkt_type: Packet class
221  *	@fclone: skbuff clone status
222  *	@ip_summed: Driver fed us an IP checksum
223  *	@priority: Packet queueing priority
224  *	@users: User count - see {datagram,tcp}.c
225  *	@protocol: Packet protocol from driver
226  *	@truesize: Buffer size
227  *	@head: Head of buffer
228  *	@data: Data head pointer
229  *	@tail: Tail pointer
230  *	@end: End pointer
231  *	@destructor: Destruct function
232  *	@mark: Generic packet mark
233  *	@nfct: Associated connection, if any
234  *	@ipvs_property: skbuff is owned by ipvs
235  *	@peeked: this packet has been seen already, so stats have been
236  *		done for it, don't do them again
237  *	@nf_trace: netfilter packet trace flag
238  *	@nfctinfo: Relationship of this skb to the connection
239  *	@nfct_reasm: netfilter conntrack re-assembly pointer
240  *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
241  *	@iif: ifindex of device we arrived on
242  *	@queue_mapping: Queue mapping for multiqueue devices
243  *	@tc_index: Traffic control index
244  *	@tc_verd: traffic control verdict
245  *	@ndisc_nodetype: router type (from link layer)
246  *	@do_not_encrypt: set to prevent encryption of this frame
247  *	@dma_cookie: a cookie to one of several possible DMA operations
248  *		done by skb DMA functions
249  *	@secmark: security marking
250  *	@vlan_tci: vlan tag control information
251  */
252 
253 struct sk_buff {
254 	/* These two members must be first. */
255 	struct sk_buff		*next;
256 	struct sk_buff		*prev;
257 
258 	struct sock		*sk;
259 	ktime_t			tstamp;
260 	struct net_device	*dev;
261 
262 	union {
263 		struct  dst_entry	*dst;
264 		struct  rtable		*rtable;
265 	};
266 	struct	sec_path	*sp;
267 
268 	/*
269 	 * This is the control buffer. It is free to use for every
270 	 * layer. Please put your private variables there. If you
271 	 * want to keep them across layers you have to do a skb_clone()
272 	 * first. This is owned by whoever has the skb queued ATM.
273 	 */
274 	char			cb[48];
275 
276 	unsigned int		len,
277 				data_len;
278 	__u16			mac_len,
279 				hdr_len;
280 	union {
281 		__wsum		csum;
282 		struct {
283 			__u16	csum_start;
284 			__u16	csum_offset;
285 		};
286 	};
287 	__u32			priority;
288 	__u8			local_df:1,
289 				cloned:1,
290 				ip_summed:2,
291 				nohdr:1,
292 				nfctinfo:3;
293 	__u8			pkt_type:3,
294 				fclone:2,
295 				ipvs_property:1,
296 				peeked:1,
297 				nf_trace:1;
298 	__be16			protocol;
299 
300 	void			(*destructor)(struct sk_buff *skb);
301 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
302 	struct nf_conntrack	*nfct;
303 	struct sk_buff		*nfct_reasm;
304 #endif
305 #ifdef CONFIG_BRIDGE_NETFILTER
306 	struct nf_bridge_info	*nf_bridge;
307 #endif
308 
309 	int			iif;
310 	__u16			queue_mapping;
311 #ifdef CONFIG_NET_SCHED
312 	__u16			tc_index;	/* traffic control index */
313 #ifdef CONFIG_NET_CLS_ACT
314 	__u16			tc_verd;	/* traffic control verdict */
315 #endif
316 #endif
317 #ifdef CONFIG_IPV6_NDISC_NODETYPE
318 	__u8			ndisc_nodetype:2;
319 #endif
320 #if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
321 	__u8			do_not_encrypt:1;
322 #endif
323 	/* 0/13/14 bit hole */
324 
325 #ifdef CONFIG_NET_DMA
326 	dma_cookie_t		dma_cookie;
327 #endif
328 #ifdef CONFIG_NETWORK_SECMARK
329 	__u32			secmark;
330 #endif
331 
332 	__u32			mark;
333 
334 	__u16			vlan_tci;
335 
336 	sk_buff_data_t		transport_header;
337 	sk_buff_data_t		network_header;
338 	sk_buff_data_t		mac_header;
339 	/* These elements must be at the end, see alloc_skb() for details.  */
340 	sk_buff_data_t		tail;
341 	sk_buff_data_t		end;
342 	unsigned char		*head,
343 				*data;
344 	unsigned int		truesize;
345 	atomic_t		users;
346 };
347 
348 #ifdef __KERNEL__
349 /*
350  *	Handling routines are only of interest to the kernel
351  */
352 #include <linux/slab.h>
353 
354 #include <asm/system.h>
355 
356 extern void kfree_skb(struct sk_buff *skb);
357 extern void	       __kfree_skb(struct sk_buff *skb);
358 extern struct sk_buff *__alloc_skb(unsigned int size,
359 				   gfp_t priority, int fclone, int node);
360 static inline struct sk_buff *alloc_skb(unsigned int size,
361 					gfp_t priority)
362 {
363 	return __alloc_skb(size, priority, 0, -1);
364 }
365 
366 static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
367 					       gfp_t priority)
368 {
369 	return __alloc_skb(size, priority, 1, -1);
370 }
371 
372 extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
373 extern struct sk_buff *skb_clone(struct sk_buff *skb,
374 				 gfp_t priority);
375 extern struct sk_buff *skb_copy(const struct sk_buff *skb,
376 				gfp_t priority);
377 extern struct sk_buff *pskb_copy(struct sk_buff *skb,
378 				 gfp_t gfp_mask);
379 extern int	       pskb_expand_head(struct sk_buff *skb,
380 					int nhead, int ntail,
381 					gfp_t gfp_mask);
382 extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
383 					    unsigned int headroom);
384 extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
385 				       int newheadroom, int newtailroom,
386 				       gfp_t priority);
387 extern int	       skb_to_sgvec(struct sk_buff *skb,
388 				    struct scatterlist *sg, int offset,
389 				    int len);
390 extern int	       skb_cow_data(struct sk_buff *skb, int tailbits,
391 				    struct sk_buff **trailer);
392 extern int	       skb_pad(struct sk_buff *skb, int pad);
393 #define dev_kfree_skb(a)	kfree_skb(a)
394 extern void	      skb_over_panic(struct sk_buff *skb, int len,
395 				     void *here);
396 extern void	      skb_under_panic(struct sk_buff *skb, int len,
397 				      void *here);
398 extern void	      skb_truesize_bug(struct sk_buff *skb);
399 
400 static inline void skb_truesize_check(struct sk_buff *skb)
401 {
402 	int len = sizeof(struct sk_buff) + skb->len;
403 
404 	if (unlikely((int)skb->truesize < len))
405 		skb_truesize_bug(skb);
406 }
407 
408 extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
409 			int getfrag(void *from, char *to, int offset,
410 			int len,int odd, struct sk_buff *skb),
411 			void *from, int length);
412 
413 struct skb_seq_state {
415 	__u32		lower_offset;
416 	__u32		upper_offset;
417 	__u32		frag_idx;
418 	__u32		stepped_offset;
419 	struct sk_buff	*root_skb;
420 	struct sk_buff	*cur_skb;
421 	__u8		*frag_data;
422 };
423 
424 extern void	      skb_prepare_seq_read(struct sk_buff *skb,
425 					   unsigned int from, unsigned int to,
426 					   struct skb_seq_state *st);
427 extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
428 				   struct skb_seq_state *st);
429 extern void	      skb_abort_seq_read(struct skb_seq_state *st);
430 
431 extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
432 				    unsigned int to, struct ts_config *config,
433 				    struct ts_state *state);
434 
435 #ifdef NET_SKBUFF_DATA_USES_OFFSET
436 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
437 {
438 	return skb->head + skb->end;
439 }
440 #else
441 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
442 {
443 	return skb->end;
444 }
445 #endif
446 
447 /* Internal */
448 #define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))
449 
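/*
 * Usage sketch (illustrative): the shared info is reached through
 * skb_shinfo(), e.g. to inspect fragment and GSO state:
 *
 *	unsigned short nr_frags = skb_shinfo(skb)->nr_frags;
 *	unsigned short mss	= skb_shinfo(skb)->gso_size;
 *	struct sk_buff *flist	= skb_shinfo(skb)->frag_list;
 */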
450 /**
451  *	skb_queue_empty - check if a queue is empty
452  *	@list: queue head
453  *
454  *	Returns true if the queue is empty, false otherwise.
455  */
456 static inline int skb_queue_empty(const struct sk_buff_head *list)
457 {
458 	return list->next == (struct sk_buff *)list;
459 }
460 
461 /**
462  *	skb_get - reference buffer
463  *	@skb: buffer to reference
464  *
465  *	Makes another reference to a socket buffer and returns a pointer
466  *	to the buffer.
467  */
468 static inline struct sk_buff *skb_get(struct sk_buff *skb)
469 {
470 	atomic_inc(&skb->users);
471 	return skb;
472 }
473 
474 /*
475  * If users == 1, we are the only owner and can avoid a redundant
476  * atomic change.
477  */
478 
479 /**
480  *	skb_cloned - is the buffer a clone
481  *	@skb: buffer to check
482  *
483  *	Returns true if the buffer was generated with skb_clone() and is
484  *	one of multiple shared copies of the buffer. Cloned buffers are
485  *	shared data so must not be written to under normal circumstances.
486  */
487 static inline int skb_cloned(const struct sk_buff *skb)
488 {
489 	return skb->cloned &&
490 	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
491 }
492 
493 /**
494  *	skb_header_cloned - is the header a clone
495  *	@skb: buffer to check
496  *
497  *	Returns true if modifying the header part of the buffer requires
498  *	the data to be copied.
499  */
500 static inline int skb_header_cloned(const struct sk_buff *skb)
501 {
502 	int dataref;
503 
504 	if (!skb->cloned)
505 		return 0;
506 
507 	dataref = atomic_read(&skb_shinfo(skb)->dataref);
508 	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
509 	return dataref != 1;
510 }
511 
512 /**
513  *	skb_header_release - release reference to header
514  *	@skb: buffer to operate on
515  *
516  *	Drop a reference to the header part of the buffer.  This is done
517  *	by acquiring a payload reference.  You must not read from the header
518  *	part of skb->data after this.
519  */
520 static inline void skb_header_release(struct sk_buff *skb)
521 {
522 	BUG_ON(skb->nohdr);
523 	skb->nohdr = 1;
524 	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
525 }
526 
527 /**
528  *	skb_shared - is the buffer shared
529  *	@skb: buffer to check
530  *
531  *	Returns true if more than one person has a reference to this
532  *	buffer.
533  */
534 static inline int skb_shared(const struct sk_buff *skb)
535 {
536 	return atomic_read(&skb->users) != 1;
537 }
538 
539 /**
540  *	skb_share_check - check if buffer is shared and if so clone it
541  *	@skb: buffer to check
542  *	@pri: priority for memory allocation
543  *
544  *	If the buffer is shared the buffer is cloned and the old copy
545  *	drops a reference. A new clone with a single reference is returned.
546  *	If the buffer is not shared the original buffer is returned. When
547  *	called from interrupt context or with spinlocks held, @pri must
548  *	be %GFP_ATOMIC.
549  *
550  *	NULL is returned on a memory allocation failure.
551  */
552 static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
553 					      gfp_t pri)
554 {
555 	might_sleep_if(pri & __GFP_WAIT);
556 	if (skb_shared(skb)) {
557 		struct sk_buff *nskb = skb_clone(skb, pri);
558 		kfree_skb(skb);
559 		skb = nskb;
560 	}
561 	return skb;
562 }
563 
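/*
 * Usage sketch (illustrative): a receive handler that may modify the
 * skb first makes sure it holds the only reference.  GFP_ATOMIC because
 * this typically runs in softirq context; NET_RX_DROP is the usual
 * return value from <linux/netdevice.h>:
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (!skb)
 *		return NET_RX_DROP;	(clone failed, original was freed)
 */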
564 /*
565  *	Copy shared buffers into a new sk_buff. We effectively do COW on
566  *	packets to handle cases where we have a local reader and a forward
567  *	path, and a couple of other messy ones. The normal one is tcpdumping
568  *	a packet that's being forwarded.
569  */
570 
571 /**
572  *	skb_unshare - make a copy of a shared buffer
573  *	@skb: buffer to check
574  *	@pri: priority for memory allocation
575  *
576  *	If the socket buffer is a clone then this function creates a new
577  *	copy of the data, drops a reference count on the old copy and returns
578  *	the new copy with the reference count at 1. If the buffer is not a clone
579  *	the original buffer is returned. When called with a spinlock held or
580  *	from interrupt context, @pri must be %GFP_ATOMIC.
581  *
582  *	%NULL is returned on a memory allocation failure.
583  */
584 static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
585 					  gfp_t pri)
586 {
587 	might_sleep_if(pri & __GFP_WAIT);
588 	if (skb_cloned(skb)) {
589 		struct sk_buff *nskb = skb_copy(skb, pri);
590 		kfree_skb(skb);	/* Free our shared copy */
591 		skb = nskb;
592 	}
593 	return skb;
594 }
595 
596 /**
597  *	skb_peek - peek at the head of a queue
598  *	@list_: list to peek at
599  *
600  *	Peek an &sk_buff. Unlike most other operations you _MUST_
601  *	be careful with this one. A peek leaves the buffer on the
602  *	list and someone else may run off with it. You must hold
603  *	the appropriate locks or have a private queue to do this.
604  *
605  *	Returns %NULL for an empty list or a pointer to the head element.
606  *	The reference count is not incremented and the reference is therefore
607  *	volatile. Use with caution.
608  */
609 static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
610 {
611 	struct sk_buff *list = ((struct sk_buff *)list_)->next;
612 	if (list == (struct sk_buff *)list_)
613 		list = NULL;
614 	return list;
615 }
616 
617 /**
618  *	skb_peek_tail - peek at the tail of a queue
619  *	@list_: list to peek at
620  *
621  *	Peek an &sk_buff. Unlike most other operations you _MUST_
622  *	be careful with this one. A peek leaves the buffer on the
623  *	list and someone else may run off with it. You must hold
624  *	the appropriate locks or have a private queue to do this.
625  *
626  *	Returns %NULL for an empty list or a pointer to the tail element.
627  *	The reference count is not incremented and the reference is therefore
628  *	volatile. Use with caution.
629  */
630 static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
631 {
632 	struct sk_buff *list = ((struct sk_buff *)list_)->prev;
633 	if (list == (struct sk_buff *)list_)
634 		list = NULL;
635 	return list;
636 }
637 
638 /**
639  *	skb_queue_len	- get queue length
640  *	@list_: list to measure
641  *
642  *	Return the length of an &sk_buff queue.
643  */
644 static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
645 {
646 	return list_->qlen;
647 }
648 
649 /*
650  * This function creates a split out lock class for each invocation;
651  * this is needed for now since a whole lot of users of the skb-queue
652  * infrastructure in drivers have different locking usage (in hardirq)
653  * than the networking core (in softirq only). In the long run either the
654  * network layer or the drivers should be annotated to consolidate the
655  * main types of usage into 3 classes.
656  */
657 static inline void skb_queue_head_init(struct sk_buff_head *list)
658 {
659 	spin_lock_init(&list->lock);
660 	list->prev = list->next = (struct sk_buff *)list;
661 	list->qlen = 0;
662 }
663 
664 static inline void skb_queue_head_init_class(struct sk_buff_head *list,
665 		struct lock_class_key *class)
666 {
667 	skb_queue_head_init(list);
668 	lockdep_set_class(&list->lock, class);
669 }
670 
671 /*
672  *	Insert an sk_buff on a list.
673  *
674  *	The "__skb_xxxx()" functions are the non-atomic ones that
675  *	can only be called with interrupts disabled.
676  */
677 extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
678 static inline void __skb_insert(struct sk_buff *newsk,
679 				struct sk_buff *prev, struct sk_buff *next,
680 				struct sk_buff_head *list)
681 {
682 	newsk->next = next;
683 	newsk->prev = prev;
684 	next->prev  = prev->next = newsk;
685 	list->qlen++;
686 }
687 
688 /**
689  *	__skb_queue_after - queue a buffer after another buffer in a list
690  *	@list: list to use
691  *	@prev: place after this buffer
692  *	@newsk: buffer to queue
693  *
694  *	Queue a buffer in the middle of a list. This function takes no locks
695  *	and you must therefore hold required locks before calling it.
696  *
697  *	A buffer cannot be placed on two lists at the same time.
698  */
699 static inline void __skb_queue_after(struct sk_buff_head *list,
700 				     struct sk_buff *prev,
701 				     struct sk_buff *newsk)
702 {
703 	__skb_insert(newsk, prev, prev->next, list);
704 }
705 
706 extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
707 		       struct sk_buff_head *list);
708 
709 static inline void __skb_queue_before(struct sk_buff_head *list,
710 				      struct sk_buff *next,
711 				      struct sk_buff *newsk)
712 {
713 	__skb_insert(newsk, next->prev, next, list);
714 }
715 
716 /**
717  *	__skb_queue_head - queue a buffer at the list head
718  *	@list: list to use
719  *	@newsk: buffer to queue
720  *
721  *	Queue a buffer at the start of a list. This function takes no locks
722  *	and you must therefore hold required locks before calling it.
723  *
724  *	A buffer cannot be placed on two lists at the same time.
725  */
726 extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
727 static inline void __skb_queue_head(struct sk_buff_head *list,
728 				    struct sk_buff *newsk)
729 {
730 	__skb_queue_after(list, (struct sk_buff *)list, newsk);
731 }
732 
733 /**
734  *	__skb_queue_tail - queue a buffer at the list tail
735  *	@list: list to use
736  *	@newsk: buffer to queue
737  *
738  *	Queue a buffer at the end of a list. This function takes no locks
739  *	and you must therefore hold required locks before calling it.
740  *
741  *	A buffer cannot be placed on two lists at the same time.
742  */
743 extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
744 static inline void __skb_queue_tail(struct sk_buff_head *list,
745 				   struct sk_buff *newsk)
746 {
747 	__skb_queue_before(list, (struct sk_buff *)list, newsk);
748 }
749 
750 /*
751  * remove sk_buff from list. _Must_ be called atomically, and with
752  * the list known.
753  */
754 extern void	   skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
755 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
756 {
757 	struct sk_buff *next, *prev;
758 
759 	list->qlen--;
760 	next	   = skb->next;
761 	prev	   = skb->prev;
762 	skb->next  = skb->prev = NULL;
763 	next->prev = prev;
764 	prev->next = next;
765 }
766 
767 /**
768  *	__skb_dequeue - remove from the head of the queue
769  *	@list: list to dequeue from
770  *
771  *	Remove the head of the list. This function does not take any locks
772  *	so must be used with appropriate locks held only. The head item is
773  *	returned or %NULL if the list is empty.
774  */
775 extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
776 static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
777 {
778 	struct sk_buff *skb = skb_peek(list);
779 	if (skb)
780 		__skb_unlink(skb, list);
781 	return skb;
782 }
783 
784 /**
785  *	__skb_dequeue_tail - remove from the tail of the queue
786  *	@list: list to dequeue from
787  *
788  *	Remove the tail of the list. This function does not take any locks
789  *	so must be used with appropriate locks held only. The tail item is
790  *	returned or %NULL if the list is empty.
791  */
792 extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
793 static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
794 {
795 	struct sk_buff *skb = skb_peek_tail(list);
796 	if (skb)
797 		__skb_unlink(skb, list);
798 	return skb;
799 }
800 
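/*
 * Usage sketch (illustrative): a private queue.  The locked helpers
 * (skb_queue_tail/skb_dequeue) take list->lock themselves; the
 * __-prefixed variants require the caller to hold it.  "rxq" is
 * hypothetical and the queued skb comes from elsewhere:
 *
 *	struct sk_buff_head rxq;
 *
 *	skb_queue_head_init(&rxq);
 *	skb_queue_tail(&rxq, skb);		(producer)
 *
 *	while ((skb = skb_dequeue(&rxq)) != NULL)
 *		kfree_skb(skb);			(consumer)
 */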
801 
802 static inline int skb_is_nonlinear(const struct sk_buff *skb)
803 {
804 	return skb->data_len;
805 }
806 
807 static inline unsigned int skb_headlen(const struct sk_buff *skb)
808 {
809 	return skb->len - skb->data_len;
810 }
811 
812 static inline int skb_pagelen(const struct sk_buff *skb)
813 {
814 	int i, len = 0;
815 
816 	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
817 		len += skb_shinfo(skb)->frags[i].size;
818 	return len + skb_headlen(skb);
819 }
820 
821 static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
822 				      struct page *page, int off, int size)
823 {
824 	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
825 
826 	frag->page		  = page;
827 	frag->page_offset	  = off;
828 	frag->size		  = size;
829 	skb_shinfo(skb)->nr_frags = i + 1;
830 }
831 
832 #define SKB_PAGE_ASSERT(skb) 	BUG_ON(skb_shinfo(skb)->nr_frags)
833 #define SKB_FRAG_ASSERT(skb) 	BUG_ON(skb_shinfo(skb)->frag_list)
834 #define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))
835 
836 #ifdef NET_SKBUFF_DATA_USES_OFFSET
837 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
838 {
839 	return skb->head + skb->tail;
840 }
841 
842 static inline void skb_reset_tail_pointer(struct sk_buff *skb)
843 {
844 	skb->tail = skb->data - skb->head;
845 }
846 
847 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
848 {
849 	skb_reset_tail_pointer(skb);
850 	skb->tail += offset;
851 }
852 #else /* NET_SKBUFF_DATA_USES_OFFSET */
853 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
854 {
855 	return skb->tail;
856 }
857 
858 static inline void skb_reset_tail_pointer(struct sk_buff *skb)
859 {
860 	skb->tail = skb->data;
861 }
862 
863 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
864 {
865 	skb->tail = skb->data + offset;
866 }
867 
868 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
869 
870 /*
871  *	Add data to an sk_buff
872  */
873 extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
874 static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
875 {
876 	unsigned char *tmp = skb_tail_pointer(skb);
877 	SKB_LINEAR_ASSERT(skb);
878 	skb->tail += len;
879 	skb->len  += len;
880 	return tmp;
881 }
882 
883 extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
884 static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
885 {
886 	skb->data -= len;
887 	skb->len  += len;
888 	return skb->data;
889 }
890 
891 extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
892 static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
893 {
894 	skb->len -= len;
895 	BUG_ON(skb->len < skb->data_len);
896 	return skb->data += len;
897 }
898 
899 extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
900 
901 static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
902 {
903 	if (len > skb_headlen(skb) &&
904 	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
905 		return NULL;
906 	skb->len -= len;
907 	return skb->data += len;
908 }
909 
910 static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
911 {
912 	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
913 }
914 
915 static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
916 {
917 	if (likely(len <= skb_headlen(skb)))
918 		return 1;
919 	if (unlikely(len > skb->len))
920 		return 0;
921 	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
922 }
923 
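/*
 * Usage sketch (illustrative): make sure a header is in the linear
 * area before dereferencing it.  Assumes skb->data currently points at
 * the network header and <linux/ip.h> is included; "drop" is a
 * hypothetical error label:
 *
 *	struct iphdr *iph;
 *
 *	if (!pskb_may_pull(skb, sizeof(*iph)))
 *		goto drop;		(truncated packet or no memory)
 *	iph = (struct iphdr *)skb->data;
 */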
924 /**
925  *	skb_headroom - bytes at buffer head
926  *	@skb: buffer to check
927  *
928  *	Return the number of bytes of free space at the head of an &sk_buff.
929  */
930 static inline unsigned int skb_headroom(const struct sk_buff *skb)
931 {
932 	return skb->data - skb->head;
933 }
934 
935 /**
936  *	skb_tailroom - bytes at buffer end
937  *	@skb: buffer to check
938  *
939  *	Return the number of bytes of free space at the tail of an sk_buff
940  */
941 static inline int skb_tailroom(const struct sk_buff *skb)
942 {
943 	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
944 }
945 
946 /**
947  *	skb_reserve - adjust headroom
948  *	@skb: buffer to alter
949  *	@len: bytes to move
950  *
951  *	Increase the headroom of an empty &sk_buff by reducing the tail
952  *	room. This is only allowed for an empty buffer.
953  */
954 static inline void skb_reserve(struct sk_buff *skb, int len)
955 {
956 	skb->data += len;
957 	skb->tail += len;
958 }
959 
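/*
 * Usage sketch (illustrative): building an outgoing frame with the
 * helpers above.  "payload" and "payload_len" are hypothetical;
 * ETH_HLEN and struct ethhdr come from <linux/if_ether.h>:
 *
 *	struct sk_buff *skb;
 *	struct ethhdr *eth;
 *
 *	skb = alloc_skb(ETH_HLEN + payload_len, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, ETH_HLEN);		(headroom for the link header)
 *	memcpy(skb_put(skb, payload_len), payload, payload_len);
 *	eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
 */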
960 #ifdef NET_SKBUFF_DATA_USES_OFFSET
961 static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
962 {
963 	return skb->head + skb->transport_header;
964 }
965 
966 static inline void skb_reset_transport_header(struct sk_buff *skb)
967 {
968 	skb->transport_header = skb->data - skb->head;
969 }
970 
971 static inline void skb_set_transport_header(struct sk_buff *skb,
972 					    const int offset)
973 {
974 	skb_reset_transport_header(skb);
975 	skb->transport_header += offset;
976 }
977 
978 static inline unsigned char *skb_network_header(const struct sk_buff *skb)
979 {
980 	return skb->head + skb->network_header;
981 }
982 
983 static inline void skb_reset_network_header(struct sk_buff *skb)
984 {
985 	skb->network_header = skb->data - skb->head;
986 }
987 
988 static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
989 {
990 	skb_reset_network_header(skb);
991 	skb->network_header += offset;
992 }
993 
994 static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
995 {
996 	return skb->head + skb->mac_header;
997 }
998 
999 static inline int skb_mac_header_was_set(const struct sk_buff *skb)
1000 {
1001 	return skb->mac_header != ~0U;
1002 }
1003 
1004 static inline void skb_reset_mac_header(struct sk_buff *skb)
1005 {
1006 	skb->mac_header = skb->data - skb->head;
1007 }
1008 
1009 static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
1010 {
1011 	skb_reset_mac_header(skb);
1012 	skb->mac_header += offset;
1013 }
1014 
1015 #else /* NET_SKBUFF_DATA_USES_OFFSET */
1016 
1017 static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
1018 {
1019 	return skb->transport_header;
1020 }
1021 
1022 static inline void skb_reset_transport_header(struct sk_buff *skb)
1023 {
1024 	skb->transport_header = skb->data;
1025 }
1026 
1027 static inline void skb_set_transport_header(struct sk_buff *skb,
1028 					    const int offset)
1029 {
1030 	skb->transport_header = skb->data + offset;
1031 }
1032 
1033 static inline unsigned char *skb_network_header(const struct sk_buff *skb)
1034 {
1035 	return skb->network_header;
1036 }
1037 
1038 static inline void skb_reset_network_header(struct sk_buff *skb)
1039 {
1040 	skb->network_header = skb->data;
1041 }
1042 
1043 static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
1044 {
1045 	skb->network_header = skb->data + offset;
1046 }
1047 
1048 static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
1049 {
1050 	return skb->mac_header;
1051 }
1052 
1053 static inline int skb_mac_header_was_set(const struct sk_buff *skb)
1054 {
1055 	return skb->mac_header != NULL;
1056 }
1057 
1058 static inline void skb_reset_mac_header(struct sk_buff *skb)
1059 {
1060 	skb->mac_header = skb->data;
1061 }
1062 
1063 static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
1064 {
1065 	skb->mac_header = skb->data + offset;
1066 }
1067 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
1068 
1069 static inline int skb_transport_offset(const struct sk_buff *skb)
1070 {
1071 	return skb_transport_header(skb) - skb->data;
1072 }
1073 
1074 static inline u32 skb_network_header_len(const struct sk_buff *skb)
1075 {
1076 	return skb->transport_header - skb->network_header;
1077 }
1078 
1079 static inline int skb_network_offset(const struct sk_buff *skb)
1080 {
1081 	return skb_network_header(skb) - skb->data;
1082 }
1083 
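/*
 * Usage sketch (illustrative): recording header positions while
 * walking an incoming Ethernet/IP frame.  ETH_HLEN comes from
 * <linux/if_ether.h>; "ip_hlen" is hypothetical:
 *
 *	skb_reset_mac_header(skb);	(skb->data is at the MAC header)
 *	skb_pull(skb, ETH_HLEN);
 *	skb_reset_network_header(skb);
 *	skb_set_transport_header(skb, ip_hlen);	(offset from skb->data)
 */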
1084 /*
1085  * CPUs often take a performance hit when accessing unaligned memory
1086  * locations. The actual performance hit varies, it can be small if the
1087  * hardware handles it or large if we have to take an exception and fix it
1088  * in software.
1089  *
1090  * Since an ethernet header is 14 bytes, network drivers often end up with
1091  * the IP header at an unaligned offset. The IP header can be aligned by
1092  * shifting the start of the packet by 2 bytes. Drivers should do this
1093  * with:
1094  *
1095  * skb_reserve(skb, NET_IP_ALIGN);
1096  *
1097  * The downside to this alignment of the IP header is that the DMA is now
1098  * unaligned. On some architectures the cost of an unaligned DMA is high
1099  * and this cost outweighs the gains made by aligning the IP header.
1100  *
1101  * Since this trade off varies between architectures, we allow NET_IP_ALIGN
1102  * to be overridden.
1103  */
1104 #ifndef NET_IP_ALIGN
1105 #define NET_IP_ALIGN	2
1106 #endif
1107 
1108 /*
1109  * The networking layer reserves some headroom in skb data (via
1110  * dev_alloc_skb). This is used to avoid having to reallocate skb data when
1111  * the header has to grow. In the default case, if the header has to grow
1112  * 16 bytes or less we avoid the reallocation.
1113  *
1114  * Unfortunately this headroom changes the DMA alignment of the resulting
1115  * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
1116  * on some architectures. An architecture can override this value,
1117  * perhaps setting it to a cacheline in size (since that will maintain
1118  * cacheline alignment of the DMA). It must be a power of 2.
1119  *
1120  * Various parts of the networking layer expect at least 16 bytes of
1121  * headroom; you should not reduce this.
1122  */
1123 #ifndef NET_SKB_PAD
1124 #define NET_SKB_PAD	16
1125 #endif
1126 
1127 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
1128 
1129 static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
1130 {
1131 	if (unlikely(skb->data_len)) {
1132 		WARN_ON(1);
1133 		return;
1134 	}
1135 	skb->len = len;
1136 	skb_set_tail_pointer(skb, len);
1137 }
1138 
1139 extern void skb_trim(struct sk_buff *skb, unsigned int len);
1140 
1141 static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
1142 {
1143 	if (skb->data_len)
1144 		return ___pskb_trim(skb, len);
1145 	__skb_trim(skb, len);
1146 	return 0;
1147 }
1148 
1149 static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
1150 {
1151 	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
1152 }
1153 
1154 /**
1155  *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
1156  *	@skb: buffer to alter
1157  *	@len: new length
1158  *
1159  *	This is identical to pskb_trim except that the caller knows that
1160  *	the skb is not cloned so we should never get an error due to out-
1161  *	of-memory.
1162  */
1163 static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
1164 {
1165 	int err = pskb_trim(skb, len);
1166 	BUG_ON(err);
1167 }
1168 
1169 /**
1170  *	skb_orphan - orphan a buffer
1171  *	@skb: buffer to orphan
1172  *
1173  *	If a buffer currently has an owner then we call the owner's
1174  *	destructor function and make the @skb unowned. The buffer continues
1175  *	to exist but is no longer charged to its former owner.
1176  */
1177 static inline void skb_orphan(struct sk_buff *skb)
1178 {
1179 	if (skb->destructor)
1180 		skb->destructor(skb);
1181 	skb->destructor = NULL;
1182 	skb->sk		= NULL;
1183 }
1184 
1185 /**
1186  *	__skb_queue_purge - empty a list
1187  *	@list: list to empty
1188  *
1189  *	Delete all buffers on an &sk_buff list. Each buffer is removed from
1190  *	the list and one reference dropped. This function does not take the
1191  *	list lock and the caller must hold the relevant locks to use it.
1192  */
1193 extern void skb_queue_purge(struct sk_buff_head *list);
1194 static inline void __skb_queue_purge(struct sk_buff_head *list)
1195 {
1196 	struct sk_buff *skb;
1197 	while ((skb = __skb_dequeue(list)) != NULL)
1198 		kfree_skb(skb);
1199 }
1200 
1201 /**
1202  *	__dev_alloc_skb - allocate an skbuff for receiving
1203  *	@length: length to allocate
1204  *	@gfp_mask: get_free_pages mask, passed to alloc_skb
1205  *
1206  *	Allocate a new &sk_buff and assign it a usage count of one. The
1207  *	buffer has unspecified headroom built in. Users should allocate
1208  *	the headroom they think they need without accounting for the
1209  *	built in space. The built in space is used for optimisations.
1210  *
1211  *	%NULL is returned if there is no free memory.
1212  */
1213 static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
1214 					      gfp_t gfp_mask)
1215 {
1216 	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
1217 	if (likely(skb))
1218 		skb_reserve(skb, NET_SKB_PAD);
1219 	return skb;
1220 }
1221 
1222 extern struct sk_buff *dev_alloc_skb(unsigned int length);
1223 
1224 extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
1225 		unsigned int length, gfp_t gfp_mask);
1226 
1227 /**
1228  *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
1229  *	@dev: network device to receive on
1230  *	@length: length to allocate
1231  *
1232  *	Allocate a new &sk_buff and assign it a usage count of one. The
1233  *	buffer has unspecified headroom built in. Users should allocate
1234  *	the headroom they think they need without accounting for the
1235  *	built in space. The built in space is used for optimisations.
1236  *
1237  *	%NULL is returned if there is no free memory. Although this function
1238  *	allocates memory it can be called from an interrupt.
1239  */
1240 static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
1241 		unsigned int length)
1242 {
1243 	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
1244 }
1245 
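/*
 * Usage sketch (illustrative): a driver receive path combining
 * netdev_alloc_skb() with the NET_IP_ALIGN trick described above.
 * eth_type_trans() and netif_rx() are the usual net-core entry points;
 * "len" and "rx_buf" are hypothetical:
 *
 *	skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
 *	if (!skb)
 *		return;				(drop on allocation failure)
 *	skb_reserve(skb, NET_IP_ALIGN);		(align the IP header)
 *	memcpy(skb_put(skb, len), rx_buf, len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */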
1246 /**
1247  *	skb_clone_writable - is the header of a clone writable
1248  *	@skb: buffer to check
1249  *	@len: length up to which to write
1250  *
1251  *	Returns true if modifying the header part of the cloned buffer
1252  *	does not require the data to be copied.
1253  */
1254 static inline int skb_clone_writable(struct sk_buff *skb, unsigned int len)
1255 {
1256 	return !skb_header_cloned(skb) &&
1257 	       skb_headroom(skb) + len <= skb->hdr_len;
1258 }
1259 
1260 static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
1261 			    int cloned)
1262 {
1263 	int delta = 0;
1264 
1265 	if (headroom < NET_SKB_PAD)
1266 		headroom = NET_SKB_PAD;
1267 	if (headroom > skb_headroom(skb))
1268 		delta = headroom - skb_headroom(skb);
1269 
1270 	if (delta || cloned)
1271 		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
1272 					GFP_ATOMIC);
1273 	return 0;
1274 }
1275 
1276 /**
1277  *	skb_cow - copy header of skb when it is required
1278  *	@skb: buffer to cow
1279  *	@headroom: needed headroom
1280  *
1281  *	If the skb passed lacks sufficient headroom or its data part
1282  *	is shared, data is reallocated. If reallocation fails, an error
1283  *	is returned and original skb is not changed.
1284  *
1285  *	The result is skb with writable area skb->head...skb->tail
1286  *	and at least @headroom of space at head.
1287  */
1288 static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
1289 {
1290 	return __skb_cow(skb, headroom, skb_cloned(skb));
1291 }
1292 
1293 /**
1294  *	skb_cow_head - skb_cow but only making the head writable
1295  *	@skb: buffer to cow
1296  *	@headroom: needed headroom
1297  *
1298  *	This function is identical to skb_cow except that we replace the
1299  *	skb_cloned check by skb_header_cloned.  It should be used when
1300  *	you only need to push on some header and do not need to modify
1301  *	the data.
1302  */
1303 static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
1304 {
1305 	return __skb_cow(skb, headroom, skb_header_cloned(skb));
1306 }
1307 
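/*
 * Usage sketch (illustrative): an encapsulation path about to push a
 * new link-layer header uses skb_cow_head() so only the header part
 * needs to be private.  "new_hdr" and "drop" are hypothetical:
 *
 *	if (skb_cow_head(skb, ETH_HLEN))
 *		goto drop;			(reallocation failed)
 *	memcpy(skb_push(skb, ETH_HLEN), new_hdr, ETH_HLEN);
 */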
1308 /**
1309  *	skb_padto	- pad an skbuff up to a minimal size
1310  *	@skb: buffer to pad
1311  *	@len: minimal length
1312  *
1313  *	Pads up a buffer to ensure the trailing bytes exist and are
1314  *	blanked. If the buffer already contains sufficient data it
1315  *	is untouched. Otherwise it is extended. Returns zero on
1316  *	success. The skb is freed on error.
1317  */
1318 
1319 static inline int skb_padto(struct sk_buff *skb, unsigned int len)
1320 {
1321 	unsigned int size = skb->len;
1322 	if (likely(size >= len))
1323 		return 0;
1324 	return skb_pad(skb, len - size);
1325 }
1326 
1327 static inline int skb_add_data(struct sk_buff *skb,
1328 			       char __user *from, int copy)
1329 {
1330 	const int off = skb->len;
1331 
1332 	if (skb->ip_summed == CHECKSUM_NONE) {
1333 		int err = 0;
1334 		__wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
1335 							    copy, 0, &err);
1336 		if (!err) {
1337 			skb->csum = csum_block_add(skb->csum, csum, off);
1338 			return 0;
1339 		}
1340 	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
1341 		return 0;
1342 
1343 	__skb_trim(skb, off);
1344 	return -EFAULT;
1345 }
1346 
1347 static inline int skb_can_coalesce(struct sk_buff *skb, int i,
1348 				   struct page *page, int off)
1349 {
1350 	if (i) {
1351 		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1352 
1353 		return page == frag->page &&
1354 		       off == frag->page_offset + frag->size;
1355 	}
1356 	return 0;
1357 }
1358 
1359 static inline int __skb_linearize(struct sk_buff *skb)
1360 {
1361 	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
1362 }
1363 
1364 /**
1365  *	skb_linearize - convert paged skb to linear one
1366  *	@skb: buffer to linearize
1367  *
1368  *	If there is no free memory -ENOMEM is returned, otherwise zero
1369  *	is returned and the old skb data released.
1370  */
1371 static inline int skb_linearize(struct sk_buff *skb)
1372 {
1373 	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
1374 }
1375 
1376 /**
1377  *	skb_linearize_cow - make sure skb is linear and writable
1378  *	@skb: buffer to process
1379  *
1380  *	If there is no free memory -ENOMEM is returned, otherwise zero
1381  *	is returned and the old skb data released.
1382  */
1383 static inline int skb_linearize_cow(struct sk_buff *skb)
1384 {
1385 	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
1386 	       __skb_linearize(skb) : 0;
1387 }
1388 
1389 /**
1390  *	skb_postpull_rcsum - update checksum for received skb after pull
1391  *	@skb: buffer to update
1392  *	@start: start of data before pull
1393  *	@len: length of data pulled
1394  *
1395  *	After doing a pull on a received packet, you need to call this to
1396  *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
1397  *	CHECKSUM_NONE so that it can be recomputed from scratch.
1398  */
1399 
1400 static inline void skb_postpull_rcsum(struct sk_buff *skb,
1401 				      const void *start, unsigned int len)
1402 {
1403 	if (skb->ip_summed == CHECKSUM_COMPLETE)
1404 		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
1405 }
1406 
1407 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
1408 
1409 /**
1410  *	pskb_trim_rcsum - trim received skb and update checksum
1411  *	@skb: buffer to trim
1412  *	@len: new length
1413  *
1414  *	This is exactly the same as pskb_trim except that it ensures the
1415  *	checksums of received packets are still valid after the operation.
1416  */
1417 
1418 static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
1419 {
1420 	if (likely(len >= skb->len))
1421 		return 0;
1422 	if (skb->ip_summed == CHECKSUM_COMPLETE)
1423 		skb->ip_summed = CHECKSUM_NONE;
1424 	return __pskb_trim(skb, len);
1425 }
1426 
1427 #define skb_queue_walk(queue, skb) \
1428 		for (skb = (queue)->next;					\
1429 		     prefetch(skb->next), (skb != (struct sk_buff *)(queue));	\
1430 		     skb = skb->next)
1431 
1432 #define skb_queue_walk_safe(queue, skb, tmp)					\
1433 		for (skb = (queue)->next, tmp = skb->next;			\
1434 		     skb != (struct sk_buff *)(queue);				\
1435 		     skb = tmp, tmp = skb->next)
1436 
1437 #define skb_queue_reverse_walk(queue, skb) \
1438 		for (skb = (queue)->prev;					\
1439 		     prefetch(skb->prev), (skb != (struct sk_buff *)(queue));	\
1440 		     skb = skb->prev)
1441 
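/*
 * Usage sketch (illustrative): walking a queue.  skb_queue_walk() must
 * not unlink the current entry; use skb_queue_walk_safe() when entries
 * may be removed.  The caller serialises against other users, e.g.
 * ("total_len" is hypothetical):
 *
 *	spin_lock_bh(&list->lock);
 *	skb_queue_walk(list, skb)
 *		total_len += skb->len;
 *	spin_unlock_bh(&list->lock);
 */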
1442 
1443 extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
1444 					   int *peeked, int *err);
1445 extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
1446 					 int noblock, int *err);
1447 extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
1448 				     struct poll_table_struct *wait);
1449 extern int	       skb_copy_datagram_iovec(const struct sk_buff *from,
1450 					       int offset, struct iovec *to,
1451 					       int size);
1452 extern int	       skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
1453 							int hlen,
1454 							struct iovec *iov);
1455 extern int	       skb_copy_datagram_from_iovec(struct sk_buff *skb,
1456 						    int offset,
1457 						    struct iovec *from,
1458 						    int len);
1459 extern void	       skb_free_datagram(struct sock *sk, struct sk_buff *skb);
1460 extern int	       skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
1461 					 unsigned int flags);
1462 extern __wsum	       skb_checksum(const struct sk_buff *skb, int offset,
1463 				    int len, __wsum csum);
1464 extern int	       skb_copy_bits(const struct sk_buff *skb, int offset,
1465 				     void *to, int len);
1466 extern int	       skb_store_bits(struct sk_buff *skb, int offset,
1467 				      const void *from, int len);
1468 extern __wsum	       skb_copy_and_csum_bits(const struct sk_buff *skb,
1469 					      int offset, u8 *to, int len,
1470 					      __wsum csum);
1471 extern int             skb_splice_bits(struct sk_buff *skb,
1472 						unsigned int offset,
1473 						struct pipe_inode_info *pipe,
1474 						unsigned int len,
1475 						unsigned int flags);
1476 extern void	       skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
1477 extern void	       skb_split(struct sk_buff *skb,
1478 				 struct sk_buff *skb1, const u32 len);
1479 
1480 extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
1481 
1482 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
1483 				       int len, void *buffer)
1484 {
1485 	int hlen = skb_headlen(skb);
1486 
1487 	if (hlen - offset >= len)
1488 		return skb->data + offset;
1489 
1490 	if (skb_copy_bits(skb, offset, buffer, len) < 0)
1491 		return NULL;
1492 
1493 	return buffer;
1494 }
1495 
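/*
 * Usage sketch (illustrative): skb_header_pointer() either points into
 * the linear data or copies into the caller's buffer, so it also works
 * for nonlinear skbs.  Assumes <linux/udp.h>; "drop" is hypothetical:
 *
 *	struct udphdr _uh, *uh;
 *
 *	uh = skb_header_pointer(skb, skb_transport_offset(skb),
 *				sizeof(_uh), &_uh);
 *	if (!uh)
 *		goto drop;			(packet too short)
 */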
1496 static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
1497 					     void *to,
1498 					     const unsigned int len)
1499 {
1500 	memcpy(to, skb->data, len);
1501 }
1502 
1503 static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
1504 						    const int offset, void *to,
1505 						    const unsigned int len)
1506 {
1507 	memcpy(to, skb->data + offset, len);
1508 }
1509 
1510 static inline void skb_copy_to_linear_data(struct sk_buff *skb,
1511 					   const void *from,
1512 					   const unsigned int len)
1513 {
1514 	memcpy(skb->data, from, len);
1515 }
1516 
1517 static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
1518 						  const int offset,
1519 						  const void *from,
1520 						  const unsigned int len)
1521 {
1522 	memcpy(skb->data + offset, from, len);
1523 }
1524 
1525 extern void skb_init(void);
1526 
1527 /**
1528  *	skb_get_timestamp - get timestamp from a skb
1529  *	@skb: skb to get stamp from
1530  *	@stamp: pointer to struct timeval to store stamp in
1531  *
1532  *	Timestamps are stored in the skb as offsets to a base timestamp.
1533  *	This function converts the offset back to a struct timeval and stores
1534  *	it in stamp.
1535  */
1536 static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval *stamp)
1537 {
1538 	*stamp = ktime_to_timeval(skb->tstamp);
1539 }
1540 
1541 static inline void __net_timestamp(struct sk_buff *skb)
1542 {
1543 	skb->tstamp = ktime_get_real();
1544 }
1545 
1546 static inline ktime_t net_timedelta(ktime_t t)
1547 {
1548 	return ktime_sub(ktime_get_real(), t);
1549 }
1550 
1551 static inline ktime_t net_invalid_timestamp(void)
1552 {
1553 	return ktime_set(0, 0);
1554 }
1555 
1556 extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
1557 extern __sum16 __skb_checksum_complete(struct sk_buff *skb);
1558 
1559 static inline int skb_csum_unnecessary(const struct sk_buff *skb)
1560 {
1561 	return skb->ip_summed & CHECKSUM_UNNECESSARY;
1562 }
1563 
1564 /**
1565  *	skb_checksum_complete - Calculate checksum of an entire packet
1566  *	@skb: packet to process
1567  *
1568  *	This function calculates the checksum over the entire packet plus
1569  *	the value of skb->csum.  The latter can be used to supply the
1570  *	checksum of a pseudo header as used by TCP/UDP.  It returns the
1571  *	checksum.
1572  *
1573  *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
1574  *	this function can be used to verify the checksum on received
1575  *	packets.  In that case the function should return zero if the
1576  *	checksum is correct.  In particular, this function will return zero
1577  *	if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
1578  *	hardware has already verified the correctness of the checksum.
1579  */
1580 static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
1581 {
1582 	return skb_csum_unnecessary(skb) ?
1583 	       0 : __skb_checksum_complete(skb);
1584 }
1585 
1586 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
1587 extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
1588 static inline void nf_conntrack_put(struct nf_conntrack *nfct)
1589 {
1590 	if (nfct && atomic_dec_and_test(&nfct->use))
1591 		nf_conntrack_destroy(nfct);
1592 }
1593 static inline void nf_conntrack_get(struct nf_conntrack *nfct)
1594 {
1595 	if (nfct)
1596 		atomic_inc(&nfct->use);
1597 }
1598 static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
1599 {
1600 	if (skb)
1601 		atomic_inc(&skb->users);
1602 }
1603 static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
1604 {
1605 	if (skb)
1606 		kfree_skb(skb);
1607 }
1608 #endif
1609 #ifdef CONFIG_BRIDGE_NETFILTER
1610 static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
1611 {
1612 	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
1613 		kfree(nf_bridge);
1614 }
1615 static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
1616 {
1617 	if (nf_bridge)
1618 		atomic_inc(&nf_bridge->use);
1619 }
1620 #endif /* CONFIG_BRIDGE_NETFILTER */
1621 static inline void nf_reset(struct sk_buff *skb)
1622 {
1623 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
1624 	nf_conntrack_put(skb->nfct);
1625 	skb->nfct = NULL;
1626 	nf_conntrack_put_reasm(skb->nfct_reasm);
1627 	skb->nfct_reasm = NULL;
1628 #endif
1629 #ifdef CONFIG_BRIDGE_NETFILTER
1630 	nf_bridge_put(skb->nf_bridge);
1631 	skb->nf_bridge = NULL;
1632 #endif
1633 }
1634 
1635 /* Note: This doesn't put any conntrack and bridge info in dst. */
1636 static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
1637 {
1638 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
1639 	dst->nfct = src->nfct;
1640 	nf_conntrack_get(src->nfct);
1641 	dst->nfctinfo = src->nfctinfo;
1642 	dst->nfct_reasm = src->nfct_reasm;
1643 	nf_conntrack_get_reasm(src->nfct_reasm);
1644 #endif
1645 #ifdef CONFIG_BRIDGE_NETFILTER
1646 	dst->nf_bridge  = src->nf_bridge;
1647 	nf_bridge_get(src->nf_bridge);
1648 #endif
1649 }
1650 
1651 static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
1652 {
1653 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
1654 	nf_conntrack_put(dst->nfct);
1655 	nf_conntrack_put_reasm(dst->nfct_reasm);
1656 #endif
1657 #ifdef CONFIG_BRIDGE_NETFILTER
1658 	nf_bridge_put(dst->nf_bridge);
1659 #endif
1660 	__nf_copy(dst, src);
1661 }
1662 
1663 #ifdef CONFIG_NETWORK_SECMARK
1664 static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
1665 {
1666 	to->secmark = from->secmark;
1667 }
1668 
1669 static inline void skb_init_secmark(struct sk_buff *skb)
1670 {
1671 	skb->secmark = 0;
1672 }
1673 #else
1674 static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
1675 { }
1676 
1677 static inline void skb_init_secmark(struct sk_buff *skb)
1678 { }
1679 #endif
1680 
1681 static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
1682 {
1683 	skb->queue_mapping = queue_mapping;
1684 }
1685 
1686 static inline u16 skb_get_queue_mapping(struct sk_buff *skb)
1687 {
1688 	return skb->queue_mapping;
1689 }
1690 
1691 static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
1692 {
1693 	to->queue_mapping = from->queue_mapping;
1694 }
1695 
1696 static inline int skb_is_gso(const struct sk_buff *skb)
1697 {
1698 	return skb_shinfo(skb)->gso_size;
1699 }
1700 
1701 static inline int skb_is_gso_v6(const struct sk_buff *skb)
1702 {
1703 	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
1704 }
1705 
1706 extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);
1707 
1708 static inline bool skb_warn_if_lro(const struct sk_buff *skb)
1709 {
1710 	/* LRO sets gso_size but not gso_type, whereas if GSO is really
1711 	 * wanted then gso_type will be set. */
1712 	struct skb_shared_info *shinfo = skb_shinfo(skb);
1713 	if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) {
1714 		__skb_warn_lro_forwarding(skb);
1715 		return true;
1716 	}
1717 	return false;
1718 }
1719 
1720 static inline void skb_forward_csum(struct sk_buff *skb)
1721 {
1722 	/* Unfortunately we don't support this one.  Any brave souls? */
1723 	if (skb->ip_summed == CHECKSUM_COMPLETE)
1724 		skb->ip_summed = CHECKSUM_NONE;
1725 }
1726 
1727 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
1728 #endif	/* __KERNEL__ */
1729 #endif	/* _LINUX_SKBUFF_H */
1730