/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <[email protected]>
 *		Florian La Roche, <[email protected]>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/cache.h>

#include <asm/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>

#define HAVE_ALLOC_SKB		/* For the drivers to know */
#define HAVE_ALIGNABLE_SKB	/* Ditto 8)		   */

#define CHECKSUM_NONE 0
#define CHECKSUM_PARTIAL 1
#define CHECKSUM_UNNECESSARY 2
#define CHECKSUM_COMPLETE 3

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_MAX_ORDER(X, ORDER)	(((PAGE_SIZE << (ORDER)) - (X) - \
				  sizeof(struct skb_shared_info)) & \
				  ~(SMP_CACHE_BYTES - 1))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))
/* A. Checksumming of received packets by device.
 *
 *	NONE: device failed to checksum this packet.
 *		skb->csum is undefined.
 *
 *	UNNECESSARY: device parsed the packet and verified the checksum.
 *		skb->csum is undefined.
 *	      This is a bad option, but, unfortunately, many vendors do it.
 *	      Apparently with the secret goal of selling you a new device
 *	      when you add a new protocol to your host, e.g. IPv6. 8)
 *
 *	COMPLETE: the most generic way. Device supplied the checksum of _all_
 *	    the packet as seen by netif_rx in skb->csum.
 *	    NOTE: even if a device supports only some protocols but is
 *	    able to produce some skb->csum, it MUST use COMPLETE,
 *	    not UNNECESSARY.
 *
 * B. Checksumming on output.
 *
 *	NONE: skb is checksummed by protocol or csum is not required.
 *
 *	PARTIAL: device is required to csum the packet as seen by
 *	hard_start_xmit from skb->h.raw to the end and to record the
 *	checksum at skb->h.raw + skb->csum_offset.
 *
 *	Device must show its capabilities in dev->features, set
 *	at device setup time.
 *	NETIF_F_HW_CSUM	- it is a clever device, able to checksum
 *			  everything.
 *	NETIF_F_NO_CSUM - loopback or reliable single-hop media.
 *	NETIF_F_IP_CSUM - device is dumb, able to csum only
 *			  TCP/UDP over IPv4. Sigh. Vendors like it this
 *			  way for some unknown reason. Though, see the note
 *			  above about CHECKSUM_UNNECESSARY. 8)
 *
 *	Any questions? No questions, good.		--ANK
 */
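
/*
 * Illustrative sketch (editor's addition, not kernel code): a receive
 * path reporting a full hardware checksum.  `hw_csum' stands for a
 * hypothetical value read from the device's RX descriptor.
 *
 *	skb->csum = hw_csum;
 *	skb->ip_summed = CHECKSUM_COMPLETE;
 *	netif_rx(skb);
 *
 * The stack can then verify any protocol checksum by folding the
 * pseudo-header into skb->csum, without touching the payload again.
 */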

struct net_device;

#ifdef CONFIG_NETFILTER
struct nf_conntrack {
	atomic_t use;
	void (*destroy)(struct nf_conntrack *);
};

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
	atomic_t use;
	struct net_device *physindev;
	struct net_device *physoutdev;
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	struct net_device *netoutdev;
#endif
	unsigned int mask;
	unsigned long data[32 / sizeof(unsigned long)];
};
#endif

#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow a 64K frame to be packed as a single skb without a frag_list */
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct page *page;
	__u16 page_offset;
	__u16 size;
};

/* This data is invariant across clones and lives at
 * the end of the header data, i.e. at skb->end.
 */
struct skb_shared_info {
	atomic_t	dataref;
	unsigned short	nr_frags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short  gso_type;
	__be32          ip6_frag_id;
	struct sk_buff	*frag_list;
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  It is up to the users of the skb to agree on
 * where the payload starts.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
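
/*
 * A minimal sketch (editor's addition) of reading the two halves back
 * out; the local variable names are illustrative only.  Compare
 * skb_header_cloned() below, which applies the same split:
 *
 *	int dataref = atomic_read(&skb_shinfo(skb)->dataref);
 *	int all     = dataref & SKB_DATAREF_MASK;
 *	int payload = dataref >> SKB_DATAREF_SHIFT;
 *
 * `all' counts references on the whole of skb->data, `payload' those on
 * the payload part only.
 */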

struct skb_timeval {
	u32	off_sec;
	u32	off_usec;
};


enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,
};

/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@sk: Socket we are owned by
 *	@tstamp: Time we arrived
 *	@dev: Device we arrived on/are leaving by
 *	@input_dev: Device we arrived on
 *	@h: Transport layer header
 *	@nh: Network layer header
 *	@mac: Link layer header
 *	@dst: destination entry
 *	@sp: the security path, used for xfrm
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@csum: Checksum
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@nohdr: Payload reference only, must not modify header
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ip_summed: Driver fed us an IP checksum
 *	@priority: Packet queueing priority
 *	@users: User count - see {datagram,tcp}.c
 *	@protocol: Packet protocol from driver
 *	@truesize: Buffer size
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@destructor: Destruct function
 *	@mark: Generic packet mark
 *	@nfct: Associated connection, if any
 *	@ipvs_property: skbuff is owned by ipvs
 *	@nfctinfo: Relationship of this skb to the connection
 *	@nfct_reasm: netfilter conntrack re-assembly pointer
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@dma_cookie: a cookie to one of several possible DMA operations
 *		done by skb DMA functions
 *	@secmark: security marking
 */

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	struct sock		*sk;
	struct skb_timeval	tstamp;
	struct net_device	*dev;
	struct net_device	*input_dev;

	union {
		struct tcphdr	*th;
		struct udphdr	*uh;
		struct icmphdr	*icmph;
		struct igmphdr	*igmph;
		struct iphdr	*ipiph;
		struct ipv6hdr	*ipv6h;
		unsigned char	*raw;
	} h;

	union {
		struct iphdr	*iph;
		struct ipv6hdr	*ipv6h;
		struct arphdr	*arph;
		unsigned char	*raw;
	} nh;

	union {
		unsigned char	*raw;
	} mac;

	struct dst_entry	*dst;
	struct sec_path		*sp;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48];

	unsigned int		len,
				data_len,
				mac_len;
	union {
		__wsum		csum;
		__u32		csum_offset;
	};
	__u32			priority;
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	__u8			pkt_type:3,
				fclone:2,
				ipvs_property:1;
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#ifdef CONFIG_NETFILTER
	struct nf_conntrack	*nfct;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif
#endif /* CONFIG_NETFILTER */
#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif
#ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif

	__u32			mark;

	/* These elements must be at the end, see alloc_skb() for details.  */
	unsigned int		truesize;
	atomic_t		users;
	unsigned char		*head,
				*data,
				*tail,
				*end;
};

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>

#include <asm/system.h>

extern void kfree_skb(struct sk_buff *skb);
extern void	       __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int fclone, int node);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, -1);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, 1, -1);
}
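
/*
 * Typical usage sketch (editor's addition; assumes process context,
 * a `len'-byte payload and a hypothetical source buffer `buf'):
 *
 *	struct sk_buff *skb = alloc_skb(len + NET_SKB_PAD, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, NET_SKB_PAD);
 *	memcpy(skb_put(skb, len), buf, len);
 *
 * NET_SKB_PAD (defined below) keeps some headroom free for lower layers.
 */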

extern struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp,
					    unsigned int size,
					    gfp_t priority);
extern void	       kfree_skbmem(struct sk_buff *skb);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
extern struct sk_buff *pskb_copy(struct sk_buff *skb,
				 gfp_t gfp_mask);
extern int	       pskb_expand_head(struct sk_buff *skb,
					int nhead, int ntail,
					gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
					    unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				       int newheadroom, int newtailroom,
				       gfp_t priority);
extern int	       skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	kfree_skb(a)
extern void	      skb_over_panic(struct sk_buff *skb, int len,
				     void *here);
extern void	      skb_under_panic(struct sk_buff *skb, int len,
				      void *here);
extern void	      skb_truesize_bug(struct sk_buff *skb);

static inline void skb_truesize_check(struct sk_buff *skb)
{
	if (unlikely((int)skb->truesize < sizeof(struct sk_buff) + skb->len))
		skb_truesize_bug(skb);
}

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int getfrag(void *from, char *to, int offset,
			int len, int odd, struct sk_buff *skb),
			void *from, int length);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

extern void	      skb_prepare_seq_read(struct sk_buff *skb,
					   unsigned int from, unsigned int to,
					   struct skb_seq_state *st);
extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
				   struct skb_seq_state *st);
extern void	      skb_abort_seq_read(struct skb_seq_state *st);

extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
				    unsigned int to, struct ts_config *config,
				    struct ts_state *state);

/* Internal */
#define skb_shinfo(SKB)		((struct skb_shared_info *)((SKB)->end))

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant
 * atomic changes.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer.  This is done
 *	by acquiring a payload reference.  You must not read from the header
 *	part of skb->data after this.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	called from interrupt context or with spinlocks held, @pri must
 *	be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
					      gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);
		kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
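
/*
 * Canonical receive-handler pattern (editor's sketch): clone away any
 * sharing before modifying the skb.  Note that the reference on the
 * original is dropped even if the clone fails.
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (!skb)
 *		return NET_RX_DROP;
 */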

/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader as well as
 *	forwarding, and a couple of other messy ones. The normal one is
 *	tcpdumping a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state, @pri must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);	/* Free our shared copy */
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->next;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_peek_tail
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->prev;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/*
 * This function creates a split-out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either
 * the network layer or the drivers will need annotations to consolidate
 * the main types of usage into three classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}
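
/*
 * Sketch of a driver-private queue (editor's addition).  The `__'
 * variants below assume the caller already holds rxq.lock or otherwise
 * owns the queue exclusively:
 *
 *	struct sk_buff_head rxq;
 *
 *	skb_queue_head_init(&rxq);
 *	__skb_queue_tail(&rxq, skb);
 *	skb = __skb_dequeue(&rxq);
 *
 * __skb_dequeue() returns NULL when the queue is empty.
 */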

/*
 *	Insert an sk_buff at the start of a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */

/**
 *	__skb_queue_after - queue a buffer after a given buffer
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	struct sk_buff *next;
	list->qlen++;

	next = prev->next;
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				   struct sk_buff *newsk)
{
	struct sk_buff *prev, *next;

	list->qlen++;
	next = (struct sk_buff *)list;
	prev = next->prev;
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
}


/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *next, *prev, *result;

	prev = (struct sk_buff *) list;
	next = prev->next;
	result = NULL;
	if (next != prev) {
		result	     = next;
		next	     = next->next;
		list->qlen--;
		next->prev   = prev;
		prev->next   = next;
		result->next = result->prev = NULL;
	}
	return result;
}


/*
 *	Insert a packet on a list.
 */
extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

/*
 *	Place a packet after a given packet in a list.
 */
extern void	   skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	__skb_insert(newsk, old, old->next, list);
}

/*
 * Remove an sk_buff from a list. _Must_ be called atomically, and with
 * the list known.
 */
extern void	   skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}


/* XXX: more streamlined implementation */

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}


static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_shinfo(skb)->frags[i].size;
	return len + skb_headlen(skb);
}

static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page		  = page;
	frag->page_offset	  = off;
	frag->size		  = size;
	skb_shinfo(skb)->nr_frags = i + 1;
}

#define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->frag_list)
#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))

/*
 *	Add data to an sk_buff
 */
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, current_text_addr());
	return tmp;
}

static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, current_text_addr());
	return skb->data;
}

static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
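
/*
 * Header-parsing sketch (editor's addition): make sure the bytes are in
 * the linear area before dereferencing them, as ip_rcv() does.  Assumes
 * skb->data is positioned at the network header; the `drop' label is
 * illustrative.
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;
 *	iph = skb->nh.iph;
 */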

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an &sk_buff.
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}
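
/*
 * The canonical way to build an outgoing packet with these primitives
 * (editor's sketch; `hdr_len', `data_len', `data' and struct my_hdr are
 * hypothetical):
 *
 *	skb = alloc_skb(hdr_len + data_len, GFP_ATOMIC);
 *	skb_reserve(skb, hdr_len);
 *	memcpy(skb_put(skb, data_len), data, data_len);
 *	hdr = (struct my_hdr *)skb_push(skb, hdr_len);
 *
 * Reserve the headroom first, append the payload with skb_put(), then
 * prepend each header with skb_push() as the packet descends the stack.
 */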

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies; it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes, network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade-off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif
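
/*
 * The RX alignment idiom spelled out (editor's sketch; `pkt_len' and the
 * `drop' label are illustrative):
 *
 *	skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
 *	if (!skb)
 *		goto drop;
 *	skb_reserve(skb, NET_IP_ALIGN);
 *
 * After the 14-byte ethernet header is received, the IP header then
 * starts on a 4-byte boundary.
 */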

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 16 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 16 bytes of
 * headroom; you should not reduce this.
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	16
#endif

extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb->data_len)) {
		WARN_ON(1);
		return;
	}
	skb->len  = len;
	skb->tail = skb->data + len;
}

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
static inline void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}


static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to out-
 *	of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk		= NULL;
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

/**
 *	__dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
	if (likely(skb))
		skb_reserve(skb, NET_SKB_PAD);
	return skb;
}

/**
 *	dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return __dev_alloc_skb(length, GFP_ATOMIC);
}

extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
		unsigned int length, gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, the data is reallocated. If reallocation fails, an error
 *	is returned and the original skb is not changed.
 *
 *	The result is an skb with a writable area from skb->head to
 *	skb->tail and at least @headroom bytes of space at the head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	int delta = (headroom > NET_SKB_PAD ? headroom : NET_SKB_PAD) -
			skb_headroom(skb);

	if (delta < 0)
		delta = 0;

	if (delta || skb_cloned(skb))
		return pskb_expand_head(skb, (delta + (NET_SKB_PAD - 1)) &
				~(NET_SKB_PAD - 1), 0, GFP_ATOMIC);
	return 0;
}
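
/*
 * Forward-path sketch (editor's addition): make the headers writable
 * before editing them in place.  ETH_HLEN is used for illustration.
 *
 *	if (skb_cow(skb, ETH_HLEN))
 *		goto drop;
 *
 * On success, skb->head...skb->tail is private to us and at least
 * ETH_HLEN bytes of headroom are available.
 */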

/**
 *	skb_padto	- pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}
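
/*
 * Typical transmit-path use (editor's sketch): pad runt frames before
 * handing them to hardware.  ETH_ZLEN (60) is the usual minimum for
 * ethernet.
 *
 *	if (skb_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;
 *
 * Remember that skb_padto() already freed the skb when it returns an
 * error, so the caller must not free it again.
 */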

static inline int skb_add_data(struct sk_buff *skb,
			       char __user *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
						      copy, 0, &err);
		if (!err) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline int skb_can_coalesce(struct sk_buff *skb, int i,
				   struct page *page, int off)
{
	if (i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == frag->page &&
		       off == frag->page_offset + frag->size;
	}
	return 0;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */
static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}

unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of a received packet is still valid after the operation.
 */
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;					\
		     prefetch(skb->next), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;					\
		     prefetch(skb->prev), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->prev)
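
/*
 * Usage sketch (editor's addition): walk a queue under its lock.  The
 * macros do not take the lock themselves, and the skb must not be
 * unlinked while walking.
 *
 *	unsigned long flags;
 *	struct sk_buff *skb;
 *
 *	spin_lock_irqsave(&list->lock, flags);
 *	skb_queue_walk(list, skb) {
 *		total += skb->len;
 *	}
 *	spin_unlock_irqrestore(&list->lock, flags);
 */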


extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
					 int noblock, int *err);
extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
				     struct poll_table_struct *wait);
extern int	       skb_copy_datagram_iovec(const struct sk_buff *from,
					       int offset, struct iovec *to,
					       int size);
extern int	       skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
							int hlen,
							struct iovec *iov);
extern void	       skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern void	       skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
					 unsigned int flags);
extern __wsum	       skb_checksum(const struct sk_buff *skb, int offset,
				    int len, __wsum csum);
extern int	       skb_copy_bits(const struct sk_buff *skb, int offset,
				     void *to, int len);
extern int	       skb_store_bits(const struct sk_buff *skb, int offset,
				      void *from, int len);
extern __wsum	       skb_copy_and_csum_bits(const struct sk_buff *skb,
					      int offset, u8 *to, int len,
					      __wsum csum);
extern void	       skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void	       skb_split(struct sk_buff *skb,
				 struct sk_buff *skb1, const u32 len);

extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);

static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
				       int len, void *buffer)
{
	int hlen = skb_headlen(skb);

	if (hlen - offset >= len)
		return skb->data + offset;

	if (skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}
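
/*
 * Canonical use (editor's sketch): parse a header that may span
 * fragments by supplying a stack buffer.  The returned pointer is either
 * into the skb or into `_uh', so parse through it, never through
 * skb->data.
 *
 *	struct udphdr _uh, *uh;
 *
 *	uh = skb_header_pointer(skb, offset, sizeof(_uh), &_uh);
 *	if (uh == NULL)
 *		goto drop;
 */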

extern void skb_init(void);
extern void skb_add_mtu(int mtu);

/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and stores
 *	it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval *stamp)
{
	stamp->tv_sec  = skb->tstamp.off_sec;
	stamp->tv_usec = skb->tstamp.off_usec;
}

/**
 *	skb_set_timestamp - set timestamp of a skb
 *	@skb: skb to set stamp of
 *	@stamp: pointer to struct timeval to get stamp from
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts a struct timeval to an offset and stores
 *	it in the skb.
 */
static inline void skb_set_timestamp(struct sk_buff *skb, const struct timeval *stamp)
{
	skb->tstamp.off_sec  = stamp->tv_sec;
	skb->tstamp.off_usec = stamp->tv_usec;
}

extern void __net_timestamp(struct sk_buff *skb);

extern __sum16 __skb_checksum_complete(struct sk_buff *skb);

/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo-header as used by TCP/UDP.  It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *	this function can be used to verify the checksum on received
 *	packets.  In that case the function should return zero if the
 *	checksum is correct.  In particular, this function will return zero
 *	if skb->ip_summed is CHECKSUM_UNNECESSARY, which indicates that the
 *	hardware has already verified the correctness of the checksum.
 */
static inline unsigned int skb_checksum_complete(struct sk_buff *skb)
{
	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
		__skb_checksum_complete(skb);
}
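
/*
 * Receive-side sketch (editor's addition), in the style of UDP: assume
 * skb->csum already holds the pseudo-header checksum.
 *
 *	if (skb_checksum_complete(skb))
 *		goto csum_error;
 *
 * A zero result means the checksum is valid (or was already verified by
 * hardware).
 */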

#ifdef CONFIG_NETFILTER
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nfct->destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
	if (skb)
		atomic_inc(&skb->users);
}
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
	if (skb)
		kfree_skb(skb);
}
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
static inline void nf_reset(struct sk_buff *skb)
{
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put_reasm(skb->nfct_reasm);
	skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

#else /* CONFIG_NETFILTER */
static inline void nf_reset(struct sk_buff *skb) {}
#endif /* CONFIG_NETFILTER */

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline int skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */