/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <[email protected]>
 *		Florian La Roche, <[email protected]>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/cache.h>

#include <asm/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>

#define HAVE_ALLOC_SKB		/* For the drivers to know */
#define HAVE_ALIGNABLE_SKB	/* Ditto 8)		   */

#define CHECKSUM_NONE 0
#define CHECKSUM_PARTIAL 1
#define CHECKSUM_UNNECESSARY 2
#define CHECKSUM_COMPLETE 3

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_MAX_ORDER(X, ORDER)	(((PAGE_SIZE << (ORDER)) - (X) - \
				  sizeof(struct skb_shared_info)) & \
				  ~(SMP_CACHE_BYTES - 1))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))
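
/*
 * Worked example of the alignment arithmetic above (illustrative only;
 * the concrete numbers assume SMP_CACHE_BYTES == 32 and PAGE_SIZE == 4096):
 *
 *	SKB_DATA_ALIGN(30)   == (30 + 31) & ~31 == 32
 *	SKB_MAX_ORDER(32, 0) == (4096 - 32 - sizeof(struct skb_shared_info)),
 *			        rounded down to a multiple of 32
 *
 * i.e. SKB_MAX_HEAD() is the largest payload that fits in one page
 * alongside the shared info, and SKB_MAX_ALLOC is the same for an
 * order-2 (four page) allocation.
 */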

/* A. Checksumming of received packets by device.
 *
 *	NONE: device failed to checksum this packet.
 *		skb->csum is undefined.
 *
 *	UNNECESSARY: device parsed the packet and verified the checksum
 *		itself. skb->csum is undefined.
 *	      This is a bad option, but, unfortunately, many vendors do it.
 *	      Apparently with the secret goal of selling you a new device
 *	      when you add a new protocol to your host. F.e. IPv6. 8)
 *
 *	COMPLETE: the most generic way. The device supplied the checksum of
 *	    _all_ of the packet as seen by netif_rx in skb->csum.
 *	    NOTE: even if a device supports only some protocols but is
 *	    able to produce some skb->csum, it MUST use COMPLETE,
 *	    not UNNECESSARY.
 *
 * B. Checksumming on output.
 *
 *	NONE: the skb is checksummed by the protocol, or no checksum is
 *	required.
 *
 *	PARTIAL: the device is required to checksum the packet as seen by
 *	hard_start_xmit from skb->h.raw to the end, and to record the
 *	checksum at skb->h.raw + skb->csum_offset.
 *
 *	The device must advertise its capabilities in dev->features, set
 *	at device setup time.
 *	NETIF_F_HW_CSUM	- it is a clever device, able to checksum
 *			  everything.
 *	NETIF_F_NO_CSUM - loopback or reliable single hop media.
 *	NETIF_F_IP_CSUM - the device is dumb, able to checksum only
 *			  TCP/UDP over IPv4. Sigh. Vendors like it this
 *			  way for some unknown reason. Though, see the
 *			  comment above about CHECKSUM_UNNECESSARY. 8)
 *
 *	Any questions? No questions, good.		--ANK
 */
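
/*
 * A minimal receive-path sketch of how a driver reports checksum status
 * (illustrative only, not part of this header; the function name is
 * hypothetical and <linux/netdevice.h> and <linux/etherdevice.h> are
 * assumed for netif_rx() and eth_type_trans()).
 */
#if 0
static void example_rx_complete(struct net_device *dev, struct sk_buff *skb,
				__wsum hw_csum)
{
	skb->protocol = eth_type_trans(skb, dev);
	if (hw_csum) {
		/* Hardware supplied a checksum of the whole packet. */
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = hw_csum;
	} else {
		/* No help from hardware; the stack must verify it. */
		skb->ip_summed = CHECKSUM_NONE;
	}
	netif_rx(skb);
}
#endif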

struct net_device;

#ifdef CONFIG_NETFILTER
struct nf_conntrack {
	atomic_t use;
	void (*destroy)(struct nf_conntrack *);
};

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
	atomic_t use;
	struct net_device *physindev;
	struct net_device *physoutdev;
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	struct net_device *netoutdev;
#endif
	unsigned int mask;
	unsigned long data[32 / sizeof(unsigned long)];
};
#endif

#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow 64K frame to be packed as single skb without frag_list */
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct page *page;
	__u16 page_offset;
	__u16 size;
};

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	atomic_t	dataref;
	unsigned short	nr_frags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short  gso_type;
	__be32          ip6_frag_id;
	struct sk_buff	*frag_list;
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  It is up to the users of the skb to agree on
 * where the payload starts.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
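
/*
 * A sketch of how the two dataref halves described above can be read back
 * (illustrative only, not part of the kernel API; the helper name is
 * hypothetical).
 */
#if 0
static inline void example_dataref_split(const struct sk_buff *skb,
					 int *payload_refs, int *total_refs)
{
	int dataref = atomic_read(&skb_shinfo(skb)->dataref);

	*payload_refs = dataref >> SKB_DATAREF_SHIFT;	/* upper 16 bits */
	*total_refs   = dataref & SKB_DATAREF_MASK;	/* lower 16 bits */
}
#endif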

struct skb_timeval {
	u32	off_sec;
	u32	off_usec;
};


enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,
};

/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@sk: Socket we are owned by
 *	@tstamp: Time we arrived
 *	@dev: Device we arrived on/are leaving by
 *	@iif: ifindex of device we arrived on
 *	@h: Transport layer header
 *	@nh: Network layer header
 *	@mac: Link layer header
 *	@dst: destination entry
 *	@sp: the security path, used for xfrm
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@len: Length of actual data
 *	@data_len: Length of the paged (non-linear) part of the data
 *	@mac_len: Length of link layer header
 *	@csum: Checksum
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@nohdr: Payload reference only, must not modify header
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ip_summed: Driver fed us an IP checksum
 *	@priority: Packet queueing priority
 *	@users: User count - see {datagram,tcp}.c
 *	@protocol: Packet protocol from driver
 *	@truesize: Buffer size
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@destructor: Destruct function
 *	@mark: Generic packet mark
 *	@nfct: Associated connection, if any
 *	@ipvs_property: skbuff is owned by ipvs
 *	@nfctinfo: Relationship of this skb to the connection
 *	@nfct_reasm: netfilter conntrack re-assembly pointer
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@dma_cookie: a cookie to one of several possible DMA operations
 *		done by skb DMA functions
 *	@secmark: security marking
 */

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	struct sock		*sk;
	struct skb_timeval	tstamp;
	struct net_device	*dev;
	int			iif;
	/* 4 byte hole on 64 bit */

	union {
		struct tcphdr	*th;
		struct udphdr	*uh;
		struct icmphdr	*icmph;
		struct igmphdr	*igmph;
		struct iphdr	*ipiph;
		struct ipv6hdr	*ipv6h;
		unsigned char	*raw;
	} h;

	union {
		struct iphdr	*iph;
		struct ipv6hdr	*ipv6h;
		struct arphdr	*arph;
		unsigned char	*raw;
	} nh;

	union {
		unsigned char	*raw;
	} mac;

	struct  dst_entry	*dst;
	struct	sec_path	*sp;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48];

	unsigned int		len,
				data_len,
				mac_len;
	union {
		__wsum		csum;
		__u32		csum_offset;
	};
	__u32			priority;
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	__u8			pkt_type:3,
				fclone:2,
				ipvs_property:1;
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#ifdef CONFIG_NETFILTER
	struct nf_conntrack	*nfct;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif
#endif /* CONFIG_NETFILTER */
#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif
#ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif

	__u32			mark;

	/* These elements must be at the end, see alloc_skb() for details.  */
	unsigned int		truesize;
	atomic_t		users;
	unsigned char		*head,
				*data,
				*tail,
				*end;
};

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>

#include <asm/system.h>

extern void kfree_skb(struct sk_buff *skb);
extern void	       __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int fclone, int node);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, -1);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, 1, -1);
}
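
/*
 * A minimal allocation sketch (illustrative only; the 32 byte headroom
 * and the helper name are hypothetical, and <linux/string.h> is assumed
 * for memcpy): reserve headroom for lower-layer headers first, then fill
 * the data area with skb_put().
 */
#if 0
static struct sk_buff *example_build_skb(const void *payload, unsigned int len)
{
	struct sk_buff *skb = alloc_skb(len + 32, GFP_KERNEL);

	if (!skb)
		return NULL;
	skb_reserve(skb, 32);				/* headroom for headers */
	memcpy(skb_put(skb, len), payload, len);	/* append the payload */
	return skb;
}
#endif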

extern struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp,
					    unsigned int size,
					    gfp_t priority);
extern void	       kfree_skbmem(struct sk_buff *skb);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
extern struct sk_buff *pskb_copy(struct sk_buff *skb,
				 gfp_t gfp_mask);
extern int	       pskb_expand_head(struct sk_buff *skb,
					int nhead, int ntail,
					gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
					    unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				       int newheadroom, int newtailroom,
				       gfp_t priority);
extern int	       skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	kfree_skb(a)
extern void	      skb_over_panic(struct sk_buff *skb, int len,
				     void *here);
extern void	      skb_under_panic(struct sk_buff *skb, int len,
				      void *here);
extern void	      skb_truesize_bug(struct sk_buff *skb);

static inline void skb_truesize_check(struct sk_buff *skb)
{
	if (unlikely((int)skb->truesize < sizeof(struct sk_buff) + skb->len))
		skb_truesize_bug(skb);
}

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int getfrag(void *from, char *to, int offset,
			int len, int odd, struct sk_buff *skb),
			void *from, int length);

struct skb_seq_state
{
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

extern void	      skb_prepare_seq_read(struct sk_buff *skb,
					   unsigned int from, unsigned int to,
					   struct skb_seq_state *st);
extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
				   struct skb_seq_state *st);
extern void	      skb_abort_seq_read(struct skb_seq_state *st);

extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
				    unsigned int to, struct ts_config *config,
				    struct ts_state *state);
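
/*
 * A sketch of the sequential-read API declared above (illustrative only;
 * the consuming function is hypothetical). It walks all bytes of a
 * possibly non-linear skb in chunks without linearizing it.
 */
#if 0
static void example_walk_skb(struct sk_buff *skb)
{
	struct skb_seq_state st;
	const u8 *data;
	unsigned int consumed = 0, avail;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	while ((avail = skb_seq_read(consumed, &data, &st)) != 0) {
		/* ... process avail bytes at data ... */
		consumed += avail;
	}
	/* Call skb_abort_seq_read(&st) instead if stopping before all of
	 * the requested data has been consumed. */
}
#endif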

/* Internal */
#define skb_shinfo(SKB)		((struct skb_shared_info *)((SKB)->end))

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant
 * atomic changes.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer.  This is done
 *	by acquiring a payload reference.  You must not read from the header
 *	part of skb->data after this.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	called from interrupt context or with spinlocks held, @pri must
 *	be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
					      gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);
		kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
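
/*
 * A typical receive-handler sketch for skb_share_check() (illustrative
 * only; the handler name is hypothetical and <linux/errno.h> is assumed).
 * Packet taps may hold extra references, so a handler that writes to the
 * skb unshares it first.
 */
#if 0
static int example_rcv(struct sk_buff *skb)
{
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;	/* the original reference was already dropped */
	/* ... safe to modify skb metadata here ... */
	kfree_skb(skb);
	return 0;
}
#endif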

/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and are
 *	forwarding, and a couple of other messy ones. The normal one is
 *	tcpdumping a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt context, @pri must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);	/* Free our shared copy */
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->next;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_peek_tail
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->prev;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/*
 * This function creates a split-out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers should be annotated to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}
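
/*
 * A minimal queue usage sketch (illustrative only; the names are
 * hypothetical). The non-"__" variants used below take list->lock
 * themselves, so they are safe without external locking.
 */
#if 0
static struct sk_buff_head example_queue;

static void example_queue_use(struct sk_buff *skb)
{
	struct sk_buff *next;

	skb_queue_head_init(&example_queue);
	skb_queue_tail(&example_queue, skb);	/* enqueue, takes the lock */
	next = skb_dequeue(&example_queue);	/* dequeue, takes the lock */
	if (next)
		kfree_skb(next);
}
#endif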

/*
 *	Insert an sk_buff at the start of a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */

/**
 *	__skb_queue_after - queue a buffer after the given buffer
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	struct sk_buff *next;
	list->qlen++;

	next = prev->next;
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				   struct sk_buff *newsk)
{
	struct sk_buff *prev, *next;

	list->qlen++;
	next = (struct sk_buff *)list;
	prev = next->prev;
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
}


/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *next, *prev, *result;

	prev = (struct sk_buff *) list;
	next = prev->next;
	result = NULL;
	if (next != prev) {
		result	     = next;
		next	     = next->next;
		list->qlen--;
		next->prev   = prev;
		prev->next   = next;
		result->next = result->prev = NULL;
	}
	return result;
}


/*
 *	Insert a packet on a list.
 */
extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

/*
 *	Place a packet after a given packet in a list.
 */
extern void	   skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	__skb_insert(newsk, old, old->next, list);
}

/*
 * Remove an sk_buff from a list. _Must_ be called atomically, and with
 * the list known.
 */
extern void	   skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}


/* XXX: more streamlined implementation */

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}


static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_shinfo(skb)->frags[i].size;
	return len + skb_headlen(skb);
}

static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page		  = page;
	frag->page_offset	  = off;
	frag->size		  = size;
	skb_shinfo(skb)->nr_frags = i + 1;
}
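
/*
 * A sketch of attaching a page fragment to build a non-linear skb
 * (illustrative only; the helper name is hypothetical and <linux/mm.h>
 * is assumed for get_page()). Note that skb_fill_page_desc() does not
 * touch the length accounting, so the caller updates it here.
 */
#if 0
static int example_add_frag(struct sk_buff *skb, struct page *page,
			    int off, int size)
{
	int i = skb_shinfo(skb)->nr_frags;

	if (i >= MAX_SKB_FRAGS)
		return -EMSGSIZE;
	get_page(page);			/* the fragment holds a reference */
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len      += size;
	skb->data_len += size;
	skb->truesize += size;
	return 0;
}
#endif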

#define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->frag_list)
#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))

/*
 *	Add data to an sk_buff
 */
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, current_text_addr());
	return tmp;
}

static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, current_text_addr());
	return skb->data;
}

static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
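
/*
 * The canonical header-parsing idiom for pskb_may_pull() (a sketch;
 * assumes <linux/ip.h> for struct iphdr and <linux/errno.h>). It
 * guarantees the bytes to be read are in the linear area before they
 * are dereferenced.
 */
#if 0
static int example_parse_ipv4(struct sk_buff *skb)
{
	struct iphdr *iph;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		return -EINVAL;			/* runt packet */
	iph = (struct iphdr *)skb->data;
	if (!pskb_may_pull(skb, iph->ihl * 4))
		return -EINVAL;			/* options not present */
	iph = (struct iphdr *)skb->data;	/* may have been reallocated */
	return iph->protocol;
}
#endif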

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}
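
/*
 * How the pointer operations above fit together (a sketch; the header
 * layout and function name are hypothetical): headroom made with
 * skb_reserve() is later consumed by skb_push() when lower layers
 * prepend their headers.
 */
#if 0
static void example_prepend_header(struct sk_buff *skb)
{
	struct example_hdr {
		__be16 type;
		__be16 len;
	} *hdr;

	hdr = (struct example_hdr *)skb_push(skb, sizeof(*hdr));
	hdr->type = htons(0x88b5);	/* local experimental EtherType */
	hdr->len  = htons(skb->len);
}
#endif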

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies: it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 16 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 16 bytes of
 * headroom, you should not reduce this.
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	16
#endif

extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb->data_len)) {
		WARN_ON(1);
		return;
	}
	skb->len  = len;
	skb->tail = skb->data + len;
}

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
static inline void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}


static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to out-
 *	of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk		= NULL;
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

/**
 *	__dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
	if (likely(skb))
		skb_reserve(skb, NET_SKB_PAD);
	return skb;
}

/**
 *	dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return __dev_alloc_skb(length, GFP_ATOMIC);
}

extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
		unsigned int length, gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}
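
/*
 * The usual receive-allocation sketch combining netdev_alloc_skb() with
 * NET_IP_ALIGN (illustrative only; the frame source and function name are
 * hypothetical, and <linux/string.h> is assumed for memcpy).
 */
#if 0
static struct sk_buff *example_rx_alloc(struct net_device *dev,
					const void *frame, unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);

	if (!skb)
		return NULL;
	skb_reserve(skb, NET_IP_ALIGN);	/* align the IP header */
	memcpy(skb_put(skb, len), frame, len);
	return skb;
}
#endif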

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, the data is reallocated. If reallocation fails, an error
 *	is returned and the original skb is not changed.
 *
 *	The result is an skb with a writable area skb->head...skb->tail
 *	and at least @headroom of space at the head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	int delta = (headroom > NET_SKB_PAD ? headroom : NET_SKB_PAD) -
			skb_headroom(skb);

	if (delta < 0)
		delta = 0;

	if (delta || skb_cloned(skb))
		return pskb_expand_head(skb, (delta + (NET_SKB_PAD - 1)) &
				~(NET_SKB_PAD - 1), 0, GFP_ATOMIC);
	return 0;
}

/**
 *	skb_padto	- pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */

static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}
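
/*
 * A transmit-path sketch for skb_padto() (illustrative only; the function
 * name is hypothetical and ETH_ZLEN from <linux/if_ether.h> is the usual
 * minimum ethernet frame length). Note the skb has already been freed
 * when skb_padto() fails, so it must not be touched again.
 */
#if 0
static int example_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (skb_padto(skb, ETH_ZLEN))
		return 0;	/* skb was freed; count the drop and go on */
	/* ... hand the (now >= ETH_ZLEN byte) frame to the hardware ... */
	return 0;
}
#endif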

static inline int skb_add_data(struct sk_buff *skb,
			       char __user *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
						      copy, 0, &err);
		if (!err) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline int skb_can_coalesce(struct sk_buff *skb, int i,
				   struct page *page, int off)
{
	if (i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == frag->page &&
		       off == frag->page_offset + frag->size;
	}
	return 0;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */

static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}

unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets is still valid after the operation.
 */

static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;					\
		     prefetch(skb->next), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;					\
		     prefetch(skb->prev), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->prev)
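
/*
 * A sketch of iterating a queue with skb_queue_walk() (illustrative only;
 * the function name is hypothetical). The caller must hold the queue
 * lock, and buffers must not be unlinked from inside the walk.
 */
#if 0
static unsigned int example_total_bytes(struct sk_buff_head *queue)
{
	struct sk_buff *skb;
	unsigned int bytes = 0;

	skb_queue_walk(queue, skb)
		bytes += skb->len;
	return bytes;
}
#endif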


extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
					 int noblock, int *err);
extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
				     struct poll_table_struct *wait);
extern int	       skb_copy_datagram_iovec(const struct sk_buff *from,
					       int offset, struct iovec *to,
					       int size);
extern int	       skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
							int hlen,
							struct iovec *iov);
extern void	       skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern void	       skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
					 unsigned int flags);
extern __wsum	       skb_checksum(const struct sk_buff *skb, int offset,
				    int len, __wsum csum);
extern int	       skb_copy_bits(const struct sk_buff *skb, int offset,
				     void *to, int len);
extern int	       skb_store_bits(const struct sk_buff *skb, int offset,
				      void *from, int len);
extern __wsum	       skb_copy_and_csum_bits(const struct sk_buff *skb,
					      int offset, u8 *to, int len,
					      __wsum csum);
extern void	       skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void	       skb_split(struct sk_buff *skb,
				 struct sk_buff *skb1, const u32 len);

extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);

static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
				       int len, void *buffer)
{
	int hlen = skb_headlen(skb);

	if (hlen - offset >= len)
		return skb->data + offset;

	if (skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}
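
/*
 * A sketch of using skb_header_pointer() (illustrative only; assumes
 * <linux/udp.h> for struct udphdr and <linux/errno.h>). The local buffer
 * is only used when the requested bytes are not already linear.
 */
#if 0
static int example_peek_udp_dport(const struct sk_buff *skb, int offset)
{
	struct udphdr _uh, *uh;

	uh = skb_header_pointer(skb, offset, sizeof(_uh), &_uh);
	if (!uh)
		return -EINVAL;		/* packet too short */
	return ntohs(uh->dest);
}
#endif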

extern void skb_init(void);
extern void skb_add_mtu(int mtu);

/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and stores
 *	it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval *stamp)
{
	stamp->tv_sec  = skb->tstamp.off_sec;
	stamp->tv_usec = skb->tstamp.off_usec;
}

/**
 * 	skb_set_timestamp - set timestamp of a skb
 *	@skb: skb to set stamp of
 *	@stamp: pointer to struct timeval to get stamp from
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts a struct timeval to an offset and stores
 *	it in the skb.
 */
static inline void skb_set_timestamp(struct sk_buff *skb, const struct timeval *stamp)
{
	skb->tstamp.off_sec  = stamp->tv_sec;
	skb->tstamp.off_usec = stamp->tv_usec;
}

extern void __net_timestamp(struct sk_buff *skb);

extern __sum16 __skb_checksum_complete(struct sk_buff *skb);

/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP.  It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *	this function can be used to verify the checksum on received
 *	packets.  In that case the function should return zero if the
 *	checksum is correct.  In particular, this function will return zero
 *	if skb->ip_summed is CHECKSUM_UNNECESSARY, which indicates that the
 *	hardware has already verified the correctness of the checksum.
 */
static inline unsigned int skb_checksum_complete(struct sk_buff *skb)
{
	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
		__skb_checksum_complete(skb);
}

#ifdef CONFIG_NETFILTER
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nfct->destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
	if (skb)
		atomic_inc(&skb->users);
}
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
	if (skb)
		kfree_skb(skb);
}
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
static inline void nf_reset(struct sk_buff *skb)
{
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put_reasm(skb->nfct_reasm);
	skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

#else /* CONFIG_NETFILTER */
static inline void nf_reset(struct sk_buff *skb) {}
#endif /* CONFIG_NETFILTER */

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline int skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */