/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <[email protected]>
 *		Florian La Roche, <[email protected]>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/cache.h>

#include <asm/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/poll.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/dmaengine.h>

#define HAVE_ALLOC_SKB		/* For the drivers to know */
#define HAVE_ALIGNABLE_SKB	/* Ditto 8) */

#define CHECKSUM_NONE		0
#define CHECKSUM_HW		1
#define CHECKSUM_UNNECESSARY	2

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_MAX_ORDER(X, ORDER)	(((PAGE_SIZE << (ORDER)) - (X) - \
				  sizeof(struct skb_shared_info)) & \
				  ~(SMP_CACHE_BYTES - 1))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* A. Checksumming of received packets by device.
 *
 *	NONE: device failed to checksum this packet.
 *		skb->csum is undefined.
 *
 *	UNNECESSARY: device parsed the packet and verified the checksum.
 *		skb->csum is undefined.
 *	      It is a bad option, but, unfortunately, many vendors do this.
 *	      Apparently with the secret goal of selling you a new device
 *	      when you add a new protocol to your host, e.g. IPv6. 8)
 *
 *	HW: the most generic way. Device supplied checksum of _all_
 *	    the packet as seen by netif_rx in skb->csum.
 *	    NOTE: Even if a device supports only some protocols, but
 *	    is able to produce some skb->csum, it MUST use HW,
 *	    not UNNECESSARY.
 *
 * B. Checksumming on output.
 *
 *	NONE: skb is checksummed by protocol or csum is not required.
 *
 *	HW: device is required to csum the packet as seen by hard_start_xmit
 *	from skb->h.raw to the end and to record the checksum
 *	at skb->h.raw+skb->csum.
 *
 *	Device must show its capabilities in dev->features, set
 *	at device setup time.
 *	NETIF_F_HW_CSUM	- it is a clever device, it is able to checksum
 *			  everything.
 *	NETIF_F_NO_CSUM	- loopback or reliable single hop media.
 *	NETIF_F_IP_CSUM	- device is dumb, it is able to csum only
 *			  TCP/UDP over IPv4. Sigh. Vendors like this
 *			  way for an unknown reason. Though, see comment above
 *			  about CHECKSUM_UNNECESSARY. 8)
 *
 *	Any questions? No questions, good.		--ANK
 */
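/*
 * Illustrative sketch (not part of this header): a receive path in a
 * driver whose hardware checksums every packet might hand the result to
 * the stack as follows, where "hw_csum" stands for a hypothetical
 * device-provided checksum of the whole packet:
 *
 *	skb->csum = hw_csum;
 *	skb->ip_summed = CHECKSUM_HW;
 *	netif_rx(skb);
 *
 * A device that merely verified a known protocol's checksum would set
 * skb->ip_summed = CHECKSUM_UNNECESSARY and leave skb->csum alone.
 */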
struct net_device;

#ifdef CONFIG_NETFILTER
struct nf_conntrack {
	atomic_t use;
	void (*destroy)(struct nf_conntrack *);
};

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
	atomic_t use;
	struct net_device *physindev;
	struct net_device *physoutdev;
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	struct net_device *netoutdev;
#endif
	unsigned int mask;
	unsigned long data[32 / sizeof(unsigned long)];
};
#endif

#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow 64K frame to be packed as single skb without frag_list */
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct page *page;
	__u16 page_offset;
	__u16 size;
};

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	atomic_t	dataref;
	unsigned short	nr_frags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short	gso_type;
	unsigned int	ip6_frag_id;
	struct sk_buff	*frag_list;
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  It is up to the users of the skb to agree on
 * where the payload starts.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
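/*
 * Illustrative sketch: pulling the two reference counts back out of
 * dataref.  This mirrors what skb_header_cloned() below does; the local
 * variable names are made up for the example:
 *
 *	int dataref      = atomic_read(&skb_shinfo(skb)->dataref);
 *	int payload_refs = dataref >> SKB_DATAREF_SHIFT;
 *	int total_refs   = dataref & SKB_DATAREF_MASK;
 */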
struct skb_timeval {
	u32	off_sec;
	u32	off_usec;
};


enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDPV4 = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCPV4_ECN = 1 << 3,
};

/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@sk: Socket we are owned by
 *	@tstamp: Time we arrived
 *	@dev: Device we arrived on/are leaving by
 *	@input_dev: Device we arrived on
 *	@h: Transport layer header
 *	@nh: Network layer header
 *	@mac: Link layer header
 *	@dst: destination entry
 *	@sp: the security path, used for xfrm
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@csum: Checksum
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@nohdr: Payload reference only, must not modify header
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ip_summed: Driver fed us an IP checksum
 *	@priority: Packet queueing priority
 *	@users: User count - see {datagram,tcp}.c
 *	@protocol: Packet protocol from driver
 *	@truesize: Buffer size
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@destructor: Destruct function
 *	@nfmark: Can be used for communication between hooks
 *	@nfct: Associated connection, if any
 *	@ipvs_property: skbuff is owned by ipvs
 *	@nfctinfo: Relationship of this skb to the connection
 *	@nfct_reasm: netfilter conntrack re-assembly pointer
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@dma_cookie: a cookie to one of several possible DMA operations
 *		done by skb DMA functions
 *	@secmark: security marking
 */

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	struct sock		*sk;
	struct skb_timeval	tstamp;
	struct net_device	*dev;
	struct net_device	*input_dev;

	union {
		struct tcphdr	*th;
		struct udphdr	*uh;
		struct icmphdr	*icmph;
		struct igmphdr	*igmph;
		struct iphdr	*ipiph;
		struct ipv6hdr	*ipv6h;
		unsigned char	*raw;
	} h;

	union {
		struct iphdr	*iph;
		struct ipv6hdr	*ipv6h;
		struct arphdr	*arph;
		unsigned char	*raw;
	} nh;

	union {
		unsigned char	*raw;
	} mac;

	struct dst_entry	*dst;
	struct sec_path		*sp;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48];

	unsigned int		len,
				data_len,
				mac_len,
				csum;
	__u32			priority;
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	__u8			pkt_type:3,
				fclone:2,
				ipvs_property:1;
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#ifdef CONFIG_NETFILTER
	struct nf_conntrack	*nfct;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif
	__u32			nfmark;
#endif /* CONFIG_NETFILTER */
#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif
#ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif


	/* These elements must be at the end, see alloc_skb() for details. */
	unsigned int		truesize;
	atomic_t		users;
	unsigned char		*head,
				*data,
				*tail,
				*end;
};
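/*
 * Illustrative sketch: a layer typically overlays a private structure
 * on skb->cb and checks its size at compile time.  "my_skb_parm" and
 * MY_SKB_CB() are hypothetical, but TCP_SKB_CB() in include/net/tcp.h
 * follows the same pattern:
 *
 *	struct my_skb_parm {
 *		unsigned long	when;
 *		int		retries;
 *	};
 *	#define MY_SKB_CB(skb)	((struct my_skb_parm *)&((skb)->cb[0]))
 *
 *	MY_SKB_CB(skb)->retries = 0;
 */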
#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>

#include <asm/system.h>

extern void	       kfree_skb(struct sk_buff *skb);
extern void	       __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int fclone);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, 1);
}

extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
					    unsigned int size,
					    gfp_t priority);
extern void	       kfree_skbmem(struct sk_buff *skb);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
extern struct sk_buff *pskb_copy(struct sk_buff *skb,
				 gfp_t gfp_mask);
extern int	       pskb_expand_head(struct sk_buff *skb,
					int nhead, int ntail,
					gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
					    unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				       int newheadroom, int newtailroom,
				       gfp_t priority);
extern int	       skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	kfree_skb(a)
extern void	       skb_over_panic(struct sk_buff *skb, int len,
				      void *here);
extern void	       skb_under_panic(struct sk_buff *skb, int len,
				       void *here);
extern void	       skb_truesize_bug(struct sk_buff *skb);

static inline void skb_truesize_check(struct sk_buff *skb)
{
	if (unlikely((int)skb->truesize < sizeof(struct sk_buff) + skb->len))
		skb_truesize_bug(skb);
}

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int getfrag(void *from, char *to, int offset,
				    int len, int odd, struct sk_buff *skb),
			void *from, int length);

struct skb_seq_state
{
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

extern void	      skb_prepare_seq_read(struct sk_buff *skb,
					   unsigned int from, unsigned int to,
					   struct skb_seq_state *st);
extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
				   struct skb_seq_state *st);
extern void	      skb_abort_seq_read(struct skb_seq_state *st);

extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
				    unsigned int to, struct ts_config *config,
				    struct ts_state *state);
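/*
 * Illustrative sketch: scanning an skb for a pattern with the textsearch
 * infrastructure.  The algorithm name, pattern and offsets are made up;
 * skb_find_text() returns the match offset, or UINT_MAX on no match:
 *
 *	struct ts_config *conf;
 *	struct ts_state state;
 *	unsigned int pos;
 *
 *	conf = textsearch_prepare("kmp", "HTTP", 4, GFP_KERNEL, TS_AUTOLOAD);
 *	if (!IS_ERR(conf)) {
 *		pos = skb_find_text(skb, 0, skb->len, conf, &state);
 *		textsearch_destroy(conf);
 *	}
 */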
/* Internal */
#define skb_shinfo(SKB)		((struct skb_shared_info *)((SKB)->end))

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant
 * atomic changes.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer.  This is done
 *	by acquiring a payload reference.  You must not read from the header
 *	part of skb->data after this.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	called from interrupt context or with spinlocks held, @pri must
 *	be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
					      gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);
		kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
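/*
 * Illustrative sketch: a packet handler making sure it owns the buffer
 * before modifying it.  This runs in softirq context, so GFP_ATOMIC is
 * the only safe allocation priority here:
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (skb == NULL)
 *		return NET_RX_DROP;
 */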
/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and are
 *	forwarding, and a couple of other messy ones. The normal one is
 *	tcpdumping a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);	/* Free our shared copy */
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->next;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_peek_tail
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->prev;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}
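/*
 * Illustrative sketch: declaring, initialising and using a private
 * queue through the locked helpers ("rxq" is hypothetical).  The
 * skb_queue_tail()/skb_dequeue() pair takes the queue lock internally:
 *
 *	struct sk_buff_head rxq;
 *
 *	skb_queue_head_init(&rxq);
 *	skb_queue_tail(&rxq, skb);
 *	skb = skb_dequeue(&rxq);
 */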
/*
 *	Insert an sk_buff at the start of a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */

/**
 *	__skb_queue_after - queue a buffer at the list head
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	struct sk_buff *next;
	list->qlen++;

	next = prev->next;
	newsk->next = next;
	newsk->prev = prev;
	next->prev = prev->next = newsk;
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	struct sk_buff *prev, *next;

	list->qlen++;
	next = (struct sk_buff *)list;
	prev = next->prev;
	newsk->next = next;
	newsk->prev = prev;
	next->prev = prev->next = newsk;
}


/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *next, *prev, *result;

	prev = (struct sk_buff *) list;
	next = prev->next;
	result = NULL;
	if (next != prev) {
		result	     = next;
		next	     = next->next;
		list->qlen--;
		next->prev   = prev;
		prev->next   = next;
		result->next = result->prev = NULL;
	}
	return result;
}


/*
 *	Insert a packet on a list.
 */
extern void	   skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

/*
 *	Place a packet after a given packet in a list.
 */
extern void	   skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	__skb_insert(newsk, old, old->next, list);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
extern void	   skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}
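/*
 * Illustrative sketch: since the "__" helpers take no locks, a caller
 * sharing a queue with interrupt context brackets them itself ("q" is
 * a hypothetical queue pointer):
 *
 *	unsigned long flags;
 *	struct sk_buff *skb;
 *
 *	spin_lock_irqsave(&q->lock, flags);
 *	skb = __skb_dequeue(q);
 *	spin_unlock_irqrestore(&q->lock, flags);
 */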
/* XXX: more streamlined implementation */

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}


static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_shinfo(skb)->frags[i].size;
	return len + skb_headlen(skb);
}

static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page		  = page;
	frag->page_offset	  = off;
	frag->size		  = size;
	skb_shinfo(skb)->nr_frags = i + 1;
}

#define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->frag_list)
#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))

/*
 *	Add data to an sk_buff
 */
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, current_text_addr());
	return tmp;
}

static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, current_text_addr());
	return skb->data;
}
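/*
 * Illustrative sketch: the usual receive-side construction pattern.
 * Headroom is reserved first, then skb_put() opens room for the frame
 * data; "len" and "hw_buf" are hypothetical, and skb_reserve() and
 * dev_alloc_skb() appear later in this header:
 *
 *	skb = dev_alloc_skb(len + NET_IP_ALIGN);
 *	if (skb == NULL)
 *		return -ENOMEM;
 *	skb_reserve(skb, NET_IP_ALIGN);
 *	memcpy(skb_put(skb, len), hw_buf, len);
 */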
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
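/*
 * Illustrative sketch: before reading a header through skb->data, make
 * sure the bytes are actually present in the linear part:
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;
 *	iph = skb->nh.iph;
 */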
/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an &sk_buff.
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 16 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As with NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 16 bytes of
 * headroom; you should not reduce this.
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	16
#endif

extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb->data_len)) {
		WARN_ON(1);
		return;
	}
	skb->len  = len;
	skb->tail = skb->data + len;
}

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
static inline void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}


static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}
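/*
 * Illustrative sketch: trimming trailing padding once the true length
 * is known, e.g. from the IP header's tot_len field ("ip_len" is a
 * hypothetical value; received packets should prefer pskb_trim_rcsum(),
 * defined later in this header, to keep the checksum state consistent):
 *
 *	if (pskb_trim(skb, ip_len))
 *		goto drop;
 */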
/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk		= NULL;
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

#ifndef CONFIG_HAVE_ARCH_DEV_ALLOC_SKB
/**
 *	__dev_alloc_skb - allocate an skbuff for sending
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
	if (likely(skb))
		skb_reserve(skb, NET_SKB_PAD);
	return skb;
}
#else
extern struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask);
#endif

/**
 *	dev_alloc_skb - allocate an skbuff for sending
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return __dev_alloc_skb(length, GFP_ATOMIC);
}

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and the original skb is not changed.
 *
 *	The result is an skb with a writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	int delta = (headroom > NET_SKB_PAD ? headroom : NET_SKB_PAD) -
			skb_headroom(skb);

	if (delta < 0)
		delta = 0;

	if (delta || skb_cloned(skb))
		return pskb_expand_head(skb, (delta + (NET_SKB_PAD-1)) &
				~(NET_SKB_PAD-1), 0, GFP_ATOMIC);
	return 0;
}

/**
 *	skb_padto	- pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */

static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}
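/*
 * Illustrative sketch: padding a short frame to the Ethernet minimum in
 * a driver transmit path.  On failure skb_padto() has already freed the
 * buffer, so the driver must not touch it again:
 *
 *	if (skb_padto(skb, ETH_ZLEN))
 *		return 0;
 */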
static inline int skb_add_data(struct sk_buff *skb,
			       char __user *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		unsigned int csum = csum_and_copy_from_user(from,
							    skb_put(skb, copy),
							    copy, 0, &err);
		if (!err) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline int skb_can_coalesce(struct sk_buff *skb, int i,
				   struct page *page, int off)
{
	if (i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == frag->page &&
		       off == frag->page_offset + frag->size;
	}
	return 0;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_HW checksum, or set ip_summed to CHECKSUM_NONE
 *	so that it can be recomputed from scratch.
 */

static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_HW)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}

unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets is still valid after the operation.
 */

static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_HW)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}

static inline void *kmap_skb_frag(const skb_frag_t *frag)
{
#ifdef CONFIG_HIGHMEM
	BUG_ON(in_irq());

	local_bh_disable();
#endif
	return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ);
}

static inline void kunmap_skb_frag(void *vaddr)
{
	kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
#ifdef CONFIG_HIGHMEM
	local_bh_enable();
#endif
}

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;					\
		     prefetch(skb->next), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;					\
		     prefetch(skb->prev), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->prev)
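/*
 * Illustrative sketch: iterating a queue without unlinking anything.
 * The caller must hold the queue lock (or otherwise own the list) for
 * the whole walk; "q" and "mtu" are hypothetical:
 *
 *	struct sk_buff *skb;
 *
 *	skb_queue_walk(&q, skb) {
 *		if (skb->len > mtu)
 *			break;
 *	}
 */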
extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
					 int noblock, int *err);
extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
				     struct poll_table_struct *wait);
extern int	       skb_copy_datagram_iovec(const struct sk_buff *from,
					       int offset, struct iovec *to,
					       int size);
extern int	       skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
							int hlen,
							struct iovec *iov);
extern void	       skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern void	       skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
					 unsigned int flags);
extern unsigned int    skb_checksum(const struct sk_buff *skb, int offset,
				    int len, unsigned int csum);
extern int	       skb_copy_bits(const struct sk_buff *skb, int offset,
				     void *to, int len);
extern int	       skb_store_bits(const struct sk_buff *skb, int offset,
				      void *from, int len);
extern unsigned int    skb_copy_and_csum_bits(const struct sk_buff *skb,
					      int offset, u8 *to, int len,
					      unsigned int csum);
extern void	       skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void	       skb_split(struct sk_buff *skb,
				 struct sk_buff *skb1, const u32 len);

extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);

static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
				       int len, void *buffer)
{
	int hlen = skb_headlen(skb);

	if (hlen - offset >= len)
		return skb->data + offset;

	if (skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}
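/*
 * Illustrative sketch: safely reading a transport header that may sit
 * in a fragment.  If the bytes are not linear they are copied into the
 * local buffer; "thoff" is a hypothetical transport-header offset:
 *
 *	struct tcphdr _tcph, *th;
 *
 *	th = skb_header_pointer(skb, thoff, sizeof(_tcph), &_tcph);
 *	if (th == NULL)
 *		goto drop;
 */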
extern void skb_init(void);
extern void skb_add_mtu(int mtu);

/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and stores
 *	it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval *stamp)
{
	stamp->tv_sec  = skb->tstamp.off_sec;
	stamp->tv_usec = skb->tstamp.off_usec;
}

/**
 *	skb_set_timestamp - set timestamp of a skb
 *	@skb: skb to set stamp of
 *	@stamp: pointer to struct timeval to get stamp from
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts a struct timeval to an offset and stores
 *	it in the skb.
 */
static inline void skb_set_timestamp(struct sk_buff *skb, const struct timeval *stamp)
{
	skb->tstamp.off_sec  = stamp->tv_sec;
	skb->tstamp.off_usec = stamp->tv_usec;
}

extern void __net_timestamp(struct sk_buff *skb);

extern unsigned int __skb_checksum_complete(struct sk_buff *skb);

/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP.  It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *	this function can be used to verify the checksum on received
 *	packets.  In that case the function should return zero if the
 *	checksum is correct.  In particular, this function will return zero
 *	if skb->ip_summed is CHECKSUM_UNNECESSARY, which indicates that the
 *	hardware has already verified the correctness of the checksum.
 */
static inline unsigned int skb_checksum_complete(struct sk_buff *skb)
{
	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
		__skb_checksum_complete(skb);
}

#ifdef CONFIG_NETFILTER
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nfct->destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
	if (skb)
		atomic_inc(&skb->users);
}
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
	if (skb)
		kfree_skb(skb);
}
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
static inline void nf_reset(struct sk_buff *skb)
{
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put_reasm(skb->nfct_reasm);
	skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

#else /* CONFIG_NETFILTER */
static inline void nf_reset(struct sk_buff *skb) {}
#endif /* CONFIG_NETFILTER */

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */