/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <[email protected]>
 *		Florian La Roche, <[email protected]>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/cache.h>

#include <asm/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>

/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* A. Checksumming of received packets by device.
 *
 *	NONE: device failed to checksum this packet.
 *		skb->csum is undefined.
 *
 *	UNNECESSARY: device parsed the packet and verified the checksum.
 *		skb->csum is undefined.
 *		This is a bad option, but, unfortunately, many vendors do
 *		this.  Apparently with the secret goal of selling you a new
 *		device when you add a new protocol to your host, e.g. IPv6. 8)
 *
 *	COMPLETE: the most generic way.  The device supplied the checksum of
 *		the _entire_ packet as seen by netif_rx in skb->csum.
 *		NOTE: even if a device supports only some protocols but is
 *		able to produce some skb->csum, it MUST use COMPLETE,
 *		not UNNECESSARY.
 *
 *	PARTIAL: identical to the case for output below.  This may occur
 *		on a packet received directly from another Linux OS, e.g.,
 *		a virtualised Linux kernel on the same host.  The packet can
 *		be treated in the same way as UNNECESSARY, except that on
 *		output (i.e., forwarding) the checksum must be filled in
 *		by the OS or the hardware.
 *
 * B. Checksumming on output.
 *
 *	NONE: the skb is checksummed by the protocol, or a checksum is not
 *		required.
 *
 *	PARTIAL: the device is required to checksum the packet as seen by
 *		hard_start_xmit from skb->csum_start to the end, and to
 *		record the checksum at skb->csum_start + skb->csum_offset.
 *
 *	A device must show its capabilities in dev->features, set up at
 *	device setup time:
 *
 *	NETIF_F_HW_CSUM	- a clever device; it is able to checksum everything.
 *	NETIF_F_NO_CSUM	- loopback or reliable single-hop media.
 *	NETIF_F_IP_CSUM	- the device is dumb; it is able to checksum only
 *			  TCP/UDP over IPv4.  Sigh.  Vendors like it this
 *			  way for reasons unknown.  Though, see the comment
 *			  above about CHECKSUM_UNNECESSARY. 8)
 *	NETIF_F_IPV6_CSUM - about as dumb as the last one, but does IPv6
 *			  instead.
 *
 *	Any questions?  No questions, good.		--ANK
 */
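/*
 * Editor's sketch (not part of the original header): how a driver typically
 * applies the receive rules above.  "hw_csum_ok" and "hw_csum" stand in for
 * whatever the hardware descriptor actually reports.
 *
 *	if (hw_csum_ok)				// hardware verified L4 checksum
 *		skb->ip_summed = CHECKSUM_UNNECESSARY;
 *	else if (hw_csum) {			// hardware summed the whole packet
 *		skb->csum = hw_csum;
 *		skb->ip_summed = CHECKSUM_COMPLETE;
 *	} else
 *		skb->ip_summed = CHECKSUM_NONE;	// stack must checksum itself
 *
 * On transmit, CHECKSUM_PARTIAL asks the device to write the checksum at
 * skb->csum_start + skb->csum_offset, as described in section B above.
 */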
struct net_device;
struct scatterlist;
struct pipe_inode_info;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
	atomic_t use;
};
#endif

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
	atomic_t		use;
	struct net_device	*physindev;
	struct net_device	*physoutdev;
	unsigned int		mask;
	unsigned long		data[32 / sizeof(unsigned long)];
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow a 64K frame to be packed as a single skb without frag_list */
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct page *page;
	__u32 page_offset;
	__u32 size;
};

#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since an arbitrary point in time
 * @syststamp:	hwtstamp transformed to system time base
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp.  The relation between the different kinds of time
 * stamps is as follows:
 *
 * syststamp and tstamp can be compared against each other in
 * arbitrary combinations.  The accuracy of a
 * syststamp/tstamp/"syststamp from other device" comparison is
 * limited by the accuracy of the transformation into system time
 * base.  This depends on the device driver and its underlying
 * hardware.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info.  Use skb_hwtstamps() to get a pointer.
 */
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
	ktime_t	syststamp;
};

/**
 * union skb_shared_tx - instructions for time stamping of outgoing packets
 * @hardware:		generate hardware time stamp
 * @software:		generate software time stamp
 * @in_progress:	device driver is going to provide
 *			hardware time stamp
 * @flags:		all shared_tx flags
 *
 * These flags are attached to packets as part of the
 * &skb_shared_info.  Use skb_tx() to get a pointer.
 */
union skb_shared_tx {
	struct {
		__u8	hardware:1,
			software:1,
			in_progress:1;
	};
	__u8 flags;
};

/* This data is invariant across clones and lives at
 * the end of the header data, i.e. at skb->end.
 */
struct skb_shared_info {
	unsigned short	nr_frags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short	gso_type;
	__be32		ip6_frag_id;
	union skb_shared_tx tx_flags;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;

	/*
	 * Warning: all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;

	skb_frag_t	frags[MAX_SKB_FRAGS];
	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor */
	void		*destructor_arg;
};
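/*
 * Editor's sketch (not from the original header): walking the paged part of
 * an skb via the shared info above.  skb_shinfo() is defined later in this
 * file; "consume" is a hypothetical per-chunk handler.
 *
 *	struct skb_shared_info *shinfo = skb_shinfo(skb);
 *	int i;
 *
 *	for (i = 0; i < shinfo->nr_frags; i++) {
 *		skb_frag_t *frag = &shinfo->frags[i];
 *		void *vaddr = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ);
 *
 *		consume(vaddr + frag->page_offset, frag->size);
 *		kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
 *	}
 */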
/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)


enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,

	SKB_GSO_FCOE = 1 << 5,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif
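/*
 * Editor's sketch (not from the original header): decoding the two dataref
 * halves described above, using SKB_DATAREF_SHIFT/SKB_DATAREF_MASK.
 * skb_shinfo() is defined later in this file.
 *
 *	int dataref = atomic_read(&skb_shinfo(skb)->dataref);
 *	int payload_refs = dataref >> SKB_DATAREF_SHIFT;  // payload-only refs
 *	int data_refs    = dataref &  SKB_DATAREF_MASK;   // all skb->data refs
 *
 * skb_cloned() and skb_header_cloned() below are just this arithmetic
 * packaged as predicates.
 */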
/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@sk: Socket we are owned by
 *	@tstamp: Time we arrived
 *	@dev: Device we arrived on/are leaving by
 *	@transport_header: Transport layer header
 *	@network_header: Network layer header
 *	@mac_header: Link layer header
 *	@_skb_refdst: destination entry (with norefcount bit)
 *	@sp: the security path, used for xfrm
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@hdr_len: writable header length of cloned skb
 *	@csum: Checksum (must include start/offset pair)
 *	@csum_start: Offset from skb->head where checksumming should start
 *	@csum_offset: Offset from csum_start where checksum should be stored
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@nohdr: Payload reference only, must not modify header
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ip_summed: Driver fed us an IP checksum
 *	@priority: Packet queueing priority
 *	@users: User count - see {datagram,tcp}.c
 *	@protocol: Packet protocol from driver
 *	@truesize: Buffer size
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@destructor: Destruct function
 *	@mark: Generic packet mark
 *	@nfct: Associated connection, if any
 *	@ipvs_property: skbuff is owned by ipvs
 *	@peeked: this packet has been seen already, so stats have been
 *		done for it, don't do them again
 *	@nf_trace: netfilter packet trace flag
 *	@nfctinfo: Relationship of this skb to the connection
 *	@nfct_reasm: netfilter conntrack re-assembly pointer
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@skb_iif: ifindex of device we arrived on
 *	@rxhash: the packet hash computed on receive
 *	@queue_mapping: Queue mapping for multiqueue devices
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@ndisc_nodetype: router type (from link layer)
 *	@dma_cookie: a cookie to one of several possible DMA operations
 *		done by skb DMA functions
 *	@secmark: security marking
 *	@vlan_tci: vlan tag control information
 */

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	ktime_t			tstamp;

	struct sock		*sk;
	struct net_device	*dev;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	unsigned long		_skb_refdst;
#ifdef CONFIG_XFRM
	struct sec_path		*sp;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;
	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	kmemcheck_bitfield_begin(flags1);
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	__u8			pkt_type:3,
				fclone:2,
				ipvs_property:1,
				peeked:1,
				nf_trace:1;
	kmemcheck_bitfield_end(flags1);
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack	*nfct;
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif

	int			skb_iif;
#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif

	__u32			rxhash;

	kmemcheck_bitfield_begin(flags2);
	__u16			queue_mapping:16;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif
	kmemcheck_bitfield_end(flags2);

	/* 0/14 bit hole */

#ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif
	union {
		__u32		mark;
		__u32		dropcount;
	};

	__u16			vlan_tci;

	sk_buff_data_t		transport_header;
	sk_buff_data_t		network_header;
	sk_buff_data_t		mac_header;
	/* These elements must be at the end, see alloc_skb() for details. */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	atomic_t		users;
};

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>

#include <asm/system.h>

/*
 * skb might have a dst pointer attached, refcounted or not.
 * _skb_refdst low order bit is set if refcount was _not_ taken
 */
#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)
/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of whether a reference was taken.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check we still are in a
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop().
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}

/**
 * skb_dst_set_noref - sets skb dst, without taking a reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was not taken on dst.
 * skb_dst_drop() should not dst_release() this dst.
 */
static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
	WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
}

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

extern void kfree_skb(struct sk_buff *skb);
extern void consume_skb(struct sk_buff *skb);
extern void	       __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int fclone, int node);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, -1);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, 1, -1);
}

extern int skb_recycle_check(struct sk_buff *skb, int skb_size);

extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
extern struct sk_buff *pskb_copy(struct sk_buff *skb,
				 gfp_t gfp_mask);
extern int	       pskb_expand_head(struct sk_buff *skb,
					int nhead, int ntail,
					gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
					    unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				       int newheadroom, int newtailroom,
				       gfp_t priority);
extern int	       skb_to_sgvec(struct sk_buff *skb,
				    struct scatterlist *sg, int offset,
				    int len);
extern int	       skb_cow_data(struct sk_buff *skb, int tailbits,
				    struct sk_buff **trailer);
extern int	       skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	consume_skb(a)

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int getfrag(void *from, char *to, int offset,
			int len, int odd, struct sk_buff *skb),
			void *from, int length);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};
extern void	      skb_prepare_seq_read(struct sk_buff *skb,
					   unsigned int from, unsigned int to,
					   struct skb_seq_state *st);
extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
				   struct skb_seq_state *st);
extern void	      skb_abort_seq_read(struct skb_seq_state *st);

extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
				    unsigned int to, struct ts_config *config,
				    struct ts_state *state);

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

static inline union skb_shared_tx *skb_tx(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->tx_flags;
}

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return skb->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_first - check if skb is the first entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return skb->prev == (struct sk_buff *)list;
}

/**
 *	skb_queue_next - return the next packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the next packet in @list after @skb.  It is only valid to
 *	call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

/**
 *	skb_queue_prev - return the prev packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the prev packet in @list before @skb.  It is only valid to
 *	call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}
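/*
 * Editor's sketch (not part of the original header): typical use of the
 * zero-copy sequential reader declared above.  The loop visits every byte of
 * a possibly nonlinear skb in whatever chunk sizes its layout dictates;
 * "scan_bytes" is a hypothetical consumer.
 *
 *	struct skb_seq_state st;
 *	const u8 *data;
 *	unsigned int consumed = 0, avail;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((avail = skb_seq_read(consumed, &data, &st)) != 0) {
 *		scan_bytes(data, avail);
 *		consumed += avail;
 *	}
 *	// skb_abort_seq_read(&st) is only needed when stopping early.
 */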
/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant
 * atomic changes.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer.  This is done
 *	by acquiring a payload reference.  You must not read from the header
 *	part of skb->data after this.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	called from interrupt context or with spinlocks held, @pri must
 *	be %GFP_ATOMIC.
 *
 *	NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
					      gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);
		kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
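/*
 * Editor's sketch (not from the original header): the canonical pattern for
 * code that may need to modify an skb it does not exclusively own.  First
 * drop sharing at the skb level; if the data itself might be cloned, callers
 * typically follow up with skb_unshare() or skb_cow(), defined below.
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (!skb)
 *		return;		// allocation failed, old reference was dropped
 *	// skb now has users == 1 and may be relinked or requeued freely
 */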
/*
 *	Copy shared buffers into a new sk_buff.  We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones.  The normal one is tcpdumping
 *	a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);	/* Free our shared copy */
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek - peek at the head of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->next;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->prev;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/**
 *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length aspects of
 *	an sk_buff_head object.  This allows initializing the list
 *	aspects of an sk_buff_head without reinitializing things like
 *	the spinlock.  It can also be used for on-stack sk_buff_head
 *	objects where the spinlock is known to not be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}
864 */ 865 static inline void skb_queue_head_init(struct sk_buff_head *list) 866 { 867 spin_lock_init(&list->lock); 868 __skb_queue_head_init(list); 869 } 870 871 static inline void skb_queue_head_init_class(struct sk_buff_head *list, 872 struct lock_class_key *class) 873 { 874 skb_queue_head_init(list); 875 lockdep_set_class(&list->lock, class); 876 } 877 878 /* 879 * Insert an sk_buff on a list. 880 * 881 * The "__skb_xxxx()" functions are the non-atomic ones that 882 * can only be called with interrupts disabled. 883 */ 884 extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list); 885 static inline void __skb_insert(struct sk_buff *newsk, 886 struct sk_buff *prev, struct sk_buff *next, 887 struct sk_buff_head *list) 888 { 889 newsk->next = next; 890 newsk->prev = prev; 891 next->prev = prev->next = newsk; 892 list->qlen++; 893 } 894 895 static inline void __skb_queue_splice(const struct sk_buff_head *list, 896 struct sk_buff *prev, 897 struct sk_buff *next) 898 { 899 struct sk_buff *first = list->next; 900 struct sk_buff *last = list->prev; 901 902 first->prev = prev; 903 prev->next = first; 904 905 last->next = next; 906 next->prev = last; 907 } 908 909 /** 910 * skb_queue_splice - join two skb lists, this is designed for stacks 911 * @list: the new list to add 912 * @head: the place to add it in the first list 913 */ 914 static inline void skb_queue_splice(const struct sk_buff_head *list, 915 struct sk_buff_head *head) 916 { 917 if (!skb_queue_empty(list)) { 918 __skb_queue_splice(list, (struct sk_buff *) head, head->next); 919 head->qlen += list->qlen; 920 } 921 } 922 923 /** 924 * skb_queue_splice - join two skb lists and reinitialise the emptied list 925 * @list: the new list to add 926 * @head: the place to add it in the first list 927 * 928 * The list at @list is reinitialised 929 */ 930 static inline void skb_queue_splice_init(struct sk_buff_head *list, 931 struct sk_buff_head *head) 932 { 933 if (!skb_queue_empty(list)) { 934 __skb_queue_splice(list, (struct sk_buff *) head, head->next); 935 head->qlen += list->qlen; 936 __skb_queue_head_init(list); 937 } 938 } 939 940 /** 941 * skb_queue_splice_tail - join two skb lists, each list being a queue 942 * @list: the new list to add 943 * @head: the place to add it in the first list 944 */ 945 static inline void skb_queue_splice_tail(const struct sk_buff_head *list, 946 struct sk_buff_head *head) 947 { 948 if (!skb_queue_empty(list)) { 949 __skb_queue_splice(list, head->prev, (struct sk_buff *) head); 950 head->qlen += list->qlen; 951 } 952 } 953 954 /** 955 * skb_queue_splice_tail - join two skb lists and reinitialise the emptied list 956 * @list: the new list to add 957 * @head: the place to add it in the first list 958 * 959 * Each of the lists is a queue. 960 * The list at @list is reinitialised 961 */ 962 static inline void skb_queue_splice_tail_init(struct sk_buff_head *list, 963 struct sk_buff_head *head) 964 { 965 if (!skb_queue_empty(list)) { 966 __skb_queue_splice(list, head->prev, (struct sk_buff *) head); 967 head->qlen += list->qlen; 968 __skb_queue_head_init(list); 969 } 970 } 971 972 /** 973 * __skb_queue_after - queue a buffer at the list head 974 * @list: list to use 975 * @prev: place after this buffer 976 * @newsk: buffer to queue 977 * 978 * Queue a buffer int the middle of a list. This function takes no locks 979 * and you must therefore hold required locks before calling it. 980 * 981 * A buffer cannot be placed on two lists at the same time. 
/**
 *	__skb_queue_after - queue a buffer after another buffer in a list
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		       struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
extern void	   skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}
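/*
 * Editor's sketch (not from the original header): the locked skb_queue_tail()
 * / skb_dequeue() pair is safe on a shared queue; the __-prefixed variants
 * used here require the caller to provide serialisation, which is why they
 * suit private or already-locked queues.
 *
 *	spin_lock_irqsave(&q->lock, flags);
 *	__skb_queue_tail(q, skb);	// equivalent to skb_queue_tail(q, skb)
 *	spin_unlock_irqrestore(&q->lock, flags);
 */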
/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}


static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_shinfo(skb)->frags[i].size;
	return len + skb_headlen(skb);
}

static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page		  = page;
	frag->page_offset	  = off;
	frag->size		  = size;
	skb_shinfo(skb)->nr_frags = i + 1;
}

extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
			    int off, int size);

#define SKB_PAGE_ASSERT(skb) 	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb) 	BUG_ON(skb_has_frags(skb))
#define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}
#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}
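/*
 * Editor's sketch (not from the original header): how the pointer-moving
 * helpers above cooperate when building an outgoing frame.  Assume a freshly
 * allocated skb with enough head- and tailroom; "hdr_len", "payload" and
 * "build_header" are hypothetical.  skb_reserve() is defined further below.
 *
 *	skb_reserve(skb, hdr_len);		// leave room for the headers
 *	memcpy(skb_put(skb, payload_len), payload, payload_len);
 *	hdr = skb_push(skb, hdr_len);		// step back into the headroom
 *	build_header(hdr);
 *
 * On receive the inverse applies: skb_pull() advances skb->data past a
 * header once it has been processed.
 */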
extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff.
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != ~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->transport_header;
}
static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb->transport_header = skb->data + offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb->network_header = skb->data + offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != NULL;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb->mac_header = skb->data + offset;
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif
/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 32 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom; you should not reduce this.
 *
 * With RPS, we raised NET_SKB_PAD to 64 so that get_rps_cpus() fetches span
 * a 64-byte aligned block to fit modern (>= 64 bytes) cache line sizes:
 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	64
#endif

extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb->data_len)) {
		WARN_ON(1);
		return;
	}
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

extern void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to out-
 *	of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk		= NULL;
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
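/*
 * Editor's sketch (not from the original header): stripping link-layer
 * padding on receive with the trim helpers above.  pskb_trim() handles
 * paged skbs and can fail, so its return value must be checked;
 * __skb_trim() is the cheap linear-only variant.  "expected_len" is a
 * hypothetical length derived from the protocol header.
 *
 *	if (skb->len > expected_len &&
 *	    pskb_trim(skb, expected_len))
 *		goto drop;		// -ENOMEM while trimming a clone
 */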
/**
 *	__dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
	if (likely(skb))
		skb_reserve(skb, NET_SKB_PAD);
	return skb;
}

extern struct sk_buff *dev_alloc_skb(unsigned int length);

extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
		unsigned int length, gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, length + NET_IP_ALIGN);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

extern struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask);

/**
 *	netdev_alloc_page - allocate a page for ps-rx on a specific device
 *	@dev: network device to receive on
 *
 *	Allocate a new page node local to the specified device.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct page *netdev_alloc_page(struct net_device *dev)
{
	return __netdev_alloc_page(dev, GFP_ATOMIC);
}

static inline void netdev_free_page(struct net_device *dev, struct page *page)
{
	__free_page(page);
}

/**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
 *	@len: length up to which to write
 *
 *	Returns true if modifying the header part of the cloned buffer
 *	does not require the data to be copied.
 */
static inline int skb_clone_writable(struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom < NET_SKB_PAD)
		headroom = NET_SKB_PAD;
	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}
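/*
 * Editor's sketch (not from the original header): the usual driver receive
 * allocation, combining the device-aware allocator with IP-header alignment.
 * "rx_buf_len" is a hypothetical buffer size.
 *
 *	skb = netdev_alloc_skb_ip_align(dev, rx_buf_len);
 *	if (!skb)
 *		return -ENOMEM;
 *	// skb->data is now offset by NET_IP_ALIGN, so the IPv4 header that
 *	// follows the 14-byte ethernet header lands on a 4-byte boundary;
 *	// NET_SKB_PAD headroom was already added by the allocator.
 */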
/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and the original skb is not changed.
 *
 *	The result is an skb with a writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/**
 *	skb_cow_head - skb_cow but only making the head writable
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	This function is identical to skb_cow except that we replace the
 *	skb_cloned check by skb_header_cloned.  It should be used when
 *	you only need to push on some header and do not need to modify
 *	the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}

/**
 *	skb_padto	- pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

static inline int skb_add_data(struct sk_buff *skb,
			       char __user *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
							    copy, 0, &err);
		if (!err) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline int skb_can_coalesce(struct sk_buff *skb, int i,
				   struct page *page, int off)
{
	if (i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == frag->page &&
		       off == frag->page_offset + frag->size;
	}
	return 0;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}
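/*
 * Editor's sketch (not from the original header): making room for, and
 * securing the right to write, a new header before pushing it, e.g. when
 * adding an encapsulation header of "enc_len" bytes (hypothetical):
 *
 *	if (skb_cow_head(skb, enc_len))
 *		goto drop;			// reallocation failed
 *	hdr = skb_push(skb, enc_len);		// headroom is now private
 *	build_encap_header(hdr);		// hypothetical
 *
 * skb_cow_head() is preferred over skb_cow() here because only the header
 * area is written, so a payload-only clone need not be copied.
 */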
/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */
static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}

unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets is still valid after the operation.
 */
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;					\
		     prefetch(skb->next), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->next)

#define skb_queue_walk_safe(queue, skb, tmp)					\
		for (skb = (queue)->next, tmp = skb->next;			\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)

#define skb_queue_walk_from(queue, skb)						\
		for (; prefetch(skb->next), (skb != (struct sk_buff *)(queue));\
		     skb = skb->next)

#define skb_queue_walk_from_safe(queue, skb, tmp)				\
		for (tmp = skb->next;						\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;					\
		     prefetch(skb->prev), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->prev)


static inline bool skb_has_frags(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
{
	frag->next = skb_shinfo(skb)->frag_list;
	skb_shinfo(skb)->frag_list = frag;
}

#define skb_walk_frags(skb, iter)	\
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)

extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
					   int *peeked, int *err);
extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
					 int noblock, int *err);
extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
				     struct poll_table_struct *wait);
extern int	       skb_copy_datagram_iovec(const struct sk_buff *from,
					       int offset, struct iovec *to,
					       int size);
extern int	       skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
							int hlen,
							struct iovec *iov);
extern int	       skb_copy_datagram_from_iovec(struct sk_buff *skb,
						    int offset,
						    const struct iovec *from,
						    int from_offset,
						    int len);
extern int	       skb_copy_datagram_const_iovec(const struct sk_buff *from,
						     int offset,
						     const struct iovec *to,
						     int to_offset,
						     int size);
extern void	       skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern void	       skb_free_datagram_locked(struct sock *sk,
						struct sk_buff *skb);
extern int	       skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
					 unsigned int flags);
extern __wsum	       skb_checksum(const struct sk_buff *skb, int offset,
				    int len, __wsum csum);
extern int	       skb_copy_bits(const struct sk_buff *skb, int offset,
				     void *to, int len);
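/*
 * Editor's sketch (not from the original header): the walk macros above in
 * use.  The _safe variant must be used when the current skb may be unlinked
 * inside the loop body; "skb_is_stale" is a hypothetical test.
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_queue_walk_safe(&sk->sk_receive_queue, skb, tmp) {
 *		if (skb_is_stale(skb)) {
 *			__skb_unlink(skb, &sk->sk_receive_queue);
 *			kfree_skb(skb);
 *		}
 *	}
 */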
extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
					   int *peeked, int *err);
extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
					 int noblock, int *err);
extern unsigned int datagram_poll(struct file *file, struct socket *sock,
				  struct poll_table_struct *wait);
extern int skb_copy_datagram_iovec(const struct sk_buff *from,
				   int offset, struct iovec *to,
				   int size);
extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
					    int hlen,
					    struct iovec *iov);
extern int skb_copy_datagram_from_iovec(struct sk_buff *skb,
					int offset,
					const struct iovec *from,
					int from_offset,
					int len);
extern int skb_copy_datagram_const_iovec(const struct sk_buff *from,
					 int offset,
					 const struct iovec *to,
					 int to_offset,
					 int size);
extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern void skb_free_datagram_locked(struct sock *sk,
				     struct sk_buff *skb);
extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
			     unsigned int flags);
extern __wsum skb_checksum(const struct sk_buff *skb, int offset,
			   int len, __wsum csum);
extern int skb_copy_bits(const struct sk_buff *skb, int offset,
			 void *to, int len);
extern int skb_store_bits(struct sk_buff *skb, int offset,
			  const void *from, int len);
extern __wsum skb_copy_and_csum_bits(const struct sk_buff *skb,
				     int offset, u8 *to, int len,
				     __wsum csum);
extern int skb_splice_bits(struct sk_buff *skb,
			   unsigned int offset,
			   struct pipe_inode_info *pipe,
			   unsigned int len,
			   unsigned int flags);
extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void skb_split(struct sk_buff *skb,
		      struct sk_buff *skb1, const u32 len);
extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
		     int shiftlen);

extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);

static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
				       int len, void *buffer)
{
	int hlen = skb_headlen(skb);

	if (hlen - offset >= len)
		return skb->data + offset;

	if (skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}
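
/*
 * Usage sketch (illustrative only): skb_header_pointer() gives a
 * protocol handler a contiguous view of a header that may be split
 * across fragments.  If the bytes sit in the linear area a pointer into
 * the skb is returned; otherwise they are copied into the caller's
 * stack buffer.  example_peek_udp_dport is hypothetical and assumes
 * offset locates a UDP header within the skb (struct udphdr comes from
 * <linux/udp.h>).
 *
 *	static int example_peek_udp_dport(const struct sk_buff *skb,
 *					  int offset)
 *	{
 *		struct udphdr _udph, *uh;
 *
 *		uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
 *		if (uh == NULL)
 *			return -EINVAL;
 *		return ntohs(uh->dest);
 *	}
 */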
static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}

extern void skb_init(void);

static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
	return skb->tstamp;
}

/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and
 *	stores it in @stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
				     struct timeval *stamp)
{
	*stamp = ktime_to_timeval(skb->tstamp);
}

static inline void skb_get_timestampns(const struct sk_buff *skb,
				       struct timespec *stamp)
{
	*stamp = ktime_to_timespec(skb->tstamp);
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return ktime_set(0, 0);
}

/**
 *	skb_tstamp_tx - queue clone of skb with send time stamps
 *	@orig_skb: the original outgoing packet
 *	@hwtstamps: hardware time stamps, may be NULL if not available
 *
 *	If the skb has an associated socket, this function clones the
 *	skb (thus sharing the actual data and optional structures), stores
 *	the optional hardware time stamping information (if non-NULL) or
 *	generates a software time stamp (otherwise), then queues the clone
 *	to the error queue of the socket.  Errors are silently ignored.
 */
extern void skb_tstamp_tx(struct sk_buff *orig_skb,
			  struct skb_shared_hwtstamps *hwtstamps);

extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
extern __sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return skb->ip_summed & CHECKSUM_UNNECESSARY;
}

/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP.  It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *	this function can be used to verify the checksum on received
 *	packets.  In that case the function should return zero if the
 *	checksum is correct.  In particular, this function will return zero
 *	if skb->ip_summed is CHECKSUM_UNNECESSARY, which indicates that the
 *	hardware has already verified the correctness of the checksum.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}
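
/*
 * Usage sketch (illustrative only): a typical receive path lets a
 * hardware-verified checksum short-circuit the software check.  A zero
 * return means the packet is acceptable; this assumes the protocol has
 * already seeded skb->csum with its pseudo-header sum where one is
 * required, as TCP/UDP do.  example_rcv_check is hypothetical.
 *
 *	static int example_rcv_check(struct sk_buff *skb)
 *	{
 *		if (skb_checksum_complete(skb))
 *			return -EINVAL;
 *		return 0;
 *	}
 */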
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
	if (skb)
		atomic_inc(&skb->users);
}
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
	if (skb)
		kfree_skb(skb);
}
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
	nf_conntrack_put_reasm(skb->nfct_reasm);
	skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

/* Note: This doesn't put any conntrack and bridge info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->nfct = src->nfct;
	nf_conntrack_get(src->nfct);
	dst->nfctinfo = src->nfctinfo;
	dst->nfct_reasm = src->nfct_reasm;
	nf_conntrack_get_reasm(src->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	dst->nf_bridge = src->nf_bridge;
	nf_bridge_get(src->nf_bridge);
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(dst->nfct);
	nf_conntrack_put_reasm(dst->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(dst->nf_bridge);
#endif
	__nf_copy(dst, src);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}

/* The receive queue is recorded off by one so that a queue_mapping of
 * zero can mean "no receive queue recorded"; see skb_rx_queue_recorded().
 */
static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
	return skb->queue_mapping != 0;
}
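
/*
 * Usage sketch (illustrative only): a transmit queue selector can reuse
 * the recorded receive queue to keep a flow on one queue pair, falling
 * back to skb_tx_hash() (declared just below) otherwise.
 * example_select_queue and the modulo reduction are hypothetical;
 * real_num_tx_queues is a struct net_device field.
 *
 *	static u16 example_select_queue(struct net_device *dev,
 *					struct sk_buff *skb)
 *	{
 *		if (skb_rx_queue_recorded(skb))
 *			return skb_get_rx_queue(skb) %
 *			       dev->real_num_tx_queues;
 *		return skb_tx_hash(dev, skb);
 *	}
 */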
extern u16 skb_tx_hash(const struct net_device *dev,
		       const struct sk_buff *skb);

#ifdef CONFIG_XFRM
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return skb->sp;
}
#else
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return NULL;
}
#endif

static inline int skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

static inline int skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set. */
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}

static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one.  Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */