/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <[email protected]>
 *		Florian La Roche, <[email protected]>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/cache.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>

/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* A. Checksumming of received packets by device.
 *
 *	NONE: device failed to checksum this packet.
 *		skb->csum is undefined.
 *
 *	UNNECESSARY: device parsed the packet and verified the checksum.
 *		skb->csum is undefined.
 *	      This is a bad option, but, unfortunately, many vendors do
 *	      this. Apparently with the secret goal of selling you a new
 *	      device when you add a new protocol to your host, e.g. IPv6. 8)
 *
 *	COMPLETE: the most generic way. Device supplied checksum of _all_
 *	    the packet as seen by netif_rx in skb->csum.
 *	    NOTE: Even if a device supports only some protocols, but
 *	    is able to produce some skb->csum, it MUST use COMPLETE,
 *	    not UNNECESSARY.
 *
 *	PARTIAL: identical to the case for output below. This may occur
 *	    on a packet received directly from another Linux OS, e.g.,
 *	    a virtualised Linux kernel on the same host. The packet can
 *	    be treated in the same way as UNNECESSARY, except that on
 *	    output (i.e., forwarding) the checksum must be filled in
 *	    by the OS or the hardware.
 *
 * B. Checksumming on output.
 *
 *	NONE: skb is checksummed by the protocol, or csum is not required.
 *
 *	PARTIAL: device is required to csum the packet as seen by
 *	    hard_start_xmit from skb->csum_start to the end, and to record
 *	    the checksum at skb->csum_start + skb->csum_offset.
 *
 *	Device must show its capabilities in dev->features, set
 *	at device setup time:
 *	NETIF_F_HW_CSUM	- the device is clever and able to checksum
 *			  everything.
 *	NETIF_F_NO_CSUM	- loopback or reliable single hop media.
 *	NETIF_F_IP_CSUM	- the device is dumb and able to csum only
 *			  TCP/UDP over IPv4. Sigh. Vendors like it this
 *			  way for some unknown reason. Though, see the
 *			  comment above about CHECKSUM_UNNECESSARY. 8)
 *	NETIF_F_IPV6_CSUM - about as dumb as the last one, but does
 *			  IPv6 instead.
 *
 * Any questions? No questions, good.		--ANK
 */
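
/* A hedged illustration (not part of this header's API): resolving a
 * CHECKSUM_PARTIAL skb in software, modeled on skb_checksum_help() in
 * net/core/dev.c. The stack checksums from csum_start to the end of the
 * packet and stores the result at csum_start + csum_offset:
 *
 *	int offset = skb_checksum_start_offset(skb);
 *	__wsum csum = skb_checksum(skb, offset, skb->len - offset, 0);
 *
 *	*(__sum16 *)(skb->data + offset + skb->csum_offset) = csum_fold(csum);
 *	skb->ip_summed = CHECKSUM_NONE;
 */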

struct net_device;
struct scatterlist;
struct pipe_inode_info;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
	atomic_t use;
};
#endif

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
	atomic_t		use;
	struct net_device	*physindev;
	struct net_device	*physoutdev;
	unsigned int		mask;
	unsigned long		data[32 / sizeof(unsigned long)];
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow a 64K frame to be packed as a single skb without frag_list. Since
 * GRO uses frags we allocate at least 16 regardless of page size.
 */
#if (65536/PAGE_SIZE + 2) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)
#endif

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
	__u32 size;
#else
	__u16 page_offset;
	__u16 size;
#endif
};

#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since arbitrary point in time
 * @syststamp:	hwtstamp transformed to system time base
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp. The relation between the different kinds of time
 * stamps is as follows:
 *
 * syststamp and tstamp can be compared against each other in
 * arbitrary combinations. The accuracy of a
 * syststamp/tstamp/"syststamp from other device" comparison is
 * limited by the accuracy of the transformation into system time
 * base. This depends on the device driver and its underlying
 * hardware.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 */
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
	ktime_t	syststamp;
};

/* Definitions for tx_flags in struct skb_shared_info */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

	/* generate software time stamp */
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

	/* ensure the originating sk reference is available on driver level */
	SKBTX_DRV_NEEDS_SK_REF = 1 << 3,

	/* device driver supports TX zero-copy buffers */
	SKBTX_DEV_ZEROCOPY = 1 << 4,
};

/*
 * The callback notifies userspace to release buffers when skb DMA is done
 * in the lower device; the skb's last reference should be 0 when calling
 * this. The desc field is used to track the userspace buffer index.
 */
struct ubuf_info {
	void (*callback)(void *);
	void *arg;
	unsigned long desc;
};
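
/* A hedged sketch (illustrative only): how a driver typically cooperates
 * with the tx_flags above. In ndo_start_xmit() it marks the skb when a
 * hardware time stamp was requested and will be delivered later; on TX
 * completion it hands the stamp back via skb_tstamp_tx() (declared below):
 *
 *	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
 *		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 *	...
 *	struct skb_shared_hwtstamps hwts = { .hwtstamp = ns_to_ktime(ns) };
 *	skb_tstamp_tx(skb, &hwts);
 */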

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	unsigned short	nr_frags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short	gso_type;
	__be32		ip6_frag_id;
	__u8		tx_flags;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;

	/*
	 * Warning : all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor */
	void *		destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves. The higher 16 bits hold references
 * to the payload part of skb->data. The lower 16 bits hold references to
 * the entire skb->data. A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
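
/* A hedged illustration of the split described above (reader-chosen names,
 * not part of this header): extracting the two halves of dataref. Compare
 * skb_cloned() and skb_header_cloned() below, which apply the same masks:
 *
 *	int dataref      = atomic_read(&skb_shinfo(skb)->dataref);
 *	int data_refs    = dataref & SKB_DATAREF_MASK;
 *	int payload_refs = dataref >> SKB_DATAREF_SHIFT;
 */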

enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,

	SKB_GSO_FCOE = 1 << 5,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
    defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
#endif

/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@tstamp: Time we arrived
 *	@sk: Socket we are owned by
 *	@dev: Device we arrived on/are leaving by
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@_skb_refdst: destination entry (with norefcount bit)
 *	@sp: the security path, used for xfrm
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@hdr_len: writable header length of cloned skb
 *	@csum: Checksum (must include start/offset pair)
 *	@csum_start: Offset from skb->head where checksumming should start
 *	@csum_offset: Offset from csum_start where checksum should be stored
 *	@priority: Packet queueing priority
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@ip_summed: Driver fed us an IP checksum
 *	@nohdr: Payload reference only, must not modify header
 *	@nfctinfo: Relationship of this skb to the connection
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ipvs_property: skbuff is owned by ipvs
 *	@peeked: this packet has been seen already, so stats have been
 *		done for it, don't do them again
 *	@nf_trace: netfilter packet trace flag
 *	@protocol: Packet protocol from driver
 *	@destructor: Destruct function
 *	@nfct: Associated connection, if any
 *	@nfct_reasm: netfilter conntrack re-assembly pointer
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@skb_iif: ifindex of device we arrived on
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@rxhash: the packet hash computed on receive
 *	@queue_mapping: Queue mapping for multiqueue devices
 *	@ndisc_nodetype: router type (from link layer)
 *	@ooo_okay: allow the mapping of a socket to a queue to be changed
 *	@dma_cookie: a cookie to one of several possible DMA operations
 *		done by skb DMA functions
 *	@secmark: security marking
 *	@mark: Generic packet mark
 *	@dropcount: total number of sk_receive_queue overflows
 *	@vlan_tci: vlan tag control information
 *	@transport_header: Transport layer header
 *	@network_header: Network layer header
 *	@mac_header: Link layer header
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@truesize: Buffer size
 *	@users: User count - see {datagram,tcp}.c
 */

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	ktime_t			tstamp;

	struct sock		*sk;
	struct net_device	*dev;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	unsigned long		_skb_refdst;
#ifdef CONFIG_XFRM
	struct	sec_path	*sp;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;
	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	kmemcheck_bitfield_begin(flags1);
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	__u8			pkt_type:3,
				fclone:2,
				ipvs_property:1,
				peeked:1,
				nf_trace:1;
	kmemcheck_bitfield_end(flags1);
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack	*nfct;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif

	int			skb_iif;
#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif

	__u32			rxhash;

	__u16			queue_mapping;
	kmemcheck_bitfield_begin(flags2);
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif
	__u8			ooo_okay:1;
	kmemcheck_bitfield_end(flags2);

	/* 0/13 bit hole */

#ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif
	union {
		__u32		mark;
		__u32		dropcount;
	};

	__u16			vlan_tci;

	sk_buff_data_t		transport_header;
	sk_buff_data_t		network_header;
	sk_buff_data_t		mac_header;
	/* These elements must be at the end, see alloc_skb() for details. */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	atomic_t		users;
};

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>

#include <asm/system.h>

/*
 * skb might have a dst pointer attached, refcounted or not.
 * _skb_refdst low order bit is set if refcount was _not_ taken
 */
#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)

/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of reference taken or not.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check we still are in a
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}

extern void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst);

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

extern void kfree_skb(struct sk_buff *skb);
extern void consume_skb(struct sk_buff *skb);
extern void	       __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int fclone, int node);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, 1, NUMA_NO_NODE);
}

extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);

extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
extern struct sk_buff *pskb_copy(struct sk_buff *skb,
				 gfp_t gfp_mask);
extern int	       pskb_expand_head(struct sk_buff *skb,
					int nhead, int ntail,
					gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
					    unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				       int newheadroom, int newtailroom,
				       gfp_t priority);
extern int	       skb_to_sgvec(struct sk_buff *skb,
				    struct scatterlist *sg, int offset,
				    int len);
extern int	       skb_cow_data(struct sk_buff *skb, int tailbits,
				    struct sk_buff **trailer);
extern int	       skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	consume_skb(a)

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int getfrag(void *from, char *to, int offset,
			int len, int odd, struct sk_buff *skb),
			void *from, int length);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

extern void	      skb_prepare_seq_read(struct sk_buff *skb,
					   unsigned int from, unsigned int to,
					   struct skb_seq_state *st);
extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
				   struct skb_seq_state *st);
extern void	      skb_abort_seq_read(struct skb_seq_state *st);
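
/* A hedged usage sketch for the sequential reader above (caller-chosen
 * names; error handling elided). skb_abort_seq_read() is only needed if
 * the walk stops before skb_seq_read() has returned 0:
 *
 *	struct skb_seq_state st;
 *	unsigned int consumed = 0, avail;
 *	const u8 *data;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((avail = skb_seq_read(consumed, &data, &st)) != 0) {
 *		... process avail bytes at data ...
 *		consumed += avail;
 *	}
 */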

extern unsigned int    skb_find_text(struct sk_buff *skb, unsigned int from,
				     unsigned int to, struct ts_config *config,
				     struct ts_state *state);

extern __u32 __skb_get_rxhash(struct sk_buff *skb);
static inline __u32 skb_get_rxhash(struct sk_buff *skb)
{
	if (!skb->rxhash)
		skb->rxhash = __skb_get_rxhash(skb);

	return skb->rxhash;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return skb->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_first - check if skb is the first entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return skb->prev == (struct sk_buff *)list;
}

/**
 *	skb_queue_next - return the next packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the next packet in @list after @skb. It is only valid to
 *	call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

/**
 *	skb_queue_prev - return the prev packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the prev packet in @list before @skb. It is only valid to
 *	call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant atomic
 * changes.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer. This is done
 *	by acquiring a payload reference. You must not read from the header
 *	part of skb->data after this.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt context or with spinlocks held, @pri
 *	must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
					      gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);
		kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
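
/* A hedged usage sketch (illustrative only): a receive path that may
 * modify the skb first makes sure it holds the only reference:
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (!skb)
 *		return NET_RX_DROP;
 *	... skb may now be relinked; see skb_cloned() for the data itself ...
 */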

/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and a forwarder,
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);	/* Free our shared copy */
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek - peek at the head of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->next;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->prev;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/**
 *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length aspects of
 *	an sk_buff_head object. This allows initializing the list
 *	aspects of an sk_buff_head without reinitializing things like
 *	the spinlock. It can also be used for on-stack sk_buff_head
 *	objects where the spinlock is known to not be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers will need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
		struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}

/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

/**
 *	skb_queue_splice - join two skb lists, this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}
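
/* A hedged usage sketch (caller-chosen names): draining a shared queue
 * into a private on-stack list so that per-packet work happens outside
 * the queue lock:
 *
 *	struct sk_buff_head tmp;
 *	unsigned long flags;
 *
 *	__skb_queue_head_init(&tmp);
 *	spin_lock_irqsave(&queue->lock, flags);
 *	skb_queue_splice_init(queue, &tmp);
 *	spin_unlock_irqrestore(&queue->lock, flags);
 *	while ((skb = __skb_dequeue(&tmp)) != NULL)
 *		... process skb ...
 */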

/**
 *	__skb_queue_after - queue a buffer after the given buffer
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		       struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				   struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
extern void	   skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so it must only be used with the appropriate locks held. The head item
 *	is returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}
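
/* A hedged sketch (illustrative only): the locked variants manage the
 * sk_buff_head spinlock themselves, so a simple producer/consumer pair
 * needs no explicit locking:
 *
 *	skb_queue_tail(&q, skb);			producer
 *
 *	while ((skb = skb_dequeue(&q)) != NULL)		consumer
 *		kfree_skb(skb);
 */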

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so it must only be used with the appropriate locks held. The tail item
 *	is returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}


static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_shinfo(skb)->frags[i].size;
	return len + skb_headlen(skb);
}

static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page		  = page;
	frag->page_offset	  = off;
	frag->size		  = size;
	skb_shinfo(skb)->nr_frags = i + 1;
}

extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
			    int off, int size);

#define SKB_PAGE_ASSERT(skb) 	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb) 	BUG_ON(skb_has_frag_list(skb))
#define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}
#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}
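
/* A hedged parsing sketch (illustrative only): before reading a header
 * from skb->data, callers make sure it is present in the linear area
 * with pskb_may_pull() (defined below):
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;
 *	iph = (struct iphdr *)skb->data;
 */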

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}
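
/* A hedged sketch (illustrative only; struct my_hdr is hypothetical): the
 * canonical way to build an outgoing frame with the helpers above is to
 * reserve headroom first, then put() payload and push() headers:
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		goto err;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *	memcpy(skb_put(skb, payload_len), payload, payload_len);
 *	hdr = (struct my_hdr *)skb_push(skb, sizeof(*hdr));
 */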

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != ~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb->transport_header = skb->data + offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb->network_header = skb->data + offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != NULL;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb->mac_header = skb->data + offset;
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */

static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
	return skb->csum_start - skb_headroom(skb);
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
	return pskb_may_pull(skb, skb_network_offset(skb) + len);
}
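
/* A hedged sketch of how a receive path typically uses the accessors
 * above (illustrative only): mark each layer's header as the cursor
 * moves inward, so later code can find them independently of skb->data:
 *
 *	skb_reset_network_header(skb);
 *	iph = (struct iphdr *)skb_network_header(skb);
 *	skb_set_transport_header(skb, iph->ihl * 4);
 */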

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 32 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to the cacheline size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom; you should not reduce this.
 *
 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
 * to reduce the average number of cache lines per packet.
 * get_rps_cpus() for example only accesses one 64-byte-aligned block:
 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
#endif

extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb_is_nonlinear(skb))) {
		WARN_ON(1);
		return;
	}
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

extern void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to out-
 *	of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk		= NULL;
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

/**
 *	__dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
	if (likely(skb))
		skb_reserve(skb, NET_SKB_PAD);
	return skb;
}

extern struct sk_buff *dev_alloc_skb(unsigned int length);

extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
		unsigned int length, gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length, gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}
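
/* A hedged sketch (illustrative only; rx_buf_len is caller-chosen): a
 * driver RX refill path usually combines the helpers above so the IP
 * header lands aligned:
 *
 *	skb = netdev_alloc_skb_ip_align(dev, rx_buf_len);
 *	if (unlikely(!skb))
 *		break;		out of memory, refill again later
 */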

/**
 *	__netdev_alloc_page - allocate a page for ps-rx on a specific device
 *	@dev: network device to receive on
 *	@gfp_mask: alloc_pages_node mask
 *
 * 	Allocate a new page. dev currently unused.
 *
 * 	%NULL is returned if there is no free memory.
 */
static inline struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
{
	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, 0);
}

/**
 *	netdev_alloc_page - allocate a page for ps-rx on a specific device
 *	@dev: network device to receive on
 *
 * 	Allocate a new page. dev currently unused.
 *
 * 	%NULL is returned if there is no free memory.
 */
static inline struct page *netdev_alloc_page(struct net_device *dev)
{
	return __netdev_alloc_page(dev, GFP_ATOMIC);
}

static inline void netdev_free_page(struct net_device *dev, struct page *page)
{
	__free_page(page);
}

/**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
 *	@len: length up to which to write
 *
 *	Returns true if modifying the header part of the cloned buffer
 *	does not require the data to be copied.
 */
static inline int skb_clone_writable(struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom < NET_SKB_PAD)
		headroom = NET_SKB_PAD;
	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and the original skb is not changed.
 *
 *	The result is skb with a writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/**
 *	skb_cow_head - skb_cow but only making the head writable
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	This function is identical to skb_cow except that we replace the
 *	skb_cloned check by skb_header_cloned. It should be used when
 *	you only need to push on some header and do not need to modify
 *	the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}
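
/* A hedged sketch (illustrative only): before pushing a new header on a
 * path where the skb may be cloned or short on headroom, callers COW the
 * head first; modeled on the VLAN insertion pattern (VLAN_HLEN and
 * struct vlan_ethhdr come from <linux/if_vlan.h>):
 *
 *	if (skb_cow_head(skb, VLAN_HLEN) < 0)
 *		goto drop;
 *	veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);
 */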

/**
 *	skb_padto	- pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

static inline int skb_add_data(struct sk_buff *skb,
			       char __user *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
							    copy, 0, &err);
		if (!err) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline int skb_can_coalesce(struct sk_buff *skb, int i,
				   struct page *page, int off)
{
	if (i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == frag->page &&
		       off == frag->page_offset + frag->size;
	}
	return 0;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *	skb_linearize - convert a paged skb to a linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */
static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}

unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets is still valid after the operation.
 */
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;					\
		     skb != (struct sk_buff *)(queue);				\
		     skb = skb->next)

#define skb_queue_walk_safe(queue, skb, tmp)					\
		for (skb = (queue)->next, tmp = skb->next;			\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)

#define skb_queue_walk_from(queue, skb)						\
		for (; skb != (struct sk_buff *)(queue);			\
		     skb = skb->next)

#define skb_queue_walk_from_safe(queue, skb, tmp)				\
		for (tmp = skb->next;						\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;					\
		     skb != (struct sk_buff *)(queue);				\
		     skb = skb->prev)

#define skb_queue_reverse_walk_safe(queue, skb, tmp)				\
		for (skb = (queue)->prev, tmp = skb->prev;			\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->prev)

#define skb_queue_reverse_walk_from_safe(queue, skb, tmp)			\
		for (tmp = skb->prev;						\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->prev)

static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
{
	frag->next = skb_shinfo(skb)->frag_list;
	skb_shinfo(skb)->frag_list = frag;
}

#define skb_walk_frags(skb, iter)	\
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
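
/* A hedged sketch (illustrative only) of the datagram helpers declared
 * below, chained the way a protocol's recvmsg() typically uses them:
 *
 *	skb = skb_recv_datagram(sk, flags, noblock, &err);
 *	if (!skb)
 *		return err;
 *	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
 *	skb_free_datagram(sk, skb);
 */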
static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
{
	frag->next = skb_shinfo(skb)->frag_list;
	skb_shinfo(skb)->frag_list = frag;
}

#define skb_walk_frags(skb, iter)	\
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)

extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
					   int *peeked, int *err);
extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
					 int noblock, int *err);
extern unsigned int datagram_poll(struct file *file, struct socket *sock,
				  struct poll_table_struct *wait);
extern int skb_copy_datagram_iovec(const struct sk_buff *from,
				   int offset, struct iovec *to,
				   int size);
extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
					    int hlen,
					    struct iovec *iov);
extern int skb_copy_datagram_from_iovec(struct sk_buff *skb,
					int offset,
					const struct iovec *from,
					int from_offset,
					int len);
extern int skb_copy_datagram_const_iovec(const struct sk_buff *from,
					 int offset,
					 const struct iovec *to,
					 int to_offset,
					 int size);
extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb);
extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
			     unsigned int flags);
extern __wsum skb_checksum(const struct sk_buff *skb, int offset,
			   int len, __wsum csum);
extern int skb_copy_bits(const struct sk_buff *skb, int offset,
			 void *to, int len);
extern int skb_store_bits(struct sk_buff *skb, int offset,
			  const void *from, int len);
extern __wsum skb_copy_and_csum_bits(const struct sk_buff *skb,
				     int offset, u8 *to, int len,
				     __wsum csum);
extern int skb_splice_bits(struct sk_buff *skb,
			   unsigned int offset,
			   struct pipe_inode_info *pipe,
			   unsigned int len,
			   unsigned int flags);
extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void skb_split(struct sk_buff *skb,
		      struct sk_buff *skb1, const u32 len);
extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
		     int shiftlen);

extern struct sk_buff *skb_segment(struct sk_buff *skb, u32 features);

static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
				       int len, void *buffer)
{
	int hlen = skb_headlen(skb);

	if (hlen - offset >= len)
		return skb->data + offset;

	if (skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}
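/*
 * Illustrative sketch, not part of the kernel API: skb_header_pointer()
 * either returns a pointer straight into the linear data or, when the
 * requested bytes live in paged fragments, copies them into the
 * caller's buffer and returns that.  The two-port header layout below
 * is hypothetical.
 */
static inline int example_get_ports(const struct sk_buff *skb, int offset,
				    __be16 *source, __be16 *dest)
{
	struct example_ports {
		__be16 source;
		__be16 dest;
	} _buf, *p;

	p = skb_header_pointer(skb, offset, sizeof(_buf), &_buf);
	if (!p)
		return -EINVAL;	/* fewer than sizeof(_buf) bytes at offset */

	*source = p->source;
	*dest = p->dest;
	return 0;
}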
static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}

extern void skb_init(void);

static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
	return skb->tstamp;
}

/**
 * skb_get_timestamp - get timestamp from a skb
 * @skb: skb to get stamp from
 * @stamp: pointer to struct timeval to store stamp in
 *
 * Timestamps are stored in the skb as offsets to a base timestamp.
 * This function converts the offset back to a struct timeval and stores
 * it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
				     struct timeval *stamp)
{
	*stamp = ktime_to_timeval(skb->tstamp);
}

static inline void skb_get_timestampns(const struct sk_buff *skb,
				       struct timespec *stamp)
{
	*stamp = ktime_to_timespec(skb->tstamp);
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return ktime_set(0, 0);
}

extern void skb_timestamping_init(void);

#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING

extern void skb_clone_tx_timestamp(struct sk_buff *skb);
extern bool skb_defer_rx_timestamp(struct sk_buff *skb);

#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */

static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}

static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
	return false;
}

#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */

/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps
 */
void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps);

/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb: the original outgoing packet
 * @hwtstamps: hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamping information (if non NULL) or
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket. Errors are silently ignored.
 */
extern void skb_tstamp_tx(struct sk_buff *orig_skb,
			  struct skb_shared_hwtstamps *hwtstamps);

static inline void sw_tx_timestamp(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
	    !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		skb_tstamp_tx(skb, NULL);
}

/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 * @skb: A socket buffer.
 *
 * Ethernet MAC drivers should call this function in their
 * hard_start_xmit() routine immediately before giving the sk_buff to
 * the MAC hardware.
 */
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
	skb_clone_tx_timestamp(skb);
	sw_tx_timestamp(skb);
}
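/*
 * Illustrative sketch, not part of the kernel API: a driver transmit
 * routine calls skb_tx_timestamp() at the last point where the stack
 * still owns the skb.  Descriptor setup and the hardware hand-off are
 * elided and hypothetical; only skb_tx_timestamp() is real.
 */
static inline int example_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	/* ... map buffers and fill tx descriptors for dev here ... */

	/* Clone for PHY timestamping and/or queue a software timestamp. */
	skb_tx_timestamp(skb);

	/* ... ring the doorbell to start the DMA ... */
	return 0;	/* NETDEV_TX_OK in a real driver */
}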
extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
extern __sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return skb->ip_summed & CHECKSUM_UNNECESSARY;
}

/**
 * skb_checksum_complete - Calculate checksum of an entire packet
 * @skb: packet to process
 *
 * This function calculates the checksum over the entire packet plus
 * the value of skb->csum. The latter can be used to supply the
 * checksum of a pseudo header as used by TCP/UDP. It returns the
 * checksum.
 *
 * For protocols that contain complete checksums such as ICMP/TCP/UDP,
 * this function can be used to verify the checksum on received
 * packets. In that case the function should return zero if the
 * checksum is correct. In particular, this function will return zero
 * if skb->ip_summed is CHECKSUM_UNNECESSARY, which indicates that the
 * hardware has already verified the correctness of the checksum.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}
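/*
 * Illustrative sketch, not part of the kernel API: a receive handler for
 * a checksummed protocol seeds skb->csum with the pseudo-header sum and
 * then lets skb_checksum_complete() either trust what the hardware has
 * already verified or fold the full packet sum in software.  Computing
 * pseudo_csum is left to the caller; real protocols also try to
 * validate a CHECKSUM_COMPLETE value against the pseudo header before
 * falling back to this path.
 */
static inline bool example_rcv_csum_ok(struct sk_buff *skb, __wsum pseudo_csum)
{
	/* The packet's own checksum field makes the total fold to zero
	 * when everything is intact. */
	skb->csum = pseudo_csum;
	return skb_checksum_complete(skb) == 0;
}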
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
	if (skb)
		atomic_inc(&skb->users);
}
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
	if (skb)
		kfree_skb(skb);
}
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(skb->nfct_reasm);
	skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

/* Note: this copies state from src without first putting (releasing)
 * any conntrack and bridge references already held by dst; nf_copy()
 * below does that. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->nfct = src->nfct;
	nf_conntrack_get(src->nfct);
	dst->nfctinfo = src->nfctinfo;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	dst->nfct_reasm = src->nfct_reasm;
	nf_conntrack_get_reasm(src->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	dst->nf_bridge = src->nf_bridge;
	nf_bridge_get(src->nf_bridge);
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(dst->nfct);
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(dst->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(dst->nf_bridge);
#endif
	__nf_copy(dst, src);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}

static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
	return skb->queue_mapping != 0;
}

extern u16 __skb_tx_hash(const struct net_device *dev,
			 const struct sk_buff *skb,
			 unsigned int num_tx_queues);
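/*
 * Illustrative sketch, not part of the kernel API: queue_mapping is
 * stored with a +1 bias so that zero can mean "no queue recorded".  A
 * multiqueue driver records the hardware ring an skb arrived on, and
 * the stack can read it back later, e.g. to pick a matching tx queue.
 * The ring index below is hypothetical.
 */
static inline void example_record_rx_ring(struct sk_buff *skb, u16 ring)
{
	skb_record_rx_queue(skb, ring);		/* stores ring + 1 internally */

	if (skb_rx_queue_recorded(skb))		/* true: mapping is non-zero */
		(void)skb_get_rx_queue(skb);	/* yields the original ring */
}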
#ifdef CONFIG_XFRM
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return skb->sp;
}
#else
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return NULL;
}
#endif

static inline int skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

static inline int skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set. */
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
	    unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}

static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one.  Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * Fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper to document places where we make this assertion.
 */
static inline void skb_checksum_none_assert(struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);

#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */