/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <[email protected]>
 *		Florian La Roche, <[email protected]>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/cache.h>

#include <asm/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>

#define HAVE_ALLOC_SKB		/* For the drivers to know */
#define HAVE_ALIGNABLE_SKB	/* Ditto 8)		   */

/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE 0
#define CHECKSUM_UNNECESSARY 1
#define CHECKSUM_COMPLETE 2
#define CHECKSUM_PARTIAL 3

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* A. Checksumming of received packets by device.
 *
 *	NONE: device failed to checksum this packet.
 *		skb->csum is undefined.
 *
 *	UNNECESSARY: device parsed the packet and verified the checksum.
 *		skb->csum is undefined.
 *		It is a bad option, but, unfortunately, many vendors do
 *		this, apparently with the secret goal of selling you a new
 *		device when you add a new protocol to your host, e.g. IPv6. 8)
 *
 *	COMPLETE: the most generic way. Device supplied the checksum of the
 *	    _entire_ packet as seen by netif_rx in skb->csum.
 *	    NOTE: Even if a device supports only some protocols but is
 *	    able to produce some skb->csum, it MUST use COMPLETE,
 *	    not UNNECESSARY.
 *
 *	PARTIAL: identical to the case for output below. This may occur
 *	    on a packet received directly from another Linux OS, e.g.,
 *	    a virtualised Linux kernel on the same host. The packet can
 *	    be treated in the same way as UNNECESSARY, except that on
 *	    output (i.e., forwarding) the checksum must be filled in
 *	    by the OS or the hardware.
 *
 * B. Checksumming on output.
 *
 *	NONE: skb is checksummed by protocol, or csum is not required.
 *
 *	PARTIAL: device is required to csum the packet as seen by
 *	    hard_start_xmit from skb->csum_start to the end, and to record
 *	    the checksum at skb->csum_start + skb->csum_offset.
 *
 *	The device must show its capabilities in dev->features, set
 *	at device setup time:
 *	NETIF_F_HW_CSUM	  - it is a clever device, able to checksum
 *			    everything.
 *	NETIF_F_NO_CSUM	  - loopback or reliable single-hop media.
 *	NETIF_F_IP_CSUM	  - the device is dumb, able to csum only
 *			    TCP/UDP over IPv4. Sigh. Vendors like it this
 *			    way for an unknown reason. Though, see the
 *			    comment above about CHECKSUM_UNNECESSARY. 8)
 *	NETIF_F_IPV6_CSUM - about as dumb as the last one, but does IPv6
 *			    instead.
 *
 *	Any questions? No questions, good.		--ANK
 */
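/*
 * Illustration (editor's sketch, not part of this header's API): how a
 * hypothetical receive path might apply the contract above.  The function
 * and parameter names are invented for this example.
 */
#if 0	/* example only, never compiled */
static void example_rx_set_checksum(struct sk_buff *skb, int hw_summed,
				    __wsum hw_csum)
{
	if (hw_summed) {
		/* Device summed the whole packet: report COMPLETE and
		 * hand the raw sum to the stack in skb->csum.
		 */
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = hw_csum;
	} else {
		/* No hardware help: the stack must checksum in software. */
		skb->ip_summed = CHECKSUM_NONE;
	}
}
#endif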
struct net_device;
struct scatterlist;
struct pipe_inode_info;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
	atomic_t use;
};
#endif

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
	atomic_t use;
	struct net_device *physindev;
	struct net_device *physoutdev;
	unsigned int mask;
	unsigned long data[32 / sizeof(unsigned long)];
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow 64K frame to be packed as single skb without frag_list */
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct page *page;
	__u32 page_offset;
	__u32 size;
};

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	atomic_t	dataref;
	unsigned short	nr_frags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short	gso_type;
	__be32		ip6_frag_id;
#ifdef CONFIG_HAS_DMA
	unsigned int	num_dma_maps;
#endif
	struct sk_buff	*frag_list;
	skb_frag_t	frags[MAX_SKB_FRAGS];
#ifdef CONFIG_HAS_DMA
	dma_addr_t	dma_maps[MAX_SKB_FRAGS + 1];
#endif
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
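/*
 * Illustration (editor's sketch): decoding the two dataref halves described
 * above.  This mirrors the logic used by skb_header_cloned() further down;
 * the function name is invented for this example.
 */
#if 0	/* example only, never compiled */
static int example_dataref_counts(const struct sk_buff *skb)
{
	int dataref = atomic_read(&skb_shinfo(skb)->dataref);
	int payload_refs = dataref >> SKB_DATAREF_SHIFT;
	int total_refs = dataref & SKB_DATAREF_MASK;

	/* Invariant: total skb->data references >= payload references. */
	return total_refs - payload_refs; /* refs that may touch the header */
}
#endif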
enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@sk: Socket we are owned by
 *	@tstamp: Time we arrived
 *	@dev: Device we arrived on/are leaving by
 *	@transport_header: Transport layer header
 *	@network_header: Network layer header
 *	@mac_header: Link layer header
 *	@dst: destination entry
 *	@sp: the security path, used for xfrm
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@hdr_len: writable header length of cloned skb
 *	@csum: Checksum (must include start/offset pair)
 *	@csum_start: Offset from skb->head where checksumming should start
 *	@csum_offset: Offset from csum_start where checksum should be stored
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@nohdr: Payload reference only, must not modify header
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ip_summed: Driver fed us an IP checksum
 *	@priority: Packet queueing priority
 *	@users: User count - see {datagram,tcp}.c
 *	@protocol: Packet protocol from driver
 *	@truesize: Buffer size
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@destructor: Destruct function
 *	@mark: Generic packet mark
 *	@nfct: Associated connection, if any
 *	@ipvs_property: skbuff is owned by ipvs
 *	@peeked: this packet has been seen already, so stats have been
 *		done for it, don't do them again
 *	@nf_trace: netfilter packet trace flag
 *	@nfctinfo: Relationship of this skb to the connection
 *	@nfct_reasm: netfilter conntrack re-assembly pointer
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@iif: ifindex of device we arrived on
 *	@queue_mapping: Queue mapping for multiqueue devices
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@ndisc_nodetype: router type (from link layer)
 *	@do_not_encrypt: set to prevent encryption of this frame
 *	@requeue: set to indicate that the wireless core should attempt
 *		a software retry on this frame if we failed to
 *		receive an ACK for it
 *	@dma_cookie: a cookie to one of several possible DMA operations
 *		done by skb DMA functions
 *	@secmark: security marking
 *	@vlan_tci: vlan tag control information
 */

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	struct sock		*sk;
	ktime_t			tstamp;
	struct net_device	*dev;

	union {
		struct dst_entry	*dst;
		struct rtable		*rtable;
	};
#ifdef CONFIG_XFRM
	struct sec_path		*sp;
#endif
	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48];

	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;
	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	__u8			pkt_type:3,
				fclone:2,
				ipvs_property:1,
				peeked:1,
				nf_trace:1;
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack	*nfct;
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif

	int			iif;
	__u16			queue_mapping;
#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif
#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
	__u8			do_not_encrypt:1;
	__u8			requeue:1;
#endif
	/* 0/13/14 bit hole */

#ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif

	__u32			mark;

	__u16			vlan_tci;

	sk_buff_data_t		transport_header;
	sk_buff_data_t		network_header;
	sk_buff_data_t		mac_header;
	/* These elements must be at the end, see alloc_skb() for details. */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	atomic_t		users;
};

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>

#include <asm/system.h>

#ifdef CONFIG_HAS_DMA
#include <linux/dma-mapping.h>
extern int skb_dma_map(struct device *dev, struct sk_buff *skb,
		       enum dma_data_direction dir);
extern void skb_dma_unmap(struct device *dev, struct sk_buff *skb,
			  enum dma_data_direction dir);
#endif

extern void kfree_skb(struct sk_buff *skb);
extern void __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int fclone, int node);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, -1);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, 1, -1);
}

extern int skb_recycle_check(struct sk_buff *skb, int skb_size);

extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
extern struct sk_buff *pskb_copy(struct sk_buff *skb,
				 gfp_t gfp_mask);
extern int	       pskb_expand_head(struct sk_buff *skb,
					int nhead, int ntail,
					gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
					    unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				       int newheadroom, int newtailroom,
				       gfp_t priority);
extern int	       skb_to_sgvec(struct sk_buff *skb,
				    struct scatterlist *sg, int offset,
				    int len);
extern int	       skb_cow_data(struct sk_buff *skb, int tailbits,
				    struct sk_buff **trailer);
extern int	       skb_pad(struct sk_buff *skb, int pad);
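/*
 * Illustration (editor's sketch): a minimal allocate/use/free cycle with
 * alloc_skb().  GFP_KERNEL is assumed, i.e. process context; the function
 * name and size handling are invented for this example.
 */
#if 0	/* example only, never compiled */
static struct sk_buff *example_alloc(unsigned int size)
{
	struct sk_buff *skb = alloc_skb(size, GFP_KERNEL);

	if (!skb)
		return NULL;	/* allocation may fail; always check */
	/* ... fill the buffer ... */
	return skb;		/* the caller eventually does kfree_skb(skb) */
}
#endif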
#define dev_kfree_skb(a)	kfree_skb(a)
extern void	      skb_over_panic(struct sk_buff *skb, int len,
				     void *here);
extern void	      skb_under_panic(struct sk_buff *skb, int len,
				      void *here);
extern void	      skb_truesize_bug(struct sk_buff *skb);

static inline void skb_truesize_check(struct sk_buff *skb)
{
	int len = sizeof(struct sk_buff) + skb->len;

	if (unlikely((int)skb->truesize < len))
		skb_truesize_bug(skb);
}

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int getfrag(void *from, char *to, int offset,
				    int len, int odd, struct sk_buff *skb),
			void *from, int length);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

extern void	      skb_prepare_seq_read(struct sk_buff *skb,
					   unsigned int from, unsigned int to,
					   struct skb_seq_state *st);
extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
				   struct skb_seq_state *st);
extern void	      skb_abort_seq_read(struct skb_seq_state *st);

extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
				    unsigned int to, struct ts_config *config,
				    struct ts_state *state);

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return (skb->next == (struct sk_buff *) list);
}

/**
 *	skb_queue_is_first - check if skb is the first entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return (skb->prev == (struct sk_buff *) list);
}

/**
 *	skb_queue_next - return the next packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the next packet in @list after @skb.  It is only valid to
 *	call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}
/**
 *	skb_queue_prev - return the prev packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the prev packet in @list before @skb.  It is only valid to
 *	call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant
 * atomic changes.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer.  This is done
 *	by acquiring a payload reference.  You must not read from the header
 *	part of skb->data after this.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	called from interrupt context or with spinlocks held, @pri must
 *	be %GFP_ATOMIC.
 *
 *	NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
					      gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);
		kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
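/*
 * Illustration (editor's sketch): the usual skb_share_check() pattern at
 * the top of a receive handler, assuming softirq context (%GFP_ATOMIC).
 * The handler name is invented for this example.
 */
#if 0	/* example only, never compiled */
static int example_rx_handler(struct sk_buff *skb)
{
	/* If someone else also holds the skb, work on a private clone. */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;	/* original was freed by skb_share_check */
	/* ... process the now-exclusive skb ... */
	kfree_skb(skb);
	return 0;
}
#endif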
/*
 * Copy shared buffers into a new sk_buff. We effectively do COW on
 * packets to handle cases where we have a local reader and forward,
 * and a couple of other messy ones. The usual case is tcpdumping
 * a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt context, @pri must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);	/* Free our shared copy */
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->next;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_peek_tail
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->prev;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}
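/*
 * Illustration (editor's sketch): peeking safely.  As the comments above
 * warn, the queue lock must be held around the peek and around any use of
 * the returned pointer.  The function name is invented for this example.
 */
#if 0	/* example only, never compiled */
static unsigned int example_peek_len(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	unsigned int len = 0;
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	skb = skb_peek(list);
	if (skb)
		len = skb->len;		/* skb stays on the list */
	spin_unlock_irqrestore(&list->lock, flags);
	return len;
}
#endif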
/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/**
 *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length aspects of
 *	an sk_buff_head object.  This allows initializing the list
 *	aspects of an sk_buff_head without reinitializing things like
 *	the spinlock.  It can also be used for on-stack sk_buff_head
 *	objects where the spinlock is known to not be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 * This function creates a split-out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either
 * the network layer or the drivers should be annotated to consolidate
 * the main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
		struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}

/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
extern void	   skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

/**
 *	skb_queue_splice - join two skb lists, this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}
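/*
 * Illustration (editor's sketch): draining a private, unlocked batch queue
 * into a shared one with a single lock acquisition using the splice helpers
 * above.  The function and parameter names are invented for this example.
 */
#if 0	/* example only, never compiled */
static void example_flush_batch(struct sk_buff_head *batch,
				struct sk_buff_head *shared)
{
	unsigned long flags;

	spin_lock_irqsave(&shared->lock, flags);
	/* Append the whole batch at the tail and leave @batch reusable. */
	skb_queue_splice_tail_init(batch, shared);
	spin_unlock_irqrestore(&shared->lock, flags);
}
#endif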
/**
 *	__skb_queue_after - queue a buffer after a given buffer in a list
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		       struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
extern void	   skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}
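/*
 * Illustration (editor's sketch): the locked counterparts skb_queue_tail()
 * and skb_dequeue() form a simple producer/consumer pair; the unlocked
 * __skb_* variants above require the caller to hold list->lock.  Function
 * names are invented for this example.
 */
#if 0	/* example only, never compiled */
static void example_producer(struct sk_buff_head *q, struct sk_buff *skb)
{
	skb_queue_tail(q, skb);		/* takes q->lock internally */
}

static void example_consumer(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(q)) != NULL)
		kfree_skb(skb);		/* NULL means the queue is empty */
}
#endif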
/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}


static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_shinfo(skb)->frags[i].size;
	return len + skb_headlen(skb);
}

static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page		  = page;
	frag->page_offset	  = off;
	frag->size		  = size;
	skb_shinfo(skb)->nr_frags = i + 1;
}

extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
			    int off, int size);

#define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->frag_list)
#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}
#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}
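/*
 * Illustration (editor's sketch): how the pointer-moving helpers relate.
 * skb_reserve() opens headroom, skb_put() appends at the tail, skb_push()
 * prepends at the head and skb_pull() consumes from the head.  The sizes
 * and the function name are invented for this example.
 */
#if 0	/* example only, never compiled */
static void example_build(struct sk_buff *skb)
{
	unsigned char *payload, *header;

	skb_reserve(skb, 16);		/* headroom for headers to come */
	payload = skb_put(skb, 100);	/* 100 bytes of payload at the tail */
	header = skb_push(skb, 8);	/* an 8-byte header in front of it */
	/* ... fill header and payload ... */
	skb_pull(skb, 8);		/* a receiver would strip it again */
}
#endif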
extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}
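/*
 * Illustration (editor's sketch): a driver receive allocation that uses
 * skb_reserve() to align the IP header (see the NET_IP_ALIGN discussion
 * further down).  The function name is invented for this example.
 */
#if 0	/* example only, never compiled */
static struct sk_buff *example_rx_alloc(unsigned int frame_len)
{
	struct sk_buff *skb = alloc_skb(frame_len + NET_IP_ALIGN, GFP_ATOMIC);

	if (skb)
		skb_reserve(skb, NET_IP_ALIGN); /* 14-byte ether header =>
						 * IP header aligned */
	return skb;
}
#endif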
#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != ~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb->transport_header = skb->data + offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb->network_header = skb->data + offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != NULL;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb->mac_header = skb->data + offset;
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}
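/*
 * Illustration (editor's sketch): recording and reading header positions
 * with the accessors above, e.g. in a receive path after the link-layer
 * header has been pulled.  The header length and function name are
 * invented for this example.
 */
#if 0	/* example only, never compiled */
static void example_mark_headers(struct sk_buff *skb)
{
	skb_reset_network_header(skb);		/* IP header starts at skb->data */
	skb_set_transport_header(skb, 20);	/* assume a 20-byte IPv4 header */

	/* Both accessors work whether headers are stored as offsets
	 * (64-bit) or as pointers (32-bit).
	 */
	BUG_ON(skb_network_header_len(skb) != 20);
}
#endif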
/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade-off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 16 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 16 bytes of
 * headroom; you should not reduce this.
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	16
#endif

extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb->data_len)) {
		WARN_ON(1);
		return;
	}
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

extern void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to
 *	out-of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk		= NULL;
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

/**
 *	__dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
	if (likely(skb))
		skb_reserve(skb, NET_SKB_PAD);
	return skb;
}

extern struct sk_buff *dev_alloc_skb(unsigned int length);

extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
		unsigned int length, gfp_t gfp_mask);
/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

extern struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask);

/**
 *	netdev_alloc_page - allocate a page for ps-rx on a specific device
 *	@dev: network device to receive on
 *
 *	Allocate a new page node local to the specified device.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct page *netdev_alloc_page(struct net_device *dev)
{
	return __netdev_alloc_page(dev, GFP_ATOMIC);
}

static inline void netdev_free_page(struct net_device *dev, struct page *page)
{
	__free_page(page);
}

/**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
 *	@len: length up to which to write
 *
 *	Returns true if modifying the header part of the cloned buffer
 *	does not require the data to be copied.
 */
static inline int skb_clone_writable(struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom < NET_SKB_PAD)
		headroom = NET_SKB_PAD;
	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and the original skb is not changed.
 *
 *	The result is an skb with a writable area skb->head...skb->tail
 *	and at least @headroom of space at the head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/**
 *	skb_cow_head - skb_cow but only making the head writable
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	This function is identical to skb_cow except that we replace the
 *	skb_cloned check by skb_header_cloned.  It should be used when
 *	you only need to push on some header and do not need to modify
 *	the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}
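/*
 * Illustration (editor's sketch): making room for a header push on a
 * possibly-cloned skb.  skb_cow_head() only copies when the header part
 * is shared, which is the cheap variant recommended above.  The size and
 * function name are invented for this example.
 */
#if 0	/* example only, never compiled */
static int example_push_header(struct sk_buff *skb)
{
	if (skb_cow_head(skb, 8))	/* want 8 bytes of writable headroom */
		return -ENOMEM;
	memset(skb_push(skb, 8), 0, 8);	/* now safe to prepend */
	return 0;
}
#endif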
/**
 *	skb_padto	- pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */

static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

static inline int skb_add_data(struct sk_buff *skb,
			       char __user *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
						      copy, 0, &err);
		if (!err) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline int skb_can_coalesce(struct sk_buff *skb, int i,
				   struct page *page, int off)
{
	if (i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == frag->page &&
		       off == frag->page_offset + frag->size;
	}
	return 0;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */

static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}

unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets is still valid after the operation.
 */

static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;					\
		     prefetch(skb->next), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->next)

#define skb_queue_walk_safe(queue, skb, tmp)					\
		for (skb = (queue)->next, tmp = skb->next;			\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)

#define skb_queue_walk_from(queue, skb)						\
		for (; prefetch(skb->next), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->next)

#define skb_queue_walk_from_safe(queue, skb, tmp)				\
		for (tmp = skb->next;						\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;					\
		     prefetch(skb->prev), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->prev)


extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
					   int *peeked, int *err);
extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
					 int noblock, int *err);
extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
				     struct poll_table_struct *wait);
extern int	       skb_copy_datagram_iovec(const struct sk_buff *from,
					       int offset, struct iovec *to,
					       int size);
extern int	       skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
							int hlen,
							struct iovec *iov);
extern int	       skb_copy_datagram_from_iovec(struct sk_buff *skb,
						    int offset,
						    struct iovec *from,
						    int len);
extern void	       skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern int	       skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
					 unsigned int flags);
extern __wsum	       skb_checksum(const struct sk_buff *skb, int offset,
				    int len, __wsum csum);
extern int	       skb_copy_bits(const struct sk_buff *skb, int offset,
				     void *to, int len);
extern int	       skb_store_bits(struct sk_buff *skb, int offset,
				      const void *from, int len);
extern __wsum	       skb_copy_and_csum_bits(const struct sk_buff *skb,
					      int offset, u8 *to, int len,
					      __wsum csum);
extern int	       skb_splice_bits(struct sk_buff *skb,
				       unsigned int offset,
				       struct pipe_inode_info *pipe,
				       unsigned int len,
				       unsigned int flags);
extern void	       skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void	       skb_split(struct sk_buff *skb,
				 struct sk_buff *skb1, const u32 len);
extern int	       skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
				 int shiftlen);

extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
extern int	       skb_gro_receive(struct sk_buff **head,
				       struct sk_buff *skb);

static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
				       int len, void *buffer)
{
	int hlen = skb_headlen(skb);

	if (hlen - offset >= len)
		return skb->data + offset;

	if (skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}
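/*
 * Illustration (editor's sketch): parsing data that may be split across
 * fragments.  skb_header_pointer() returns a direct pointer when the bytes
 * are linear and otherwise copies them into @buffer.  The function name is
 * invented for this example.
 */
#if 0	/* example only, never compiled */
static int example_peek_byte(const struct sk_buff *skb, int offset)
{
	u8 buffer, *p;

	p = skb_header_pointer(skb, offset, sizeof(buffer), &buffer);
	if (!p)
		return -EINVAL;		/* packet shorter than offset+1 */
	return *p;
}
#endif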
static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}

extern void skb_init(void);

/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and stores
 *	it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval *stamp)
{
	*stamp = ktime_to_timeval(skb->tstamp);
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return ktime_set(0, 0);
}

extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
extern __sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return skb->ip_summed & CHECKSUM_UNNECESSARY;
}

/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP.  It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *	this function can be used to verify the checksum on received
 *	packets.  In that case the function should return zero if the
 *	checksum is correct.  In particular, this function will return zero
 *	if skb->ip_summed is CHECKSUM_UNNECESSARY, which indicates that the
 *	hardware has already verified the correctness of the checksum.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
	if (skb)
		atomic_inc(&skb->users);
}
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
	if (skb)
		kfree_skb(skb);
}
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
	nf_conntrack_put_reasm(skb->nfct_reasm);
	skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

/* Note: This doesn't put any conntrack and bridge info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->nfct = src->nfct;
	nf_conntrack_get(src->nfct);
	dst->nfctinfo = src->nfctinfo;
	dst->nfct_reasm = src->nfct_reasm;
	nf_conntrack_get_reasm(src->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	dst->nf_bridge = src->nf_bridge;
	nf_bridge_get(src->nf_bridge);
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(dst->nfct);
	nf_conntrack_put_reasm(dst->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(dst->nf_bridge);
#endif
	__nf_copy(dst, src);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}
#ifdef CONFIG_XFRM
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return skb->sp;
}
#else
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return NULL;
}
#endif

static inline int skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

static inline int skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set. */
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}

static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one.  Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */