/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <[email protected]>
 *		Florian La Roche, <[email protected]>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/cache.h>

#include <asm/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>

#define HAVE_ALLOC_SKB		/* For the drivers to know */
#define HAVE_ALIGNABLE_SKB	/* Ditto 8)		   */

/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE 0
#define CHECKSUM_UNNECESSARY 1
#define CHECKSUM_COMPLETE 2
#define CHECKSUM_PARTIAL 3

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* A. Checksumming of received packets by device.
 *
 *	NONE: device failed to checksum this packet.
 *		skb->csum is undefined.
 *
 *	UNNECESSARY: device parsed the packet and claims to have verified
 *		the checksum. skb->csum is undefined.
 *	      It is a bad option, but, unfortunately, many vendors do this.
 *	      Apparently with the secret goal of selling you a new device
 *	      when you add a new protocol to your host, e.g. IPv6. 8)
 *
 *	COMPLETE: the most generic way. Device supplied checksum of _all_
 *	    the packet as seen by netif_rx in skb->csum.
 *	    NOTE: Even if a device supports only some protocols, as long as
 *	    it is able to produce some skb->csum, it MUST use COMPLETE,
 *	    not UNNECESSARY.
 *
 *	PARTIAL: identical to the case for output below.  This may occur
 *	    on a packet received directly from another Linux OS, e.g.,
 *	    a virtualised Linux kernel on the same host.  The packet can
 *	    be treated in the same way as UNNECESSARY except that on
 *	    output (i.e., forwarding) the checksum must be filled in
 *	    by the OS or the hardware.
 *
 * B. Checksumming on output.
 *
 *	NONE: skb is checksummed by protocol or csum is not required.
 *
 *	PARTIAL: device is required to csum packet as seen by hard_start_xmit
 *	from skb->csum_start to the end and to record the checksum
 *	at skb->csum_start + skb->csum_offset.
 *
 *	Device must show its capabilities in dev->features, set
 *	at device setup time.
 *	NETIF_F_HW_CSUM	- it is a clever device, it is able to checksum
 *			  everything.
 *	NETIF_F_NO_CSUM	- loopback or reliable single hop media.
 *	NETIF_F_IP_CSUM	- device is dumb, it is able to csum only
 *			  TCP/UDP over IPv4. Sigh. Vendors like this
 *			  way for an unknown reason. Though, see comment above
 *			  about CHECKSUM_UNNECESSARY. 8)
 *	NETIF_F_IPV6_CSUM - about as dumb as the last one but does IPv6 instead.
 *
 *	Any questions? No questions, good.		--ANK
 */
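/*
 * For example (an illustrative sketch only, assuming a linear skb; this
 * is what a software fallback for CHECKSUM_PARTIAL broadly amounts to):
 * the checksum is computed from skb->csum_start onward and stored at
 * skb->csum_start + skb->csum_offset:
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 *		unsigned char *csum_p = skb->head + skb->csum_start;
 *		__wsum csum = csum_partial(csum_p, skb->len -
 *					   (csum_p - skb->data), 0);
 *		*(__sum16 *)(csum_p + skb->csum_offset) = csum_fold(csum);
 *	}
 */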
struct net_device;
struct scatterlist;
struct pipe_inode_info;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
	atomic_t use;
};
#endif

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
	atomic_t use;
	struct net_device *physindev;
	struct net_device *physoutdev;
	unsigned int mask;
	unsigned long data[32 / sizeof(unsigned long)];
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow 64K frame to be packed as single skb without frag_list */
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct page *page;
	__u32 page_offset;
	__u32 size;
};

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	atomic_t	dataref;
	unsigned short	nr_frags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short	gso_type;
	__be32		ip6_frag_id;
	struct sk_buff	*frag_list;
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
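/*
 * A worked example of the split (illustrative only): an skb that has been
 * cloned once, with the clone's header then dropped via
 * skb_header_release(), has dataref == (1 << SKB_DATAREF_SHIFT) + 2:
 * two references to the entire skb->data in the low half, one payload-only
 * reference in the high half.  skb_header_cloned() below recovers the
 * number of header holders as
 * (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT).
 */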
enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@sk: Socket we are owned by
 *	@tstamp: Time we arrived
 *	@dev: Device we arrived on/are leaving by
 *	@transport_header: Transport layer header
 *	@network_header: Network layer header
 *	@mac_header: Link layer header
 *	@dst: destination entry
 *	@sp: the security path, used for xfrm
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@hdr_len: writable header length of cloned skb
 *	@csum: Checksum (must include start/offset pair)
 *	@csum_start: Offset from skb->head where checksumming should start
 *	@csum_offset: Offset from csum_start where checksum should be stored
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@nohdr: Payload reference only, must not modify header
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ip_summed: Driver fed us an IP checksum
 *	@priority: Packet queueing priority
 *	@users: User count - see {datagram,tcp}.c
 *	@protocol: Packet protocol from driver
 *	@truesize: Buffer size
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@destructor: Destruct function
 *	@mark: Generic packet mark
 *	@nfct: Associated connection, if any
 *	@ipvs_property: skbuff is owned by ipvs
 *	@nf_trace: netfilter packet trace flag
 *	@nfctinfo: Relationship of this skb to the connection
 *	@nfct_reasm: netfilter conntrack re-assembly pointer
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@iif: ifindex of device we arrived on
 *	@queue_mapping: Queue mapping for multiqueue devices
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@dma_cookie: a cookie to one of several possible DMA operations
 *		done by skb DMA functions
 *	@secmark: security marking
 */

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	struct sock	*sk;
	ktime_t		tstamp;
	struct net_device *dev;

	struct dst_entry *dst;
	struct sec_path	*sp;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char		cb[48];

	unsigned int	len,
			data_len;
	__u16		mac_len,
			hdr_len;
	union {
		__wsum	csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32		priority;
	__u8		local_df:1,
			cloned:1,
			ip_summed:2,
			nohdr:1,
			nfctinfo:3;
	__u8		pkt_type:3,
			fclone:2,
			ipvs_property:1,
			peeked:1,
			nf_trace:1;
	__be16		protocol;

	void		(*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack *nfct;
	struct sk_buff	*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info *nf_bridge;
#endif

	int		iif;
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	__u16		queue_mapping;
#endif
#ifdef CONFIG_NET_SCHED
	__u16		tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16		tc_verd;	/* traffic control verdict */
#endif
#endif
	/* 2 byte hole */

#ifdef CONFIG_NET_DMA
	dma_cookie_t	dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32		secmark;
#endif

	__u32		mark;

	sk_buff_data_t	transport_header;
	sk_buff_data_t	network_header;
	sk_buff_data_t	mac_header;
	/* These elements must be at the end, see alloc_skb() for details. */
	sk_buff_data_t	tail;
	sk_buff_data_t	end;
	unsigned char	*head,
			*data;
	unsigned int	truesize;
	atomic_t	users;
};
#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>

#include <asm/system.h>

extern void kfree_skb(struct sk_buff *skb);
extern void __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int fclone, int node);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, -1);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, 1, -1);
}
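/*
 * A minimal allocation sketch (illustrative; MY_HEADROOM, "len" and
 * "payload" are hypothetical): reserve protocol headroom up front, then
 * append payload with skb_put(), defined further down:
 *
 *	struct sk_buff *skb = alloc_skb(len + MY_HEADROOM, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, MY_HEADROOM);
 *	memcpy(skb_put(skb, len), payload, len);
 *
 * alloc_skb_fclone() additionally allocates a companion skb so that a
 * later skb_clone() (e.g. for a TCP retransmit queue) is cheap.
 */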
extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
extern struct sk_buff *pskb_copy(struct sk_buff *skb,
				 gfp_t gfp_mask);
extern int	       pskb_expand_head(struct sk_buff *skb,
					int nhead, int ntail,
					gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
					    unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				       int newheadroom, int newtailroom,
				       gfp_t priority);
extern int	       skb_to_sgvec(struct sk_buff *skb,
				    struct scatterlist *sg, int offset,
				    int len);
extern int	       skb_cow_data(struct sk_buff *skb, int tailbits,
				    struct sk_buff **trailer);
extern int	       skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	kfree_skb(a)
extern void	       skb_over_panic(struct sk_buff *skb, int len,
				      void *here);
extern void	       skb_under_panic(struct sk_buff *skb, int len,
				       void *here);
extern void	       skb_truesize_bug(struct sk_buff *skb);

static inline void skb_truesize_check(struct sk_buff *skb)
{
	int len = sizeof(struct sk_buff) + skb->len;

	if (unlikely((int)skb->truesize < len))
		skb_truesize_bug(skb);
}

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int getfrag(void *from, char *to, int offset,
				    int len, int odd, struct sk_buff *skb),
			void *from, int length);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

extern void	      skb_prepare_seq_read(struct sk_buff *skb,
					   unsigned int from, unsigned int to,
					   struct skb_seq_state *st);
extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
				   struct skb_seq_state *st);
extern void	      skb_abort_seq_read(struct skb_seq_state *st);

extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
				    unsigned int to, struct ts_config *config,
				    struct ts_state *state);

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant
 * atomic changes.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer.  This is done
 *	by acquiring a payload reference.  You must not read from the header
 *	part of skb->data after this.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt context or with spinlocks held, @pri
 *	must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
					      gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);
		kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
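/*
 * The usual calling pattern (a sketch of a hypothetical rx handler, not
 * a prescribed API): take ownership of a possibly-shared skb before
 * touching it, bailing out if the clone fails:
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (!skb)
 *		return NET_RX_DROP;
 */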
/*
 *	Copy shared buffers into a new sk_buff.  We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1.  If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);	/* Free our shared copy */
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->next;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_peek_tail
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->prev;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or the drivers will need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
					     struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}
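/*
 * For instance (a hypothetical driver, sketched): a queue whose lock is
 * taken in hardirq context can be given its own lockdep class so it is
 * not conflated with softirq-only queues:
 *
 *	static struct lock_class_key my_txq_lock_key;
 *
 *	skb_queue_head_init_class(&priv->txq, &my_txq_lock_key);
 */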
/*
 *	Insert an sk_buff at the start of a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */

/**
 *	__skb_queue_after - queue a buffer at the list head
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	struct sk_buff *next;
	list->qlen++;

	next = prev->next;
	newsk->next = next;
	newsk->prev = prev;
	next->prev = prev->next = newsk;
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	struct sk_buff *prev, *next;

	list->qlen++;
	next = (struct sk_buff *)list;
	prev = next->prev;
	newsk->next = next;
	newsk->prev = prev;
	next->prev = prev->next = newsk;
}


/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *next, *prev, *result;

	prev = (struct sk_buff *) list;
	next = prev->next;
	result = NULL;
	if (next != prev) {
		result = next;
		next = next->next;
		list->qlen--;
		next->prev = prev;
		prev->next = next;
		result->next = result->prev = NULL;
	}
	return result;
}
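/*
 * Taken together, these primitives give a simple FIFO. A sketch (the
 * "priv" structure is hypothetical) of producer and consumer sides,
 * using the locked wrappers where no private lock is held and the
 * __-variants where one is:
 *
 *	skb_queue_tail(&priv->rxq, skb);		// producer
 *
 *	spin_lock_irqsave(&priv->rxq.lock, flags);	// consumer
 *	skb = __skb_dequeue(&priv->rxq);
 *	spin_unlock_irqrestore(&priv->rxq.lock, flags);
 */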
/*
 *	Insert a packet on a list.
 */
extern void	   skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev = prev->next = newsk;
	list->qlen++;
}

/*
 *	Place a packet after a given packet in a list.
 */
extern void	   skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	__skb_insert(newsk, old, old->next, list);
}

/*
 * Remove an sk_buff from a list. _Must_ be called atomically, and with
 * the list known.
 */
extern void	   skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next = skb->next;
	prev = skb->prev;
	skb->next = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}


/* XXX: more streamlined implementation */

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}


static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_shinfo(skb)->frags[i].size;
	return len + skb_headlen(skb);
}

static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page = page;
	frag->page_offset = off;
	frag->size = size;
	skb_shinfo(skb)->nr_frags = i + 1;
}

#define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->frag_list)
#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))
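/*
 * Note that skb_fill_page_desc() only records the fragment; the caller
 * is expected to hold a page reference and to account for the new bytes
 * itself. A sketch of attaching one page of payload as fragment 0:
 *
 *	get_page(page);
 *	skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
 *	skb->len      += PAGE_SIZE;
 *	skb->data_len += PAGE_SIZE;
 *	skb->truesize += PAGE_SIZE;
 */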
#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}
#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, current_text_addr());
	return tmp;
}

static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, current_text_addr());
	return skb->data;
}

static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
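/*
 * The canonical use of pskb_may_pull() is to make a protocol header
 * available in the linear area before dereferencing it. A sketch of an
 * IPv4 input check (assuming <linux/ip.h> for struct iphdr/ip_hdr()):
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;
 *	iph = ip_hdr(skb);
 */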
/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != ~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb->transport_header = skb->data + offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb->network_header = skb->data + offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != NULL;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb->mac_header = skb->data + offset;
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}
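/*
 * A typical receive-path sequence (sketched; "ip_hdrlen" here is a
 * hypothetical length, and the offsets depend on the protocol being
 * parsed) records each header position before pulling past it, so later
 * layers can still find the earlier headers:
 *
 *	skb_reset_network_header(skb);		// skb->data at the IP header
 *	skb_pull(skb, ip_hdrlen);
 *	skb_reset_transport_header(skb);	// now at the TCP/UDP header
 */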
static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 16 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 16 bytes of
 * headroom, you should not reduce this.
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	16
#endif

extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb->data_len)) {
		WARN_ON(1);
		return;
	}
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
static inline void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}


static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}
/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to
 *	out-of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk = NULL;
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

/**
 *	__dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
	if (likely(skb))
		skb_reserve(skb, NET_SKB_PAD);
	return skb;
}

/**
 *	dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return __dev_alloc_skb(length, GFP_ATOMIC);
}
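/*
 * The classic rx-buffer idiom (sketch; "rx_len" is hypothetical) combines
 * dev_alloc_skb() with the NET_IP_ALIGN shift described above:
 *
 *	skb = dev_alloc_skb(rx_len + NET_IP_ALIGN);
 *	if (!skb)
 *		goto drop;
 *	skb_reserve(skb, NET_IP_ALIGN);	// IP header lands 4-byte aligned
 */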
extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
					  unsigned int length, gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
					       unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

/**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
 *	@len: length up to which to write
 *
 *	Returns true if modifying the header part of the cloned buffer
 *	does not require the data to be copied.
 */
static inline int skb_clone_writable(struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom < NET_SKB_PAD)
		headroom = NET_SKB_PAD;
	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and the original skb is not changed.
 *
 *	The result is an skb with a writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/**
 *	skb_cow_head - skb_cow but only making the head writable
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	This function is identical to skb_cow except that we replace the
 *	skb_cloned check by skb_header_cloned.  It should be used when
 *	you only need to push on some header and do not need to modify
 *	the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}
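/*
 * A common pattern before pushing an extra header (sketch; HDR_LEN is
 * hypothetical): make the head writable and big enough first, then push:
 *
 *	if (skb_cow_head(skb, HDR_LEN))
 *		goto drop;
 *	hdr = skb_push(skb, HDR_LEN);
 */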
/**
 *	skb_padto	- pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */

static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

static inline int skb_add_data(struct sk_buff *skb,
			       char __user *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
						      copy, 0, &err);
		if (!err) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline int skb_can_coalesce(struct sk_buff *skb, int i,
				   struct page *page, int off)
{
	if (i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == frag->page &&
		       off == frag->page_offset + frag->size;
	}
	return 0;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */

static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}
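/*
 * For example (sketch), code that strips a 4-byte tag from the front of a
 * CHECKSUM_COMPLETE packet would subtract the pulled bytes so the
 * hardware checksum stays valid for the shorter packet:
 *
 *	unsigned char *old = skb->data;
 *	skb_pull(skb, 4);
 *	skb_postpull_rcsum(skb, old, 4);
 *
 * skb_pull_rcsum(), declared below, combines the two steps.
 */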
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets is still valid after the operation.
 */

static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;					\
		     prefetch(skb->next), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->next)

#define skb_queue_walk_safe(queue, skb, tmp)					\
		for (skb = (queue)->next, tmp = skb->next;			\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;					\
		     prefetch(skb->prev), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->prev)


extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
					   int *peeked, int *err);
extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
					 int noblock, int *err);
extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
				     struct poll_table_struct *wait);
extern int	       skb_copy_datagram_iovec(const struct sk_buff *from,
					       int offset, struct iovec *to,
					       int size);
extern int	       skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
							int hlen,
							struct iovec *iov);
extern void	       skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern int	       skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
					 unsigned int flags);
extern __wsum	       skb_checksum(const struct sk_buff *skb, int offset,
				    int len, __wsum csum);
extern int	       skb_copy_bits(const struct sk_buff *skb, int offset,
				     void *to, int len);
extern int	       skb_store_bits(struct sk_buff *skb, int offset,
				      const void *from, int len);
extern __wsum	       skb_copy_and_csum_bits(const struct sk_buff *skb,
					      int offset, u8 *to, int len,
					      __wsum csum);
extern int	       skb_splice_bits(struct sk_buff *skb,
				       unsigned int offset,
				       struct pipe_inode_info *pipe,
				       unsigned int len,
				       unsigned int flags);
extern void	       skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void	       skb_split(struct sk_buff *skb,
				 struct sk_buff *skb1, const u32 len);

extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);

static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
				       int len, void *buffer)
{
	int hlen = skb_headlen(skb);

	if (hlen - offset >= len)
		return skb->data + offset;

	if (skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}
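/*
 * skb_header_pointer() avoids a pull when the caller only needs to read
 * a header: it returns a pointer into the linear area when possible and
 * copies into the caller's buffer otherwise. The canonical idiom
 * (sketch, assuming <linux/tcp.h>):
 *
 *	struct tcphdr _tcph;
 *	const struct tcphdr *th;
 *
 *	th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
 *	if (!th)
 *		goto drop;
 */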
static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}

extern void skb_init(void);

/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and stores
 *	it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval *stamp)
{
	*stamp = ktime_to_timeval(skb->tstamp);
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return ktime_set(0, 0);
}

extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
extern __sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return skb->ip_summed & CHECKSUM_UNNECESSARY;
}

/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP.  It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *	this function can be used to verify the checksum on received
 *	packets.  In that case the function should return zero if the
 *	checksum is correct.  In particular, this function will return zero
 *	if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
 *	hardware has already verified the correctness of the checksum.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}
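/*
 * A receive-side verification sketch (hypothetical protocol code): after
 * seeding skb->csum with the pseudo-header sum, a non-zero result means
 * the packet is corrupt:
 *
 *	skb->csum = my_pseudo_header_csum;	// hypothetical helper value
 *	if (skb_checksum_complete(skb))
 *		goto csum_error;
 */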
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
	if (skb)
		atomic_inc(&skb->users);
}
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
	if (skb)
		kfree_skb(skb);
}
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
	nf_conntrack_put_reasm(skb->nfct_reasm);
	skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

/* Note: This doesn't put any conntrack and bridge info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->nfct = src->nfct;
	nf_conntrack_get(src->nfct);
	dst->nfctinfo = src->nfctinfo;
	dst->nfct_reasm = src->nfct_reasm;
	nf_conntrack_get_reasm(src->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	dst->nf_bridge = src->nf_bridge;
	nf_bridge_get(src->nf_bridge);
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(dst->nfct);
	nf_conntrack_put_reasm(dst->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(dst->nf_bridge);
#endif
	__nf_copy(dst, src);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	skb->queue_mapping = queue_mapping;
#endif
}

static inline u16 skb_get_queue_mapping(struct sk_buff *skb)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	return skb->queue_mapping;
#else
	return 0;
#endif
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	to->queue_mapping = from->queue_mapping;
#endif
}
1794 } 1795 1796 static inline int skb_is_gso(const struct sk_buff *skb) 1797 { 1798 return skb_shinfo(skb)->gso_size; 1799 } 1800 1801 static inline int skb_is_gso_v6(const struct sk_buff *skb) 1802 { 1803 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; 1804 } 1805 1806 static inline void skb_forward_csum(struct sk_buff *skb) 1807 { 1808 /* Unfortunately we don't support this one. Any brave souls? */ 1809 if (skb->ip_summed == CHECKSUM_COMPLETE) 1810 skb->ip_summed = CHECKSUM_NONE; 1811 } 1812 1813 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off); 1814 #endif /* __KERNEL__ */ 1815 #endif /* _LINUX_SKBUFF_H */ 1816