/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <[email protected]>
 *		Florian La Roche, <[email protected]>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/cache.h>

#include <asm/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>

/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* A. Checksumming of received packets by device.
 *
 *	NONE: device failed to checksum this packet.
 *		skb->csum is undefined.
 *
 *	UNNECESSARY: device parsed the packet and claims to have verified
 *		the checksum. skb->csum is undefined.
 *	      This is a bad option, but, unfortunately, many vendors do it,
 *	      apparently with the secret goal of selling you a new device
 *	      when you add a new protocol, e.g. IPv6, to your host. 8)
 *
 *	COMPLETE: the most generic way. Device supplied the checksum of the
 *	    _whole_ packet as seen by netif_rx, in skb->csum.
 *	    NOTE: even if the device supports only some protocols but is
 *	    able to produce some skb->csum, it MUST use COMPLETE,
 *	    not UNNECESSARY.
 *
 *	PARTIAL: identical to the case for output below. This may occur
 *	    on a packet received directly from another Linux OS, e.g.,
 *	    a virtualised Linux kernel on the same host. The packet can
 *	    be treated in the same way as UNNECESSARY, except that on
 *	    output (i.e., forwarding) the checksum must be filled in
 *	    by the OS or the hardware.
 *
 * B. Checksumming on output.
 *
 *	NONE: the skb is checksummed by the protocol, or a checksum is not
 *		required.
 *
 *	PARTIAL: the device is required to checksum the packet as seen by
 *	    hard_start_xmit from skb->csum_start to the end, and to record
 *	    the checksum at skb->csum_start + skb->csum_offset.
 *
 *	The device must advertise its capabilities in dev->features, set
 *	at device setup time:
 *	NETIF_F_HW_CSUM	- a clever device; it is able to checksum
 *			  everything.
 *	NETIF_F_NO_CSUM	- loopback or reliable single-hop media.
 *	NETIF_F_IP_CSUM	- the device is dumb: it is able to checksum only
 *			  TCP/UDP over IPv4. Sigh. Vendors like it this way
 *			  for some unknown reason. Though, see the comment
 *			  above about CHECKSUM_UNNECESSARY. 8)
 *	NETIF_F_IPV6_CSUM - about as dumb as the last one, but does IPv6
 *			  instead.
 *
 *	Any questions? No questions, good.
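 *
 *	Illustrative sketch (hypothetical receive-path driver code; the
 *	"hw_*" names are assumed values taken from an RX descriptor, with
 *	hw_csum already in __wsum form):
 *
 *		if (hw_verified_l4_csum)
 *			skb->ip_summed = CHECKSUM_UNNECESSARY;
 *		else if (hw_full_csum_valid) {
 *			skb->csum = hw_csum;
 *			skb->ip_summed = CHECKSUM_COMPLETE;
 *		} else
 *			skb->ip_summed = CHECKSUM_NONE;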
--ANK 91 */ 92 93 struct net_device; 94 struct scatterlist; 95 struct pipe_inode_info; 96 97 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 98 struct nf_conntrack { 99 atomic_t use; 100 }; 101 #endif 102 103 #ifdef CONFIG_BRIDGE_NETFILTER 104 struct nf_bridge_info { 105 atomic_t use; 106 struct net_device *physindev; 107 struct net_device *physoutdev; 108 unsigned int mask; 109 unsigned long data[32 / sizeof(unsigned long)]; 110 }; 111 #endif 112 113 struct sk_buff_head { 114 /* These two members must be first. */ 115 struct sk_buff *next; 116 struct sk_buff *prev; 117 118 __u32 qlen; 119 spinlock_t lock; 120 }; 121 122 struct sk_buff; 123 124 /* To allow 64K frame to be packed as single skb without frag_list */ 125 #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2) 126 127 typedef struct skb_frag_struct skb_frag_t; 128 129 struct skb_frag_struct { 130 struct page *page; 131 __u32 page_offset; 132 __u32 size; 133 }; 134 135 #define HAVE_HW_TIME_STAMP 136 137 /** 138 * struct skb_shared_hwtstamps - hardware time stamps 139 * @hwtstamp: hardware time stamp transformed into duration 140 * since arbitrary point in time 141 * @syststamp: hwtstamp transformed to system time base 142 * 143 * Software time stamps generated by ktime_get_real() are stored in 144 * skb->tstamp. The relation between the different kinds of time 145 * stamps is as follows: 146 * 147 * syststamp and tstamp can be compared against each other in 148 * arbitrary combinations. The accuracy of a 149 * syststamp/tstamp/"syststamp from other device" comparison is 150 * limited by the accuracy of the transformation into system time 151 * base. This depends on the device driver and its underlying 152 * hardware. 153 * 154 * hwtstamps can only be compared against other hwtstamps from 155 * the same device. 156 * 157 * This structure is attached to packets as part of the 158 * &skb_shared_info. Use skb_hwtstamps() to get a pointer. 159 */ 160 struct skb_shared_hwtstamps { 161 ktime_t hwtstamp; 162 ktime_t syststamp; 163 }; 164 165 /** 166 * struct skb_shared_tx - instructions for time stamping of outgoing packets 167 * @hardware: generate hardware time stamp 168 * @software: generate software time stamp 169 * @in_progress: device driver is going to provide 170 * hardware time stamp 171 * @flags: all shared_tx flags 172 * 173 * These flags are attached to packets as part of the 174 * &skb_shared_info. Use skb_tx() to get a pointer. 175 */ 176 union skb_shared_tx { 177 struct { 178 __u8 hardware:1, 179 software:1, 180 in_progress:1; 181 }; 182 __u8 flags; 183 }; 184 185 /* This data is invariant across clones and lives at 186 * the end of the header data, ie. at skb->end. 187 */ 188 struct skb_shared_info { 189 atomic_t dataref; 190 unsigned short nr_frags; 191 unsigned short gso_size; 192 #ifdef CONFIG_HAS_DMA 193 dma_addr_t dma_head; 194 #endif 195 /* Warning: this field is not always filled in (UFO)! */ 196 unsigned short gso_segs; 197 unsigned short gso_type; 198 __be32 ip6_frag_id; 199 union skb_shared_tx tx_flags; 200 struct sk_buff *frag_list; 201 struct skb_shared_hwtstamps hwtstamps; 202 skb_frag_t frags[MAX_SKB_FRAGS]; 203 #ifdef CONFIG_HAS_DMA 204 dma_addr_t dma_maps[MAX_SKB_FRAGS]; 205 #endif 206 /* Intermediate layers must ensure that destructor_arg 207 * remains valid until skb destructor */ 208 void * destructor_arg; 209 }; 210 211 /* We divide dataref into two halves. The higher 16 bits hold references 212 * to the payload part of skb->data. 
The lower 16 bits hold references to 213 * the entire skb->data. A clone of a headerless skb holds the length of 214 * the header in skb->hdr_len. 215 * 216 * All users must obey the rule that the skb->data reference count must be 217 * greater than or equal to the payload reference count. 218 * 219 * Holding a reference to the payload part means that the user does not 220 * care about modifications to the header part of skb->data. 221 */ 222 #define SKB_DATAREF_SHIFT 16 223 #define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1) 224 225 226 enum { 227 SKB_FCLONE_UNAVAILABLE, 228 SKB_FCLONE_ORIG, 229 SKB_FCLONE_CLONE, 230 }; 231 232 enum { 233 SKB_GSO_TCPV4 = 1 << 0, 234 SKB_GSO_UDP = 1 << 1, 235 236 /* This indicates the skb is from an untrusted source. */ 237 SKB_GSO_DODGY = 1 << 2, 238 239 /* This indicates the tcp segment has CWR set. */ 240 SKB_GSO_TCP_ECN = 1 << 3, 241 242 SKB_GSO_TCPV6 = 1 << 4, 243 244 SKB_GSO_FCOE = 1 << 5, 245 }; 246 247 #if BITS_PER_LONG > 32 248 #define NET_SKBUFF_DATA_USES_OFFSET 1 249 #endif 250 251 #ifdef NET_SKBUFF_DATA_USES_OFFSET 252 typedef unsigned int sk_buff_data_t; 253 #else 254 typedef unsigned char *sk_buff_data_t; 255 #endif 256 257 /** 258 * struct sk_buff - socket buffer 259 * @next: Next buffer in list 260 * @prev: Previous buffer in list 261 * @sk: Socket we are owned by 262 * @tstamp: Time we arrived 263 * @dev: Device we arrived on/are leaving by 264 * @transport_header: Transport layer header 265 * @network_header: Network layer header 266 * @mac_header: Link layer header 267 * @dst: destination entry 268 * @sp: the security path, used for xfrm 269 * @cb: Control buffer. Free for use by every layer. Put private vars here 270 * @len: Length of actual data 271 * @data_len: Data length 272 * @mac_len: Length of link layer header 273 * @hdr_len: writable header length of cloned skb 274 * @csum: Checksum (must include start/offset pair) 275 * @csum_start: Offset from skb->head where checksumming should start 276 * @csum_offset: Offset from csum_start where checksum should be stored 277 * @local_df: allow local fragmentation 278 * @cloned: Head may be cloned (check refcnt to be sure) 279 * @nohdr: Payload reference only, must not modify header 280 * @pkt_type: Packet class 281 * @fclone: skbuff clone status 282 * @ip_summed: Driver fed us an IP checksum 283 * @priority: Packet queueing priority 284 * @users: User count - see {datagram,tcp}.c 285 * @protocol: Packet protocol from driver 286 * @truesize: Buffer size 287 * @head: Head of buffer 288 * @data: Data head pointer 289 * @tail: Tail pointer 290 * @end: End pointer 291 * @destructor: Destruct function 292 * @mark: Generic packet mark 293 * @nfct: Associated connection, if any 294 * @ipvs_property: skbuff is owned by ipvs 295 * @peeked: this packet has been seen already, so stats have been 296 * done for it, don't do them again 297 * @nf_trace: netfilter packet trace flag 298 * @nfctinfo: Relationship of this skb to the connection 299 * @nfct_reasm: netfilter conntrack re-assembly pointer 300 * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c 301 * @iif: ifindex of device we arrived on 302 * @queue_mapping: Queue mapping for multiqueue devices 303 * @tc_index: Traffic control index 304 * @tc_verd: traffic control verdict 305 * @ndisc_nodetype: router type (from link layer) 306 * @do_not_encrypt: set to prevent encryption of this frame 307 * @dma_cookie: a cookie to one of several possible DMA operations 308 * done by skb DMA functions 309 * @secmark: security marking 310 * 
@vlan_tci: vlan tag control information 311 */ 312 313 struct sk_buff { 314 /* These two members must be first. */ 315 struct sk_buff *next; 316 struct sk_buff *prev; 317 318 struct sock *sk; 319 ktime_t tstamp; 320 struct net_device *dev; 321 322 unsigned long _skb_dst; 323 #ifdef CONFIG_XFRM 324 struct sec_path *sp; 325 #endif 326 /* 327 * This is the control buffer. It is free to use for every 328 * layer. Please put your private variables there. If you 329 * want to keep them across layers you have to do a skb_clone() 330 * first. This is owned by whoever has the skb queued ATM. 331 */ 332 char cb[48]; 333 334 unsigned int len, 335 data_len; 336 __u16 mac_len, 337 hdr_len; 338 union { 339 __wsum csum; 340 struct { 341 __u16 csum_start; 342 __u16 csum_offset; 343 }; 344 }; 345 __u32 priority; 346 __u8 local_df:1, 347 cloned:1, 348 ip_summed:2, 349 nohdr:1, 350 nfctinfo:3; 351 __u8 pkt_type:3, 352 fclone:2, 353 ipvs_property:1, 354 peeked:1, 355 nf_trace:1; 356 __be16 protocol; 357 358 void (*destructor)(struct sk_buff *skb); 359 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 360 struct nf_conntrack *nfct; 361 struct sk_buff *nfct_reasm; 362 #endif 363 #ifdef CONFIG_BRIDGE_NETFILTER 364 struct nf_bridge_info *nf_bridge; 365 #endif 366 367 int iif; 368 __u16 queue_mapping; 369 #ifdef CONFIG_NET_SCHED 370 __u16 tc_index; /* traffic control index */ 371 #ifdef CONFIG_NET_CLS_ACT 372 __u16 tc_verd; /* traffic control verdict */ 373 #endif 374 #endif 375 #ifdef CONFIG_IPV6_NDISC_NODETYPE 376 __u8 ndisc_nodetype:2; 377 #endif 378 #if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE) 379 __u8 do_not_encrypt:1; 380 #endif 381 /* 0/13/14 bit hole */ 382 383 #ifdef CONFIG_NET_DMA 384 dma_cookie_t dma_cookie; 385 #endif 386 #ifdef CONFIG_NETWORK_SECMARK 387 __u32 secmark; 388 #endif 389 390 __u32 mark; 391 392 __u16 vlan_tci; 393 394 sk_buff_data_t transport_header; 395 sk_buff_data_t network_header; 396 sk_buff_data_t mac_header; 397 /* These elements must be at the end, see alloc_skb() for details. 
*/ 398 sk_buff_data_t tail; 399 sk_buff_data_t end; 400 unsigned char *head, 401 *data; 402 unsigned int truesize; 403 atomic_t users; 404 }; 405 406 #ifdef __KERNEL__ 407 /* 408 * Handling routines are only of interest to the kernel 409 */ 410 #include <linux/slab.h> 411 412 #include <asm/system.h> 413 414 #ifdef CONFIG_HAS_DMA 415 #include <linux/dma-mapping.h> 416 extern int skb_dma_map(struct device *dev, struct sk_buff *skb, 417 enum dma_data_direction dir); 418 extern void skb_dma_unmap(struct device *dev, struct sk_buff *skb, 419 enum dma_data_direction dir); 420 #endif 421 422 static inline struct dst_entry *skb_dst(const struct sk_buff *skb) 423 { 424 return (struct dst_entry *)skb->_skb_dst; 425 } 426 427 static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst) 428 { 429 skb->_skb_dst = (unsigned long)dst; 430 } 431 432 static inline struct rtable *skb_rtable(const struct sk_buff *skb) 433 { 434 return (struct rtable *)skb_dst(skb); 435 } 436 437 extern void kfree_skb(struct sk_buff *skb); 438 extern void consume_skb(struct sk_buff *skb); 439 extern void __kfree_skb(struct sk_buff *skb); 440 extern struct sk_buff *__alloc_skb(unsigned int size, 441 gfp_t priority, int fclone, int node); 442 static inline struct sk_buff *alloc_skb(unsigned int size, 443 gfp_t priority) 444 { 445 return __alloc_skb(size, priority, 0, -1); 446 } 447 448 static inline struct sk_buff *alloc_skb_fclone(unsigned int size, 449 gfp_t priority) 450 { 451 return __alloc_skb(size, priority, 1, -1); 452 } 453 454 extern int skb_recycle_check(struct sk_buff *skb, int skb_size); 455 456 extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); 457 extern struct sk_buff *skb_clone(struct sk_buff *skb, 458 gfp_t priority); 459 extern struct sk_buff *skb_copy(const struct sk_buff *skb, 460 gfp_t priority); 461 extern struct sk_buff *pskb_copy(struct sk_buff *skb, 462 gfp_t gfp_mask); 463 extern int pskb_expand_head(struct sk_buff *skb, 464 int nhead, int ntail, 465 gfp_t gfp_mask); 466 extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, 467 unsigned int headroom); 468 extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb, 469 int newheadroom, int newtailroom, 470 gfp_t priority); 471 extern int skb_to_sgvec(struct sk_buff *skb, 472 struct scatterlist *sg, int offset, 473 int len); 474 extern int skb_cow_data(struct sk_buff *skb, int tailbits, 475 struct sk_buff **trailer); 476 extern int skb_pad(struct sk_buff *skb, int pad); 477 #define dev_kfree_skb(a) consume_skb(a) 478 #define dev_consume_skb(a) kfree_skb_clean(a) 479 extern void skb_over_panic(struct sk_buff *skb, int len, 480 void *here); 481 extern void skb_under_panic(struct sk_buff *skb, int len, 482 void *here); 483 484 extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, 485 int getfrag(void *from, char *to, int offset, 486 int len,int odd, struct sk_buff *skb), 487 void *from, int length); 488 489 struct skb_seq_state 490 { 491 __u32 lower_offset; 492 __u32 upper_offset; 493 __u32 frag_idx; 494 __u32 stepped_offset; 495 struct sk_buff *root_skb; 496 struct sk_buff *cur_skb; 497 __u8 *frag_data; 498 }; 499 500 extern void skb_prepare_seq_read(struct sk_buff *skb, 501 unsigned int from, unsigned int to, 502 struct skb_seq_state *st); 503 extern unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 504 struct skb_seq_state *st); 505 extern void skb_abort_seq_read(struct skb_seq_state *st); 506 507 extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int 
from, 508 unsigned int to, struct ts_config *config, 509 struct ts_state *state); 510 511 #ifdef NET_SKBUFF_DATA_USES_OFFSET 512 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) 513 { 514 return skb->head + skb->end; 515 } 516 #else 517 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) 518 { 519 return skb->end; 520 } 521 #endif 522 523 /* Internal */ 524 #define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB))) 525 526 static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb) 527 { 528 return &skb_shinfo(skb)->hwtstamps; 529 } 530 531 static inline union skb_shared_tx *skb_tx(struct sk_buff *skb) 532 { 533 return &skb_shinfo(skb)->tx_flags; 534 } 535 536 /** 537 * skb_queue_empty - check if a queue is empty 538 * @list: queue head 539 * 540 * Returns true if the queue is empty, false otherwise. 541 */ 542 static inline int skb_queue_empty(const struct sk_buff_head *list) 543 { 544 return list->next == (struct sk_buff *)list; 545 } 546 547 /** 548 * skb_queue_is_last - check if skb is the last entry in the queue 549 * @list: queue head 550 * @skb: buffer 551 * 552 * Returns true if @skb is the last buffer on the list. 553 */ 554 static inline bool skb_queue_is_last(const struct sk_buff_head *list, 555 const struct sk_buff *skb) 556 { 557 return (skb->next == (struct sk_buff *) list); 558 } 559 560 /** 561 * skb_queue_is_first - check if skb is the first entry in the queue 562 * @list: queue head 563 * @skb: buffer 564 * 565 * Returns true if @skb is the first buffer on the list. 566 */ 567 static inline bool skb_queue_is_first(const struct sk_buff_head *list, 568 const struct sk_buff *skb) 569 { 570 return (skb->prev == (struct sk_buff *) list); 571 } 572 573 /** 574 * skb_queue_next - return the next packet in the queue 575 * @list: queue head 576 * @skb: current buffer 577 * 578 * Return the next packet in @list after @skb. It is only valid to 579 * call this if skb_queue_is_last() evaluates to false. 580 */ 581 static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list, 582 const struct sk_buff *skb) 583 { 584 /* This BUG_ON may seem severe, but if we just return then we 585 * are going to dereference garbage. 586 */ 587 BUG_ON(skb_queue_is_last(list, skb)); 588 return skb->next; 589 } 590 591 /** 592 * skb_queue_prev - return the prev packet in the queue 593 * @list: queue head 594 * @skb: current buffer 595 * 596 * Return the prev packet in @list before @skb. It is only valid to 597 * call this if skb_queue_is_first() evaluates to false. 598 */ 599 static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list, 600 const struct sk_buff *skb) 601 { 602 /* This BUG_ON may seem severe, but if we just return then we 603 * are going to dereference garbage. 604 */ 605 BUG_ON(skb_queue_is_first(list, skb)); 606 return skb->prev; 607 } 608 609 /** 610 * skb_get - reference buffer 611 * @skb: buffer to reference 612 * 613 * Makes another reference to a socket buffer and returns a pointer 614 * to the buffer. 615 */ 616 static inline struct sk_buff *skb_get(struct sk_buff *skb) 617 { 618 atomic_inc(&skb->users); 619 return skb; 620 } 621 622 /* 623 * If users == 1, we are the only owner and are can avoid redundant 624 * atomic change. 625 */ 626 627 /** 628 * skb_cloned - is the buffer a clone 629 * @skb: buffer to check 630 * 631 * Returns true if the buffer was generated with skb_clone() and is 632 * one of multiple shared copies of the buffer. 
Cloned buffers are 633 * shared data so must not be written to under normal circumstances. 634 */ 635 static inline int skb_cloned(const struct sk_buff *skb) 636 { 637 return skb->cloned && 638 (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1; 639 } 640 641 /** 642 * skb_header_cloned - is the header a clone 643 * @skb: buffer to check 644 * 645 * Returns true if modifying the header part of the buffer requires 646 * the data to be copied. 647 */ 648 static inline int skb_header_cloned(const struct sk_buff *skb) 649 { 650 int dataref; 651 652 if (!skb->cloned) 653 return 0; 654 655 dataref = atomic_read(&skb_shinfo(skb)->dataref); 656 dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT); 657 return dataref != 1; 658 } 659 660 /** 661 * skb_header_release - release reference to header 662 * @skb: buffer to operate on 663 * 664 * Drop a reference to the header part of the buffer. This is done 665 * by acquiring a payload reference. You must not read from the header 666 * part of skb->data after this. 667 */ 668 static inline void skb_header_release(struct sk_buff *skb) 669 { 670 BUG_ON(skb->nohdr); 671 skb->nohdr = 1; 672 atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref); 673 } 674 675 /** 676 * skb_shared - is the buffer shared 677 * @skb: buffer to check 678 * 679 * Returns true if more than one person has a reference to this 680 * buffer. 681 */ 682 static inline int skb_shared(const struct sk_buff *skb) 683 { 684 return atomic_read(&skb->users) != 1; 685 } 686 687 /** 688 * skb_share_check - check if buffer is shared and if so clone it 689 * @skb: buffer to check 690 * @pri: priority for memory allocation 691 * 692 * If the buffer is shared the buffer is cloned and the old copy 693 * drops a reference. A new clone with a single reference is returned. 694 * If the buffer is not shared the original buffer is returned. When 695 * being called from interrupt status or with spinlocks held pri must 696 * be GFP_ATOMIC. 697 * 698 * NULL is returned on a memory allocation failure. 699 */ 700 static inline struct sk_buff *skb_share_check(struct sk_buff *skb, 701 gfp_t pri) 702 { 703 might_sleep_if(pri & __GFP_WAIT); 704 if (skb_shared(skb)) { 705 struct sk_buff *nskb = skb_clone(skb, pri); 706 kfree_skb(skb); 707 skb = nskb; 708 } 709 return skb; 710 } 711 712 /* 713 * Copy shared buffers into a new sk_buff. We effectively do COW on 714 * packets to handle cases where we have a local reader and forward 715 * and a couple of other messy ones. The normal one is tcpdumping 716 * a packet thats being forwarded. 717 */ 718 719 /** 720 * skb_unshare - make a copy of a shared buffer 721 * @skb: buffer to check 722 * @pri: priority for memory allocation 723 * 724 * If the socket buffer is a clone then this function creates a new 725 * copy of the data, drops a reference count on the old copy and returns 726 * the new copy with the reference count at 1. If the buffer is not a clone 727 * the original buffer is returned. When called with a spinlock held or 728 * from interrupt state @pri must be %GFP_ATOMIC 729 * 730 * %NULL is returned on a memory allocation failure. 731 */ 732 static inline struct sk_buff *skb_unshare(struct sk_buff *skb, 733 gfp_t pri) 734 { 735 might_sleep_if(pri & __GFP_WAIT); 736 if (skb_cloned(skb)) { 737 struct sk_buff *nskb = skb_copy(skb, pri); 738 kfree_skb(skb); /* Free our shared copy */ 739 skb = nskb; 740 } 741 return skb; 742 } 743 744 /** 745 * skb_peek 746 * @list_: list to peek at 747 * 748 * Peek an &sk_buff. 
Unlike most other operations you _MUST_ 749 * be careful with this one. A peek leaves the buffer on the 750 * list and someone else may run off with it. You must hold 751 * the appropriate locks or have a private queue to do this. 752 * 753 * Returns %NULL for an empty list or a pointer to the head element. 754 * The reference count is not incremented and the reference is therefore 755 * volatile. Use with caution. 756 */ 757 static inline struct sk_buff *skb_peek(struct sk_buff_head *list_) 758 { 759 struct sk_buff *list = ((struct sk_buff *)list_)->next; 760 if (list == (struct sk_buff *)list_) 761 list = NULL; 762 return list; 763 } 764 765 /** 766 * skb_peek_tail 767 * @list_: list to peek at 768 * 769 * Peek an &sk_buff. Unlike most other operations you _MUST_ 770 * be careful with this one. A peek leaves the buffer on the 771 * list and someone else may run off with it. You must hold 772 * the appropriate locks or have a private queue to do this. 773 * 774 * Returns %NULL for an empty list or a pointer to the tail element. 775 * The reference count is not incremented and the reference is therefore 776 * volatile. Use with caution. 777 */ 778 static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_) 779 { 780 struct sk_buff *list = ((struct sk_buff *)list_)->prev; 781 if (list == (struct sk_buff *)list_) 782 list = NULL; 783 return list; 784 } 785 786 /** 787 * skb_queue_len - get queue length 788 * @list_: list to measure 789 * 790 * Return the length of an &sk_buff queue. 791 */ 792 static inline __u32 skb_queue_len(const struct sk_buff_head *list_) 793 { 794 return list_->qlen; 795 } 796 797 /** 798 * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head 799 * @list: queue to initialize 800 * 801 * This initializes only the list and queue length aspects of 802 * an sk_buff_head object. This allows to initialize the list 803 * aspects of an sk_buff_head without reinitializing things like 804 * the spinlock. It can also be used for on-stack sk_buff_head 805 * objects where the spinlock is known to not be used. 806 */ 807 static inline void __skb_queue_head_init(struct sk_buff_head *list) 808 { 809 list->prev = list->next = (struct sk_buff *)list; 810 list->qlen = 0; 811 } 812 813 /* 814 * This function creates a split out lock class for each invocation; 815 * this is needed for now since a whole lot of users of the skb-queue 816 * infrastructure in drivers have different locking usage (in hardirq) 817 * than the networking core (in softirq only). In the long run either the 818 * network layer or drivers should need annotation to consolidate the 819 * main types of usage into 3 classes. 820 */ 821 static inline void skb_queue_head_init(struct sk_buff_head *list) 822 { 823 spin_lock_init(&list->lock); 824 __skb_queue_head_init(list); 825 } 826 827 static inline void skb_queue_head_init_class(struct sk_buff_head *list, 828 struct lock_class_key *class) 829 { 830 skb_queue_head_init(list); 831 lockdep_set_class(&list->lock, class); 832 } 833 834 /* 835 * Insert an sk_buff on a list. 836 * 837 * The "__skb_xxxx()" functions are the non-atomic ones that 838 * can only be called with interrupts disabled. 
 */
extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

/**
 *	skb_queue_splice - join two skb lists; this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	__skb_queue_after - queue a buffer after a given buffer in a list
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
938 */ 939 static inline void __skb_queue_after(struct sk_buff_head *list, 940 struct sk_buff *prev, 941 struct sk_buff *newsk) 942 { 943 __skb_insert(newsk, prev, prev->next, list); 944 } 945 946 extern void skb_append(struct sk_buff *old, struct sk_buff *newsk, 947 struct sk_buff_head *list); 948 949 static inline void __skb_queue_before(struct sk_buff_head *list, 950 struct sk_buff *next, 951 struct sk_buff *newsk) 952 { 953 __skb_insert(newsk, next->prev, next, list); 954 } 955 956 /** 957 * __skb_queue_head - queue a buffer at the list head 958 * @list: list to use 959 * @newsk: buffer to queue 960 * 961 * Queue a buffer at the start of a list. This function takes no locks 962 * and you must therefore hold required locks before calling it. 963 * 964 * A buffer cannot be placed on two lists at the same time. 965 */ 966 extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk); 967 static inline void __skb_queue_head(struct sk_buff_head *list, 968 struct sk_buff *newsk) 969 { 970 __skb_queue_after(list, (struct sk_buff *)list, newsk); 971 } 972 973 /** 974 * __skb_queue_tail - queue a buffer at the list tail 975 * @list: list to use 976 * @newsk: buffer to queue 977 * 978 * Queue a buffer at the end of a list. This function takes no locks 979 * and you must therefore hold required locks before calling it. 980 * 981 * A buffer cannot be placed on two lists at the same time. 982 */ 983 extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk); 984 static inline void __skb_queue_tail(struct sk_buff_head *list, 985 struct sk_buff *newsk) 986 { 987 __skb_queue_before(list, (struct sk_buff *)list, newsk); 988 } 989 990 /* 991 * remove sk_buff from list. _Must_ be called atomically, and with 992 * the list known.. 993 */ 994 extern void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list); 995 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) 996 { 997 struct sk_buff *next, *prev; 998 999 list->qlen--; 1000 next = skb->next; 1001 prev = skb->prev; 1002 skb->next = skb->prev = NULL; 1003 next->prev = prev; 1004 prev->next = next; 1005 } 1006 1007 /** 1008 * __skb_dequeue - remove from the head of the queue 1009 * @list: list to dequeue from 1010 * 1011 * Remove the head of the list. This function does not take any locks 1012 * so must be used with appropriate locks held only. The head item is 1013 * returned or %NULL if the list is empty. 1014 */ 1015 extern struct sk_buff *skb_dequeue(struct sk_buff_head *list); 1016 static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list) 1017 { 1018 struct sk_buff *skb = skb_peek(list); 1019 if (skb) 1020 __skb_unlink(skb, list); 1021 return skb; 1022 } 1023 1024 /** 1025 * __skb_dequeue_tail - remove from the tail of the queue 1026 * @list: list to dequeue from 1027 * 1028 * Remove the tail of the list. This function does not take any locks 1029 * so must be used with appropriate locks held only. The tail item is 1030 * returned or %NULL if the list is empty. 
1031 */ 1032 extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list); 1033 static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list) 1034 { 1035 struct sk_buff *skb = skb_peek_tail(list); 1036 if (skb) 1037 __skb_unlink(skb, list); 1038 return skb; 1039 } 1040 1041 1042 static inline int skb_is_nonlinear(const struct sk_buff *skb) 1043 { 1044 return skb->data_len; 1045 } 1046 1047 static inline unsigned int skb_headlen(const struct sk_buff *skb) 1048 { 1049 return skb->len - skb->data_len; 1050 } 1051 1052 static inline int skb_pagelen(const struct sk_buff *skb) 1053 { 1054 int i, len = 0; 1055 1056 for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) 1057 len += skb_shinfo(skb)->frags[i].size; 1058 return len + skb_headlen(skb); 1059 } 1060 1061 static inline void skb_fill_page_desc(struct sk_buff *skb, int i, 1062 struct page *page, int off, int size) 1063 { 1064 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1065 1066 frag->page = page; 1067 frag->page_offset = off; 1068 frag->size = size; 1069 skb_shinfo(skb)->nr_frags = i + 1; 1070 } 1071 1072 extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, 1073 int off, int size); 1074 1075 #define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags) 1076 #define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frags(skb)) 1077 #define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb)) 1078 1079 #ifdef NET_SKBUFF_DATA_USES_OFFSET 1080 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb) 1081 { 1082 return skb->head + skb->tail; 1083 } 1084 1085 static inline void skb_reset_tail_pointer(struct sk_buff *skb) 1086 { 1087 skb->tail = skb->data - skb->head; 1088 } 1089 1090 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) 1091 { 1092 skb_reset_tail_pointer(skb); 1093 skb->tail += offset; 1094 } 1095 #else /* NET_SKBUFF_DATA_USES_OFFSET */ 1096 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb) 1097 { 1098 return skb->tail; 1099 } 1100 1101 static inline void skb_reset_tail_pointer(struct sk_buff *skb) 1102 { 1103 skb->tail = skb->data; 1104 } 1105 1106 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) 1107 { 1108 skb->tail = skb->data + offset; 1109 } 1110 1111 #endif /* NET_SKBUFF_DATA_USES_OFFSET */ 1112 1113 /* 1114 * Add data to an sk_buff 1115 */ 1116 extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len); 1117 static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len) 1118 { 1119 unsigned char *tmp = skb_tail_pointer(skb); 1120 SKB_LINEAR_ASSERT(skb); 1121 skb->tail += len; 1122 skb->len += len; 1123 return tmp; 1124 } 1125 1126 extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len); 1127 static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len) 1128 { 1129 skb->data -= len; 1130 skb->len += len; 1131 return skb->data; 1132 } 1133 1134 extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len); 1135 static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len) 1136 { 1137 skb->len -= len; 1138 BUG_ON(skb->len < skb->data_len); 1139 return skb->data += len; 1140 } 1141 1142 extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta); 1143 1144 static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len) 1145 { 1146 if (len > skb_headlen(skb) && 1147 !__pskb_pull_tail(skb, len - skb_headlen(skb))) 1148 return NULL; 1149 skb->len -= len; 1150 return skb->data += len; 1151 } 
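/*
 * Illustrative sketch of how alloc_skb(), skb_reserve(), skb_put() and
 * skb_push() are typically combined when building a frame by hand ("hdr",
 * "hlen", "data" and "dlen" are hypothetical; only the allocation failure
 * is handled):
 *
 *	skb = alloc_skb(hlen + dlen, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hlen);				leave room for headers
 *	memcpy(skb_put(skb, dlen), data, dlen);		append the payload
 *	memcpy(skb_push(skb, hlen), hdr, hlen);		prepend the header
 */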
1152 1153 static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len) 1154 { 1155 return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len); 1156 } 1157 1158 static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len) 1159 { 1160 if (likely(len <= skb_headlen(skb))) 1161 return 1; 1162 if (unlikely(len > skb->len)) 1163 return 0; 1164 return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL; 1165 } 1166 1167 /** 1168 * skb_headroom - bytes at buffer head 1169 * @skb: buffer to check 1170 * 1171 * Return the number of bytes of free space at the head of an &sk_buff. 1172 */ 1173 static inline unsigned int skb_headroom(const struct sk_buff *skb) 1174 { 1175 return skb->data - skb->head; 1176 } 1177 1178 /** 1179 * skb_tailroom - bytes at buffer end 1180 * @skb: buffer to check 1181 * 1182 * Return the number of bytes of free space at the tail of an sk_buff 1183 */ 1184 static inline int skb_tailroom(const struct sk_buff *skb) 1185 { 1186 return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail; 1187 } 1188 1189 /** 1190 * skb_reserve - adjust headroom 1191 * @skb: buffer to alter 1192 * @len: bytes to move 1193 * 1194 * Increase the headroom of an empty &sk_buff by reducing the tail 1195 * room. This is only allowed for an empty buffer. 1196 */ 1197 static inline void skb_reserve(struct sk_buff *skb, int len) 1198 { 1199 skb->data += len; 1200 skb->tail += len; 1201 } 1202 1203 #ifdef NET_SKBUFF_DATA_USES_OFFSET 1204 static inline unsigned char *skb_transport_header(const struct sk_buff *skb) 1205 { 1206 return skb->head + skb->transport_header; 1207 } 1208 1209 static inline void skb_reset_transport_header(struct sk_buff *skb) 1210 { 1211 skb->transport_header = skb->data - skb->head; 1212 } 1213 1214 static inline void skb_set_transport_header(struct sk_buff *skb, 1215 const int offset) 1216 { 1217 skb_reset_transport_header(skb); 1218 skb->transport_header += offset; 1219 } 1220 1221 static inline unsigned char *skb_network_header(const struct sk_buff *skb) 1222 { 1223 return skb->head + skb->network_header; 1224 } 1225 1226 static inline void skb_reset_network_header(struct sk_buff *skb) 1227 { 1228 skb->network_header = skb->data - skb->head; 1229 } 1230 1231 static inline void skb_set_network_header(struct sk_buff *skb, const int offset) 1232 { 1233 skb_reset_network_header(skb); 1234 skb->network_header += offset; 1235 } 1236 1237 static inline unsigned char *skb_mac_header(const struct sk_buff *skb) 1238 { 1239 return skb->head + skb->mac_header; 1240 } 1241 1242 static inline int skb_mac_header_was_set(const struct sk_buff *skb) 1243 { 1244 return skb->mac_header != ~0U; 1245 } 1246 1247 static inline void skb_reset_mac_header(struct sk_buff *skb) 1248 { 1249 skb->mac_header = skb->data - skb->head; 1250 } 1251 1252 static inline void skb_set_mac_header(struct sk_buff *skb, const int offset) 1253 { 1254 skb_reset_mac_header(skb); 1255 skb->mac_header += offset; 1256 } 1257 1258 #else /* NET_SKBUFF_DATA_USES_OFFSET */ 1259 1260 static inline unsigned char *skb_transport_header(const struct sk_buff *skb) 1261 { 1262 return skb->transport_header; 1263 } 1264 1265 static inline void skb_reset_transport_header(struct sk_buff *skb) 1266 { 1267 skb->transport_header = skb->data; 1268 } 1269 1270 static inline void skb_set_transport_header(struct sk_buff *skb, 1271 const int offset) 1272 { 1273 skb->transport_header = skb->data + offset; 1274 } 1275 1276 static inline unsigned char *skb_network_header(const struct sk_buff *skb) 1277 { 1278 return 
	skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb->network_header = skb->data + offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != NULL;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb->mac_header = skb->data + offset;
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies: it can be small if the
 * hardware handles it, or large if we have to take an exception and fix it
 * in software.
 *
 * Since an Ethernet header is 14 bytes, network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade-off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 32 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As with NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to the size of a cacheline (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom; you should not reduce this.
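 *
 * Illustrative sketch (hypothetical RX buffer allocation in a driver;
 * "bufsz" is an assumed buffer size):
 *
 *	skb = dev_alloc_skb(bufsz + NET_IP_ALIGN);
 *	if (skb)
 *		skb_reserve(skb, NET_IP_ALIGN);
 *
 * dev_alloc_skb() already leaves NET_SKB_PAD bytes of headroom for the
 * stack; the extra skb_reserve() shifts skb->data by two more bytes so
 * that the IP header behind the 14-byte Ethernet header ends up aligned.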
1365 */ 1366 #ifndef NET_SKB_PAD 1367 #define NET_SKB_PAD 32 1368 #endif 1369 1370 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len); 1371 1372 static inline void __skb_trim(struct sk_buff *skb, unsigned int len) 1373 { 1374 if (unlikely(skb->data_len)) { 1375 WARN_ON(1); 1376 return; 1377 } 1378 skb->len = len; 1379 skb_set_tail_pointer(skb, len); 1380 } 1381 1382 extern void skb_trim(struct sk_buff *skb, unsigned int len); 1383 1384 static inline int __pskb_trim(struct sk_buff *skb, unsigned int len) 1385 { 1386 if (skb->data_len) 1387 return ___pskb_trim(skb, len); 1388 __skb_trim(skb, len); 1389 return 0; 1390 } 1391 1392 static inline int pskb_trim(struct sk_buff *skb, unsigned int len) 1393 { 1394 return (len < skb->len) ? __pskb_trim(skb, len) : 0; 1395 } 1396 1397 /** 1398 * pskb_trim_unique - remove end from a paged unique (not cloned) buffer 1399 * @skb: buffer to alter 1400 * @len: new length 1401 * 1402 * This is identical to pskb_trim except that the caller knows that 1403 * the skb is not cloned so we should never get an error due to out- 1404 * of-memory. 1405 */ 1406 static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len) 1407 { 1408 int err = pskb_trim(skb, len); 1409 BUG_ON(err); 1410 } 1411 1412 /** 1413 * skb_orphan - orphan a buffer 1414 * @skb: buffer to orphan 1415 * 1416 * If a buffer currently has an owner then we call the owner's 1417 * destructor function and make the @skb unowned. The buffer continues 1418 * to exist but is no longer charged to its former owner. 1419 */ 1420 static inline void skb_orphan(struct sk_buff *skb) 1421 { 1422 if (skb->destructor) 1423 skb->destructor(skb); 1424 skb->destructor = NULL; 1425 skb->sk = NULL; 1426 } 1427 1428 /** 1429 * __skb_queue_purge - empty a list 1430 * @list: list to empty 1431 * 1432 * Delete all buffers on an &sk_buff list. Each buffer is removed from 1433 * the list and one reference dropped. This function does not take the 1434 * list lock and the caller must hold the relevant locks to use it. 1435 */ 1436 extern void skb_queue_purge(struct sk_buff_head *list); 1437 static inline void __skb_queue_purge(struct sk_buff_head *list) 1438 { 1439 struct sk_buff *skb; 1440 while ((skb = __skb_dequeue(list)) != NULL) 1441 kfree_skb(skb); 1442 } 1443 1444 /** 1445 * __dev_alloc_skb - allocate an skbuff for receiving 1446 * @length: length to allocate 1447 * @gfp_mask: get_free_pages mask, passed to alloc_skb 1448 * 1449 * Allocate a new &sk_buff and assign it a usage count of one. The 1450 * buffer has unspecified headroom built in. Users should allocate 1451 * the headroom they think they need without accounting for the 1452 * built in space. The built in space is used for optimisations. 1453 * 1454 * %NULL is returned if there is no free memory. 1455 */ 1456 static inline struct sk_buff *__dev_alloc_skb(unsigned int length, 1457 gfp_t gfp_mask) 1458 { 1459 struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask); 1460 if (likely(skb)) 1461 skb_reserve(skb, NET_SKB_PAD); 1462 return skb; 1463 } 1464 1465 extern struct sk_buff *dev_alloc_skb(unsigned int length); 1466 1467 extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev, 1468 unsigned int length, gfp_t gfp_mask); 1469 1470 /** 1471 * netdev_alloc_skb - allocate an skbuff for rx on a specific device 1472 * @dev: network device to receive on 1473 * @length: length to allocate 1474 * 1475 * Allocate a new &sk_buff and assign it a usage count of one. The 1476 * buffer has unspecified headroom built in. 
Users should allocate 1477 * the headroom they think they need without accounting for the 1478 * built in space. The built in space is used for optimisations. 1479 * 1480 * %NULL is returned if there is no free memory. Although this function 1481 * allocates memory it can be called from an interrupt. 1482 */ 1483 static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev, 1484 unsigned int length) 1485 { 1486 return __netdev_alloc_skb(dev, length, GFP_ATOMIC); 1487 } 1488 1489 extern struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask); 1490 1491 /** 1492 * netdev_alloc_page - allocate a page for ps-rx on a specific device 1493 * @dev: network device to receive on 1494 * 1495 * Allocate a new page node local to the specified device. 1496 * 1497 * %NULL is returned if there is no free memory. 1498 */ 1499 static inline struct page *netdev_alloc_page(struct net_device *dev) 1500 { 1501 return __netdev_alloc_page(dev, GFP_ATOMIC); 1502 } 1503 1504 static inline void netdev_free_page(struct net_device *dev, struct page *page) 1505 { 1506 __free_page(page); 1507 } 1508 1509 /** 1510 * skb_clone_writable - is the header of a clone writable 1511 * @skb: buffer to check 1512 * @len: length up to which to write 1513 * 1514 * Returns true if modifying the header part of the cloned buffer 1515 * does not requires the data to be copied. 1516 */ 1517 static inline int skb_clone_writable(struct sk_buff *skb, unsigned int len) 1518 { 1519 return !skb_header_cloned(skb) && 1520 skb_headroom(skb) + len <= skb->hdr_len; 1521 } 1522 1523 static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom, 1524 int cloned) 1525 { 1526 int delta = 0; 1527 1528 if (headroom < NET_SKB_PAD) 1529 headroom = NET_SKB_PAD; 1530 if (headroom > skb_headroom(skb)) 1531 delta = headroom - skb_headroom(skb); 1532 1533 if (delta || cloned) 1534 return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0, 1535 GFP_ATOMIC); 1536 return 0; 1537 } 1538 1539 /** 1540 * skb_cow - copy header of skb when it is required 1541 * @skb: buffer to cow 1542 * @headroom: needed headroom 1543 * 1544 * If the skb passed lacks sufficient headroom or its data part 1545 * is shared, data is reallocated. If reallocation fails, an error 1546 * is returned and original skb is not changed. 1547 * 1548 * The result is skb with writable area skb->head...skb->tail 1549 * and at least @headroom of space at head. 1550 */ 1551 static inline int skb_cow(struct sk_buff *skb, unsigned int headroom) 1552 { 1553 return __skb_cow(skb, headroom, skb_cloned(skb)); 1554 } 1555 1556 /** 1557 * skb_cow_head - skb_cow but only making the head writable 1558 * @skb: buffer to cow 1559 * @headroom: needed headroom 1560 * 1561 * This function is identical to skb_cow except that we replace the 1562 * skb_cloned check by skb_header_cloned. It should be used when 1563 * you only need to push on some header and do not need to modify 1564 * the data. 1565 */ 1566 static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom) 1567 { 1568 return __skb_cow(skb, headroom, skb_header_cloned(skb)); 1569 } 1570 1571 /** 1572 * skb_padto - pad an skbuff up to a minimal size 1573 * @skb: buffer to pad 1574 * @len: minimal length 1575 * 1576 * Pads up a buffer to ensure the trailing bytes exist and are 1577 * blanked. If the buffer already contains sufficient data it 1578 * is untouched. Otherwise it is extended. Returns zero on 1579 * success. The skb is freed on error. 
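 *
 *	Illustrative use (a sketch; ETH_ZLEN is the minimum Ethernet frame
 *	length, and the return value follows the usual ndo_start_xmit
 *	convention for dropped packets):
 *
 *		if (skb_padto(skb, ETH_ZLEN))
 *			return NETDEV_TX_OK;	the skb has already been freed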
1580 */ 1581 1582 static inline int skb_padto(struct sk_buff *skb, unsigned int len) 1583 { 1584 unsigned int size = skb->len; 1585 if (likely(size >= len)) 1586 return 0; 1587 return skb_pad(skb, len - size); 1588 } 1589 1590 static inline int skb_add_data(struct sk_buff *skb, 1591 char __user *from, int copy) 1592 { 1593 const int off = skb->len; 1594 1595 if (skb->ip_summed == CHECKSUM_NONE) { 1596 int err = 0; 1597 __wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy), 1598 copy, 0, &err); 1599 if (!err) { 1600 skb->csum = csum_block_add(skb->csum, csum, off); 1601 return 0; 1602 } 1603 } else if (!copy_from_user(skb_put(skb, copy), from, copy)) 1604 return 0; 1605 1606 __skb_trim(skb, off); 1607 return -EFAULT; 1608 } 1609 1610 static inline int skb_can_coalesce(struct sk_buff *skb, int i, 1611 struct page *page, int off) 1612 { 1613 if (i) { 1614 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; 1615 1616 return page == frag->page && 1617 off == frag->page_offset + frag->size; 1618 } 1619 return 0; 1620 } 1621 1622 static inline int __skb_linearize(struct sk_buff *skb) 1623 { 1624 return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM; 1625 } 1626 1627 /** 1628 * skb_linearize - convert paged skb to linear one 1629 * @skb: buffer to linarize 1630 * 1631 * If there is no free memory -ENOMEM is returned, otherwise zero 1632 * is returned and the old skb data released. 1633 */ 1634 static inline int skb_linearize(struct sk_buff *skb) 1635 { 1636 return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0; 1637 } 1638 1639 /** 1640 * skb_linearize_cow - make sure skb is linear and writable 1641 * @skb: buffer to process 1642 * 1643 * If there is no free memory -ENOMEM is returned, otherwise zero 1644 * is returned and the old skb data released. 1645 */ 1646 static inline int skb_linearize_cow(struct sk_buff *skb) 1647 { 1648 return skb_is_nonlinear(skb) || skb_cloned(skb) ? 1649 __skb_linearize(skb) : 0; 1650 } 1651 1652 /** 1653 * skb_postpull_rcsum - update checksum for received skb after pull 1654 * @skb: buffer to update 1655 * @start: start of data before pull 1656 * @len: length of data pulled 1657 * 1658 * After doing a pull on a received packet, you need to call this to 1659 * update the CHECKSUM_COMPLETE checksum, or set ip_summed to 1660 * CHECKSUM_NONE so that it can be recomputed from scratch. 1661 */ 1662 1663 static inline void skb_postpull_rcsum(struct sk_buff *skb, 1664 const void *start, unsigned int len) 1665 { 1666 if (skb->ip_summed == CHECKSUM_COMPLETE) 1667 skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0)); 1668 } 1669 1670 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); 1671 1672 /** 1673 * pskb_trim_rcsum - trim received skb and update checksum 1674 * @skb: buffer to trim 1675 * @len: new length 1676 * 1677 * This is exactly the same as pskb_trim except that it ensures the 1678 * checksum of received packets are still valid after the operation. 
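 *
 *	Illustrative use (a sketch; "iph" is assumed to point at the packet's
 *	IPv4 header, as in the receive path):
 *
 *		if (pskb_trim_rcsum(skb, ntohs(iph->tot_len)))
 *			goto drop;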
1679 */ 1680 1681 static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) 1682 { 1683 if (likely(len >= skb->len)) 1684 return 0; 1685 if (skb->ip_summed == CHECKSUM_COMPLETE) 1686 skb->ip_summed = CHECKSUM_NONE; 1687 return __pskb_trim(skb, len); 1688 } 1689 1690 #define skb_queue_walk(queue, skb) \ 1691 for (skb = (queue)->next; \ 1692 prefetch(skb->next), (skb != (struct sk_buff *)(queue)); \ 1693 skb = skb->next) 1694 1695 #define skb_queue_walk_safe(queue, skb, tmp) \ 1696 for (skb = (queue)->next, tmp = skb->next; \ 1697 skb != (struct sk_buff *)(queue); \ 1698 skb = tmp, tmp = skb->next) 1699 1700 #define skb_queue_walk_from(queue, skb) \ 1701 for (; prefetch(skb->next), (skb != (struct sk_buff *)(queue)); \ 1702 skb = skb->next) 1703 1704 #define skb_queue_walk_from_safe(queue, skb, tmp) \ 1705 for (tmp = skb->next; \ 1706 skb != (struct sk_buff *)(queue); \ 1707 skb = tmp, tmp = skb->next) 1708 1709 #define skb_queue_reverse_walk(queue, skb) \ 1710 for (skb = (queue)->prev; \ 1711 prefetch(skb->prev), (skb != (struct sk_buff *)(queue)); \ 1712 skb = skb->prev) 1713 1714 1715 static inline bool skb_has_frags(const struct sk_buff *skb) 1716 { 1717 return skb_shinfo(skb)->frag_list != NULL; 1718 } 1719 1720 static inline void skb_frag_list_init(struct sk_buff *skb) 1721 { 1722 skb_shinfo(skb)->frag_list = NULL; 1723 } 1724 1725 static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag) 1726 { 1727 frag->next = skb_shinfo(skb)->frag_list; 1728 skb_shinfo(skb)->frag_list = frag; 1729 } 1730 1731 #define skb_walk_frags(skb, iter) \ 1732 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next) 1733 1734 extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, 1735 int *peeked, int *err); 1736 extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, 1737 int noblock, int *err); 1738 extern unsigned int datagram_poll(struct file *file, struct socket *sock, 1739 struct poll_table_struct *wait); 1740 extern int skb_copy_datagram_iovec(const struct sk_buff *from, 1741 int offset, struct iovec *to, 1742 int size); 1743 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, 1744 int hlen, 1745 struct iovec *iov); 1746 extern int skb_copy_datagram_from_iovec(struct sk_buff *skb, 1747 int offset, 1748 const struct iovec *from, 1749 int from_offset, 1750 int len); 1751 extern int skb_copy_datagram_const_iovec(const struct sk_buff *from, 1752 int offset, 1753 const struct iovec *to, 1754 int to_offset, 1755 int size); 1756 extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb); 1757 extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, 1758 unsigned int flags); 1759 extern __wsum skb_checksum(const struct sk_buff *skb, int offset, 1760 int len, __wsum csum); 1761 extern int skb_copy_bits(const struct sk_buff *skb, int offset, 1762 void *to, int len); 1763 extern int skb_store_bits(struct sk_buff *skb, int offset, 1764 const void *from, int len); 1765 extern __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, 1766 int offset, u8 *to, int len, 1767 __wsum csum); 1768 extern int skb_splice_bits(struct sk_buff *skb, 1769 unsigned int offset, 1770 struct pipe_inode_info *pipe, 1771 unsigned int len, 1772 unsigned int flags); 1773 extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); 1774 extern void skb_split(struct sk_buff *skb, 1775 struct sk_buff *skb1, const u32 len); 1776 extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, 1777 int shiftlen); 1778 
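/*
 * Illustrative sketch: totalling the bytes currently held on a queue with
 * skb_queue_walk() ("q" is a hypothetical sk_buff_head whose lock, or some
 * other form of protection, is already held by the caller):
 *
 *	unsigned int bytes = 0;
 *	struct sk_buff *skb;
 *
 *	skb_queue_walk(&q, skb)
 *		bytes += skb->len;
 */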
1779 extern struct sk_buff *skb_segment(struct sk_buff *skb, int features); 1780 1781 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, 1782 int len, void *buffer) 1783 { 1784 int hlen = skb_headlen(skb); 1785 1786 if (hlen - offset >= len) 1787 return skb->data + offset; 1788 1789 if (skb_copy_bits(skb, offset, buffer, len) < 0) 1790 return NULL; 1791 1792 return buffer; 1793 } 1794 1795 static inline void skb_copy_from_linear_data(const struct sk_buff *skb, 1796 void *to, 1797 const unsigned int len) 1798 { 1799 memcpy(to, skb->data, len); 1800 } 1801 1802 static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb, 1803 const int offset, void *to, 1804 const unsigned int len) 1805 { 1806 memcpy(to, skb->data + offset, len); 1807 } 1808 1809 static inline void skb_copy_to_linear_data(struct sk_buff *skb, 1810 const void *from, 1811 const unsigned int len) 1812 { 1813 memcpy(skb->data, from, len); 1814 } 1815 1816 static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb, 1817 const int offset, 1818 const void *from, 1819 const unsigned int len) 1820 { 1821 memcpy(skb->data + offset, from, len); 1822 } 1823 1824 extern void skb_init(void); 1825 1826 static inline ktime_t skb_get_ktime(const struct sk_buff *skb) 1827 { 1828 return skb->tstamp; 1829 } 1830 1831 /** 1832 * skb_get_timestamp - get timestamp from a skb 1833 * @skb: skb to get stamp from 1834 * @stamp: pointer to struct timeval to store stamp in 1835 * 1836 * Timestamps are stored in the skb as offsets to a base timestamp. 1837 * This function converts the offset back to a struct timeval and stores 1838 * it in stamp. 1839 */ 1840 static inline void skb_get_timestamp(const struct sk_buff *skb, 1841 struct timeval *stamp) 1842 { 1843 *stamp = ktime_to_timeval(skb->tstamp); 1844 } 1845 1846 static inline void skb_get_timestampns(const struct sk_buff *skb, 1847 struct timespec *stamp) 1848 { 1849 *stamp = ktime_to_timespec(skb->tstamp); 1850 } 1851 1852 static inline void __net_timestamp(struct sk_buff *skb) 1853 { 1854 skb->tstamp = ktime_get_real(); 1855 } 1856 1857 static inline ktime_t net_timedelta(ktime_t t) 1858 { 1859 return ktime_sub(ktime_get_real(), t); 1860 } 1861 1862 static inline ktime_t net_invalid_timestamp(void) 1863 { 1864 return ktime_set(0, 0); 1865 } 1866 1867 /** 1868 * skb_tstamp_tx - queue clone of skb with send time stamps 1869 * @orig_skb: the original outgoing packet 1870 * @hwtstamps: hardware time stamps, may be NULL if not available 1871 * 1872 * If the skb has a socket associated, then this function clones the 1873 * skb (thus sharing the actual data and optional structures), stores 1874 * the optional hardware time stamping information (if non NULL) or 1875 * generates a software time stamp (otherwise), then queues the clone 1876 * to the error queue of the socket. Errors are silently ignored. 1877 */ 1878 extern void skb_tstamp_tx(struct sk_buff *orig_skb, 1879 struct skb_shared_hwtstamps *hwtstamps); 1880 1881 extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len); 1882 extern __sum16 __skb_checksum_complete(struct sk_buff *skb); 1883 1884 static inline int skb_csum_unnecessary(const struct sk_buff *skb) 1885 { 1886 return skb->ip_summed & CHECKSUM_UNNECESSARY; 1887 } 1888 1889 /** 1890 * skb_checksum_complete - Calculate checksum of an entire packet 1891 * @skb: packet to process 1892 * 1893 * This function calculates the checksum over the entire packet plus 1894 * the value of skb->csum. 
/**
 * skb_checksum_complete - Calculate checksum of an entire packet
 * @skb: packet to process
 *
 * This function calculates the checksum over the entire packet plus
 * the value of skb->csum.  The latter can be used to supply the
 * checksum of a pseudo header as used by TCP/UDP.  It returns the
 * checksum.
 *
 * For protocols that contain complete checksums such as ICMP/TCP/UDP,
 * this function can be used to verify the checksum on received
 * packets.  In that case the function should return zero if the
 * checksum is correct.  In particular, this function will return zero
 * if skb->ip_summed is CHECKSUM_UNNECESSARY, which indicates that the
 * hardware has already verified the correctness of the checksum.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
	if (skb)
		atomic_inc(&skb->users);
}
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
	if (skb)
		kfree_skb(skb);
}
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
	nf_conntrack_put_reasm(skb->nfct_reasm);
	skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}
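/*
 * Illustrative sketch (kept under "#if 0", never compiled): receive-side
 * validation with skb_checksum_complete() as documented above.  It assumes
 * the caller has already seeded skb->csum with the pseudo-header checksum
 * where the protocol needs one (as the TCP/UDP input paths do); the
 * function name is hypothetical.
 */
#if 0
static inline int example_validate_csum(struct sk_buff *skb)
{
	/* Zero means the checksum verified (or the device already did,
	 * i.e. CHECKSUM_UNNECESSARY); anything else is a corrupt packet. */
	if (skb_checksum_complete(skb))
		return -1;
	return 0;
}
#endif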
/* Note: This doesn't put any conntrack and bridge info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->nfct = src->nfct;
	nf_conntrack_get(src->nfct);
	dst->nfctinfo = src->nfctinfo;
	dst->nfct_reasm = src->nfct_reasm;
	nf_conntrack_get_reasm(src->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	dst->nf_bridge = src->nf_bridge;
	nf_bridge_get(src->nf_bridge);
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(dst->nfct);
	nf_conntrack_put_reasm(dst->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(dst->nf_bridge);
#endif
	__nf_copy(dst, src);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}

static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
	return (skb->queue_mapping != 0);
}

extern u16 skb_tx_hash(const struct net_device *dev,
		       const struct sk_buff *skb);

#ifdef CONFIG_XFRM
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return skb->sp;
}
#else
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return NULL;
}
#endif

static inline int skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

static inline int skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set. */
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}
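/*
 * Illustrative sketch (kept under "#if 0", never compiled): the rx-queue
 * helpers above store "queue index + 1" in skb->queue_mapping so that 0
 * can mean "no queue recorded".  The function below, with its hypothetical
 * name and 'fallback' parameter, shows the intended read side of that
 * convention.
 */
#if 0
static inline u16 example_pick_tx_queue(const struct sk_buff *skb,
					u16 fallback)
{
	/* Reuse the recorded receive queue when there is one, otherwise
	 * use whatever the caller chose. */
	if (skb_rx_queue_recorded(skb))
		return skb_get_rx_queue(skb);

	return fallback;
}
#endif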
static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one.  Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */