/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <[email protected]>
 *		Florian La Roche, <[email protected]>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/cache.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>

/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE 0
#define CHECKSUM_UNNECESSARY 1
#define CHECKSUM_COMPLETE 2
#define CHECKSUM_PARTIAL 3

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* return minimum truesize of one skb containing X bytes of data */
#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

/* A. Checksumming of received packets by device.
 *
 *	NONE: device failed to checksum this packet.
 *		skb->csum is undefined.
 *
 *	UNNECESSARY: device parsed the packet and would have verified the
 *		checksum. skb->csum is undefined.
 *	      It is a bad option, but, unfortunately, many vendors do this.
 *	      Apparently with the secret goal of selling you a new device,
 *	      when you add a new protocol to your host, e.g. IPv6. 8)
 *
 *	COMPLETE: the most generic way. Device supplied checksum of _all_
 *	    the packet as seen by netif_rx in skb->csum.
 *	    NOTE: Even if device supports only some protocols, but
 *	    is able to produce some skb->csum, it MUST use COMPLETE,
 *	    not UNNECESSARY.
 *
 *	PARTIAL: identical to the case for output below. This may occur
 *	    on a packet received directly from another Linux OS, e.g.,
 *	    a virtualised Linux kernel on the same host. The packet can
 *	    be treated in the same way as UNNECESSARY except that on
 *	    output (i.e., forwarding) the checksum must be filled in
 *	    by the OS or the hardware.
 *
 * B. Checksumming on output.
 *
 *	NONE: skb is checksummed by protocol or csum is not required.
 *
 *	PARTIAL: device is required to csum packet as seen by hard_start_xmit
 *	from skb->csum_start to the end and to record the checksum
 *	at skb->csum_start + skb->csum_offset.
 *
 *	Device must show its capabilities in dev->features, set
 *	at device setup time.
 *	NETIF_F_HW_CSUM	- it is a clever device, able to checksum
 *			  everything.
 *	NETIF_F_IP_CSUM	- the device is dumb, able to csum only
 *			  TCP/UDP over IPv4. Sigh. Vendors like it this
 *			  way for some unknown reason. Though, see comment
 *			  above about CHECKSUM_UNNECESSARY. 8)
 *	NETIF_F_IPV6_CSUM - about as dumb as the last one but does IPv6 instead.
 *
 *	UNNECESSARY: device will do per-protocol specific csum. Protocol drivers
 *	that do not want the network stack to perform the checksum calculation
 *	should use this flag in their outgoing skbs.
 *	NETIF_F_FCOE_CRC - this indicates the device can do FCoE FC CRC
 *			   offload. Correspondingly, the FCoE protocol driver
 *			   stack should use CHECKSUM_UNNECESSARY.
 *
 * Any questions? No questions, good.		--ANK
 */
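/*
 * Editor's illustrative sketch (not part of this header): how a receive
 * path might translate hardware capabilities into the ip_summed values
 * above. my_rx_checksum() and the hw_verified/hw_csum inputs are
 * hypothetical; real drivers derive them from their descriptor format.
 */
#if 0
static void my_rx_checksum(struct sk_buff *skb, bool hw_verified, __wsum hw_csum)
{
	if (hw_verified) {
		/* NIC parsed and validated the L4 checksum itself */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else if (hw_csum) {
		/* NIC summed the whole packet; stack folds and verifies */
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = hw_csum;
	} else {
		/* no help from hardware; stack checksums in software */
		skb->ip_summed = CHECKSUM_NONE;
	}
}
#endif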
struct net_device;
struct scatterlist;
struct pipe_inode_info;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
	atomic_t use;
};
#endif

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
	atomic_t use;
	unsigned int mask;
	struct net_device *physindev;
	struct net_device *physoutdev;
	unsigned long data[32 / sizeof(unsigned long)];
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff *next;
	struct sk_buff *prev;

	__u32 qlen;
	spinlock_t lock;
};

struct sk_buff;

/* To allow 64K frame to be packed as single skb without frag_list we
 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
 * buffers which do not start on a page boundary.
 *
 * Since GRO uses frags we allocate at least 16 regardless of page
 * size.
 */
#if (65536/PAGE_SIZE + 1) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct {
		struct page *p;
	} page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
	__u32 size;
#else
	__u16 page_offset;
	__u16 size;
#endif
};

static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->size;
}

static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->size = size;
}

static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->size += delta;
}

static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->size -= delta;
}

#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since arbitrary point in time
 * @syststamp:	hwtstamp transformed to system time base
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp. The relation between the different kinds of time
 * stamps is as follows:
 *
 * syststamp and tstamp can be compared against each other in
 * arbitrary combinations. The accuracy of a
 * syststamp/tstamp/"syststamp from other device" comparison is
 * limited by the accuracy of the transformation into system time
 * base. This depends on the device driver and its underlying
 * hardware.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 */
struct skb_shared_hwtstamps {
	ktime_t hwtstamp;
	ktime_t syststamp;
};
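/*
 * Editor's illustrative sketch (not part of this header): consuming the
 * two time stamp domains. skb_hwtstamps() is defined further down in
 * this file; the comparison rules follow the comment above.
 */
#if 0
	struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
	ktime_t hw = hwts->hwtstamp;	/* only comparable within one device */
	ktime_t sys = hwts->syststamp;	/* comparable with skb->tstamp */
#endif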
/* Definitions for tx_flags in struct skb_shared_info */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

	/* generate software time stamp */
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

	/* device driver supports TX zero-copy buffers */
	SKBTX_DEV_ZEROCOPY = 1 << 3,

	/* generate wifi status information (where possible) */
	SKBTX_WIFI_STATUS = 1 << 4,
};

/*
 * The callback notifies userspace to release buffers when skb DMA is done
 * in the lower device; the skb's last reference should be 0 when this is
 * called.
 * The ctx field is used to track device context.
 * The desc field is used to track userspace buffer index.
 */
struct ubuf_info {
	void (*callback)(struct ubuf_info *);
	void *ctx;
	unsigned long desc;
};

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	unsigned char nr_frags;
	__u8 tx_flags;
	unsigned short gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short gso_segs;
	unsigned short gso_type;
	struct sk_buff *frag_list;
	struct skb_shared_hwtstamps hwtstamps;
	__be32 ip6_frag_id;

	/*
	 * Warning : all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor */
	void *destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves. The higher 16 bits hold references
 * to the payload part of skb->data. The lower 16 bits hold references to
 * the entire skb->data. A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
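/*
 * Editor's illustrative sketch (not part of this header): how the two
 * halves of dataref are unpacked. skb_header_cloned() further down
 * performs exactly this arithmetic.
 */
#if 0
	int dataref = atomic_read(&skb_shinfo(skb)->dataref);
	int payload_refs = dataref >> SKB_DATAREF_SHIFT;	/* payload-only refs */
	int total_refs = dataref & SKB_DATAREF_MASK;		/* whole skb->data refs */
	/* invariant: total_refs >= payload_refs */
#endif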
enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,

	SKB_GSO_FCOE = 1 << 5,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
    defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
#endif

/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@tstamp: Time we arrived
 *	@sk: Socket we are owned by
 *	@dev: Device we arrived on/are leaving by
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@_skb_refdst: destination entry (with norefcount bit)
 *	@sp: the security path, used for xfrm
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@hdr_len: writable header length of cloned skb
 *	@csum: Checksum (must include start/offset pair)
 *	@csum_start: Offset from skb->head where checksumming should start
 *	@csum_offset: Offset from csum_start where checksum should be stored
 *	@priority: Packet queueing priority
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@ip_summed: Driver fed us an IP checksum
 *	@nohdr: Payload reference only, must not modify header
 *	@nfctinfo: Relationship of this skb to the connection
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ipvs_property: skbuff is owned by ipvs
 *	@peeked: this packet has been seen already, so stats have been
 *		done for it, don't do them again
 *	@nf_trace: netfilter packet trace flag
 *	@protocol: Packet protocol from driver
 *	@destructor: Destruct function
 *	@nfct: Associated connection, if any
 *	@nfct_reasm: netfilter conntrack re-assembly pointer
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@skb_iif: ifindex of device we arrived on
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@rxhash: the packet hash computed on receive
 *	@queue_mapping: Queue mapping for multiqueue devices
 *	@ndisc_nodetype: router type (from link layer)
 *	@ooo_okay: allow the mapping of a socket to a queue to be changed
 *	@l4_rxhash: indicate rxhash is a canonical 4-tuple hash over transport
 *		ports.
 *	@wifi_acked_valid: wifi_acked was set
 *	@wifi_acked: whether frame was acked on wifi or not
 *	@no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
 *	@dma_cookie: a cookie to one of several possible DMA operations
 *		done by skb DMA functions
 *	@secmark: security marking
 *	@mark: Generic packet mark
 *	@dropcount: total number of sk_receive_queue overflows
 *	@vlan_tci: vlan tag control information
 *	@transport_header: Transport layer header
 *	@network_header: Network layer header
 *	@mac_header: Link layer header
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@truesize: Buffer size
 *	@users: User count - see {datagram,tcp}.c
 */

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff *next;
	struct sk_buff *prev;

	ktime_t tstamp;

	struct sock *sk;
	struct net_device *dev;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char cb[48] __aligned(8);

	unsigned long _skb_refdst;
#ifdef CONFIG_XFRM
	struct sec_path *sp;
#endif
	unsigned int len,
		     data_len;
	__u16 mac_len,
	      hdr_len;
	union {
		__wsum csum;
		struct {
			__u16 csum_start;
			__u16 csum_offset;
		};
	};
	__u32 priority;
	kmemcheck_bitfield_begin(flags1);
	__u8 local_df:1,
	     cloned:1,
	     ip_summed:2,
	     nohdr:1,
	     nfctinfo:3;
	__u8 pkt_type:3,
	     fclone:2,
	     ipvs_property:1,
	     peeked:1,
	     nf_trace:1;
	kmemcheck_bitfield_end(flags1);
	__be16 protocol;

	void (*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack *nfct;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	struct sk_buff *nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info *nf_bridge;
#endif

	int skb_iif;

	__u32 rxhash;

	__u16 vlan_tci;

#ifdef CONFIG_NET_SCHED
	__u16 tc_index;		/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16 tc_verd;		/* traffic control verdict */
#endif
#endif

	__u16 queue_mapping;
	kmemcheck_bitfield_begin(flags2);
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8 ndisc_nodetype:2;
#endif
	__u8 ooo_okay:1;
	__u8 l4_rxhash:1;
	__u8 wifi_acked_valid:1;
	__u8 wifi_acked:1;
	__u8 no_fcs:1;
	__u8 head_frag:1;
	/* 8/10 bit hole (depending on ndisc_nodetype presence) */
	kmemcheck_bitfield_end(flags2);

#ifdef CONFIG_NET_DMA
	dma_cookie_t dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32 secmark;
#endif
	union {
		__u32 mark;
		__u32 dropcount;
		__u32 avail_size;
	};

	sk_buff_data_t transport_header;
	sk_buff_data_t network_header;
	sk_buff_data_t mac_header;
	/* These elements must be at the end, see alloc_skb() for details. */
	sk_buff_data_t tail;
	sk_buff_data_t end;
	unsigned char *head,
		      *data;
	unsigned int truesize;
	atomic_t users;
};

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>


/*
 * skb might have a dst pointer attached, refcounted or not.
 * _skb_refdst low order bit is set if refcount was _not_ taken
 */
#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)
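/*
 * Editor's illustrative sketch (not part of this header): the encoding
 * that skb_dst(), skb_dst_set() and skb_dst_set_noref() below rely on.
 * The low bit of _skb_refdst flags a non-refcounted dst.
 */
#if 0
	skb->_skb_refdst = (unsigned long)dst;			/* reference held */
	skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;	/* no ref; RCU protects */
	dst = (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
#endif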
/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of reference taken or not.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check we still are in a
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}

extern void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst);

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

extern void kfree_skb(struct sk_buff *skb);
extern void consume_skb(struct sk_buff *skb);
extern void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;

extern void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
			     bool *fragstolen, int *delta_truesize);

extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int fclone, int node);
extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, 1, NUMA_NO_NODE);
}

extern void skb_recycle(struct sk_buff *skb);
extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);

extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
extern struct sk_buff *__pskb_copy(struct sk_buff *skb,
				   int headroom, gfp_t gfp_mask);

extern int pskb_expand_head(struct sk_buff *skb,
			    int nhead, int ntail,
			    gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
					    unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				       int newheadroom, int newtailroom,
				       gfp_t priority);
extern int skb_to_sgvec(struct sk_buff *skb,
			struct scatterlist *sg, int offset,
			int len);
extern int skb_cow_data(struct sk_buff *skb, int tailbits,
			struct sk_buff **trailer);
extern int skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	consume_skb(a)

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int getfrag(void *from, char *to, int offset,
			int len, int odd, struct sk_buff *skb),
			void *from, int length);

struct skb_seq_state {
	__u32 lower_offset;
	__u32 upper_offset;
	__u32 frag_idx;
	__u32 stepped_offset;
	struct sk_buff *root_skb;
	struct sk_buff *cur_skb;
	__u8 *frag_data;
};

extern void skb_prepare_seq_read(struct sk_buff *skb,
				 unsigned int from, unsigned int to,
				 struct skb_seq_state *st);
extern unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
				 struct skb_seq_state *st);
extern void skb_abort_seq_read(struct skb_seq_state *st);
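/*
 * Editor's illustrative sketch (not part of this header): the intended
 * calling sequence for the sequential reader declared above. It yields
 * linear chunks of an skb (including paged and frag_list data) without
 * linearizing it.
 */
#if 0
	struct skb_seq_state st;
	const u8 *data;
	unsigned int consumed = 0, len;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		/* process 'len' bytes at 'data' */
		consumed += len;
	}
	/* skb_abort_seq_read(&st) is only needed when stopping early */
#endif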
extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
				  unsigned int to, struct ts_config *config,
				  struct ts_state *state);

extern void __skb_get_rxhash(struct sk_buff *skb);
static inline __u32 skb_get_rxhash(struct sk_buff *skb)
{
	if (!skb->rxhash)
		__skb_get_rxhash(skb);

	return skb->rxhash;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end - skb->head;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return skb->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_first - check if skb is the first entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return skb->prev == (struct sk_buff *)list;
}

/**
 *	skb_queue_next - return the next packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the next packet in @list after @skb. It is only valid to
 *	call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

/**
 *	skb_queue_prev - return the prev packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the prev packet in @list before @skb. It is only valid to
 *	call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid a redundant
 * atomic change.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer. This is done
 *	by acquiring a payload reference. You must not read from the header
 *	part of skb->data after this.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	called from interrupt context or with spinlocks held, @pri must
 *	be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
					      gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);
		kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
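/*
 * Editor's illustrative sketch (not part of this header): the canonical
 * use of skb_share_check() at the top of a receive handler, before any
 * modification of the skb.
 */
#if 0
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return NET_RX_DROP;	/* clone failed; original was freed */
	/* skb is now unshared (though its data may still be cloned) */
#endif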
/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);	/* Free our shared copy */
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek - peek at the head of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->next;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	skb_peek_next - peek skb following the given one from a queue
 *	@skb: skb to start from
 *	@list_: list to peek at
 *
 *	Returns %NULL when the end of the list is met or a pointer to the
 *	next element. The reference count is not incremented and the
 *	reference is therefore volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
					    const struct sk_buff_head *list_)
{
	struct sk_buff *next = skb->next;

	if (next == (struct sk_buff *)list_)
		next = NULL;
	return next;
}

/**
 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->prev;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	skb_queue_len - get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}
/**
 *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length aspects of
 *	an sk_buff_head object. This allows the list aspects of an
 *	sk_buff_head to be initialized without reinitializing things
 *	like the spinlock. It can also be used for on-stack sk_buff_head
 *	objects where the spinlock is known to not be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers should be annotated to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
					     struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}
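/*
 * Editor's illustrative sketch (not part of this header): initializing a
 * driver-private queue with its own lockdep class. skb_queue_tail() and
 * skb_dequeue() are declared further down; my_q_key is hypothetical.
 */
#if 0
	static struct lock_class_key my_q_key;
	struct sk_buff_head q;

	skb_queue_head_init_class(&q, &my_q_key);
	skb_queue_tail(&q, skb);	/* takes q.lock internally */
	skb = skb_dequeue(&q);		/* returns NULL when empty */
#endif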
/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev = prev->next = newsk;
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

/**
 *	skb_queue_splice - join two skb lists, this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}
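/*
 * Editor's illustrative sketch (not part of this header): draining a
 * shared queue in one lock acquisition, then processing the batch
 * lock-free. 'shared', 'flags' and process() are hypothetical caller
 * state; __skb_dequeue() is defined further down.
 */
#if 0
	struct sk_buff_head batch;
	unsigned long flags;

	__skb_queue_head_init(&batch);
	spin_lock_irqsave(&shared->lock, flags);
	skb_queue_splice_tail_init(shared, &batch);	/* 'shared' is now empty */
	spin_unlock_irqrestore(&shared->lock, flags);

	while ((skb = __skb_dequeue(&batch)) != NULL)
		process(skb);
#endif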
/**
 *	__skb_queue_after - queue a buffer after the given one
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		       struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
extern void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next = skb->next;
	prev = skb->prev;
	skb->next = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}


static inline bool skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
	return len + skb_headlen(skb);
}

/**
 * __skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data within @page
 * @size: the length of the data
 *
 * Initialises the @i'th fragment of @skb to point to @size bytes at
 * offset @off within @page.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
					struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page.p = page;
	frag->page_offset = off;
	skb_frag_size_set(frag, size);
}

/**
 * skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data within @page
 * @size: the length of the data
 *
 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
 * @skb to point to @size bytes at offset @off within @page. In
 * addition updates @skb such that @i is the last fragment.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	__skb_fill_page_desc(skb, i, page, off, size);
	skb_shinfo(skb)->nr_frags = i + 1;
}

extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
			    int off, int size, unsigned int truesize);

#define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_has_frag_list(skb))
#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}
#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len += len;
	return tmp;
}

extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len += len;
	return skb->data;
}

extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
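/*
 * Editor's illustrative sketch (not part of this header): parsing a
 * header from a possibly nonlinear skb. struct my_hdr is hypothetical;
 * the point is that pskb_may_pull() guarantees the bytes are linear
 * before they are dereferenced.
 */
#if 0
	if (!pskb_may_pull(skb, sizeof(struct my_hdr)))
		goto drop;			/* truncated packet */
	hdr = (struct my_hdr *)skb->data;	/* now safely addressable */
	__skb_pull(skb, sizeof(struct my_hdr));
#endif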
/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_availroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 *	allocated by sk_stream_alloc()
 */
static inline int skb_availroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->avail_size - skb->len;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}
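/*
 * Editor's illustrative sketch (not part of this header): the classic
 * headroom dance when building an outgoing frame. hlen, dlen, hdr and
 * data are hypothetical caller state.
 */
#if 0
	struct sk_buff *skb = alloc_skb(hlen + dlen, GFP_ATOMIC);

	if (skb) {
		skb_reserve(skb, hlen);			/* leave room for headers */
		memcpy(skb_put(skb, dlen), data, dlen);	/* append payload */
		memcpy(skb_push(skb, hlen), hdr, hlen);	/* prepend header */
	}
#endif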
static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != ~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb->transport_header = skb->data + offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb->network_header = skb->data + offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != NULL;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb->mac_header = skb->data + offset;
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */

static inline void skb_mac_header_rebuild(struct sk_buff *skb)
{
	if (skb_mac_header_was_set(skb)) {
		const unsigned char *old_mac = skb_mac_header(skb);

		skb_set_mac_header(skb, -skb->mac_len);
		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
	}
}

static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
	return skb->csum_start - skb_headroom(skb);
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
	return pskb_may_pull(skb, skb_network_offset(skb) + len);
}
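/*
 * Editor's illustrative sketch (not part of this header): typical
 * receive-path header bookkeeping using the helpers above. ETH_HLEN
 * comes from <linux/if_ether.h>; ip_hlen is hypothetical caller state.
 */
#if 0
	skb_reset_mac_header(skb);		/* skb->data is at the L2 header */
	skb_pull(skb, ETH_HLEN);
	skb_reset_network_header(skb);		/* skb->data is now at the L3 header */
	skb_set_transport_header(skb, ip_hlen);	/* L4 header follows the IP header */
#endif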
/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 32 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom; you should not reduce this.
 *
 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
 * to reduce the average number of cache lines per packet.
 * get_rps_cpus(), for example, only accesses one 64-byte aligned block:
 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
#endif

extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb_is_nonlinear(skb))) {
		WARN_ON(1);
		return;
	}
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

extern void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}
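/*
 * Editor's illustrative sketch (not part of this header): trimming
 * trailing padding once the true length is known. expected_len is
 * hypothetical, e.g. taken from a hardware descriptor.
 */
#if 0
	if (pskb_trim(skb, expected_len))
		goto drop;	/* reallocation of a nonlinear skb failed */
#endif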
/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to out-
 *	of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk = NULL;
}

/**
 *	skb_orphan_frags - orphan the frags contained in a buffer
 *	@skb: buffer to orphan frags from
 *	@gfp_mask: allocation mask for replacement pages
 *
 *	For each frag in the SKB which needs a destructor (i.e. has an
 *	owner) create a copy of that frag and release the original
 *	page by calling the destructor.
 */
static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
		return 0;
	return skb_copy_ubufs(skb, gfp_mask);
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

extern void *netdev_alloc_frag(unsigned int fragsz);

extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
					  unsigned int length,
					  gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
					       unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

/* legacy helper around __netdev_alloc_skb() */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	return __netdev_alloc_skb(NULL, length, gfp_mask);
}

/* legacy helper around netdev_alloc_skb() */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return netdev_alloc_skb(NULL, length);
}


static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length, gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}
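/*
 * Editor's illustrative sketch (not part of this header): refilling a
 * receive ring with IP-aligned buffers. RX_BUF_LEN is a hypothetical
 * driver constant.
 */
#if 0
	struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, RX_BUF_LEN);

	if (!skb)
		return -ENOMEM;	/* retry the refill later */
	/* the IP header will land on a 4-byte boundary after the 14-byte
	 * ethernet header */
#endif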
/**
 * skb_frag_page - retrieve the page referred to by a paged fragment
 * @frag: the paged fragment
 *
 * Returns the &struct page associated with @frag.
 */
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
	return frag->page.p;
}

/**
 * __skb_frag_ref - take an additional reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Takes an additional reference on the paged fragment @frag.
 */
static inline void __skb_frag_ref(skb_frag_t *frag)
{
	get_page(skb_frag_page(frag));
}

/**
 * skb_frag_ref - take an additional reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset.
 *
 * Takes an additional reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_ref(struct sk_buff *skb, int f)
{
	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}

/**
 * __skb_frag_unref - release a reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Releases a reference on the paged fragment @frag.
 */
static inline void __skb_frag_unref(skb_frag_t *frag)
{
	put_page(skb_frag_page(frag));
}

/**
 * skb_frag_unref - release a reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset
 *
 * Releases a reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_unref(struct sk_buff *skb, int f)
{
	__skb_frag_unref(&skb_shinfo(skb)->frags[f]);
}

/**
 * skb_frag_address - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. The page must already
 * be mapped.
 */
static inline void *skb_frag_address(const skb_frag_t *frag)
{
	return page_address(skb_frag_page(frag)) + frag->page_offset;
}

/**
 * skb_frag_address_safe - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. Checks that the page
 * is mapped and returns %NULL otherwise.
 */
static inline void *skb_frag_address_safe(const skb_frag_t *frag)
{
	void *ptr = page_address(skb_frag_page(frag));
	if (unlikely(!ptr))
		return NULL;

	return ptr + frag->page_offset;
}

/**
 * __skb_frag_set_page - sets the page contained in a paged fragment
 * @frag: the paged fragment
 * @page: the page to set
 *
 * Sets the fragment @frag to contain @page.
 */
static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
{
	frag->page.p = page;
}

/**
 * skb_frag_set_page - sets the page contained in a paged fragment of an skb
 * @skb: the buffer
 * @f: the fragment offset
 * @page: the page to set
 *
 * Sets the @f'th fragment of @skb to contain @page.
 */
static inline void skb_frag_set_page(struct sk_buff *skb, int f,
				     struct page *page)
{
	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
}

/**
 * skb_frag_dma_map - maps a paged fragment via the DMA API
 * @dev: the device to map the fragment to
 * @frag: the paged fragment to map
 * @offset: the offset within the fragment (starting at the
 *          fragment's own offset)
 * @size: the number of bytes to map
 * @dir: the direction of the mapping (%DMA_*)
 *
 * Maps the page associated with @frag to @dev.
 */
static inline dma_addr_t skb_frag_dma_map(struct device *dev,
					  const skb_frag_t *frag,
					  size_t offset, size_t size,
					  enum dma_data_direction dir)
{
	return dma_map_page(dev, skb_frag_page(frag),
			    frag->page_offset + offset, size, dir);
}
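/*
 * Editor's illustrative sketch (not part of this header): mapping every
 * fragment of an skb for transmit DMA. The unwind label is hypothetical;
 * real drivers also record each address for later unmapping.
 */
#if 0
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t addr = skb_frag_dma_map(dev, frag, 0,
						   skb_frag_size(frag),
						   DMA_TO_DEVICE);

		if (dma_mapping_error(dev, addr))
			goto unwind;	/* unmap previously mapped frags */
	}
#endif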
static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
					gfp_t gfp_mask)
{
	return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
}

/**
 * skb_clone_writable - is the header of a clone writable
 * @skb: buffer to check
 * @len: length up to which to write
 *
 * Returns true if modifying the header part of the cloned buffer
 * does not require the data to be copied.
 */
static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

/**
 * skb_cow - copy header of skb when it is required
 * @skb: buffer to cow
 * @headroom: needed headroom
 *
 * If the skb passed lacks sufficient headroom or its data part
 * is shared, data is reallocated. If reallocation fails, an error
 * is returned and the original skb is not changed.
 *
 * The result is an skb with a writable area skb->head...skb->tail
 * and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/**
 * skb_cow_head - skb_cow but only making the head writable
 * @skb: buffer to cow
 * @headroom: needed headroom
 *
 * This function is identical to skb_cow except that we replace the
 * skb_cloned check by skb_header_cloned. It should be used when
 * you only need to push on some header and do not need to modify
 * the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}
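
/*
 * Example (illustrative sketch): a transmit path that needs to push a
 * protocol header first makes the head private and roomy enough, then
 * pushes. "MYPROTO_HLEN" and "struct myproto_hdr" are hypothetical.
 *
 *	struct myproto_hdr *hdr;
 *
 *	if (skb_cow_head(skb, MYPROTO_HLEN) < 0)
 *		goto drop;	// reallocation failed, skb unchanged
 *	hdr = (struct myproto_hdr *)skb_push(skb, MYPROTO_HLEN);
 */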
/**
 * skb_padto - pad an skbuff up to a minimal size
 * @skb: buffer to pad
 * @len: minimal length
 *
 * Pads up a buffer to ensure the trailing bytes exist and are
 * blanked. If the buffer already contains sufficient data it
 * is untouched. Otherwise it is extended. Returns zero on
 * success. The skb is freed on error.
 */

static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

static inline int skb_add_data(struct sk_buff *skb,
			       char __user *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
						      copy, 0, &err);
		if (!err) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
				    const struct page *page, int off)
{
	if (i) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == skb_frag_page(frag) &&
		       off == frag->page_offset + skb_frag_size(frag);
	}
	return false;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 * skb_linearize - convert paged skb to linear one
 * @skb: buffer to linearize
 *
 * If there is no free memory -ENOMEM is returned, otherwise zero
 * is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 * skb_linearize_cow - make sure skb is linear and writable
 * @skb: buffer to process
 *
 * If there is no free memory -ENOMEM is returned, otherwise zero
 * is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

/**
 * skb_postpull_rcsum - update checksum for received skb after pull
 * @skb: buffer to update
 * @start: start of data before pull
 * @len: length of data pulled
 *
 * After doing a pull on a received packet, you need to call this to
 * update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 * CHECKSUM_NONE so that it can be recomputed from scratch.
 */

static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}

unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 * pskb_trim_rcsum - trim received skb and update checksum
 * @skb: buffer to trim
 * @len: new length
 *
 * This is exactly the same as pskb_trim except that it ensures the
 * checksum of received packets is still valid after the operation.
 */

static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;				\
		     skb != (struct sk_buff *)(queue);			\
		     skb = skb->next)

#define skb_queue_walk_safe(queue, skb, tmp)				\
		for (skb = (queue)->next, tmp = skb->next;		\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->next)

#define skb_queue_walk_from(queue, skb)					\
		for (; skb != (struct sk_buff *)(queue);		\
		     skb = skb->next)

#define skb_queue_walk_from_safe(queue, skb, tmp)			\
		for (tmp = skb->next;					\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;				\
		     skb != (struct sk_buff *)(queue);			\
		     skb = skb->prev)

#define skb_queue_reverse_walk_safe(queue, skb, tmp)			\
		for (skb = (queue)->prev, tmp = skb->prev;		\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->prev)

#define skb_queue_reverse_walk_from_safe(queue, skb, tmp)		\
		for (tmp = skb->prev;					\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->prev)

static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
{
	frag->next = skb_shinfo(skb)->frag_list;
	skb_shinfo(skb)->frag_list = frag;
}

#define skb_walk_frags(skb, iter) \
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
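
/*
 * Example (illustrative sketch): scanning a queue and removing matching
 * buffers. The _safe variant must be used because the loop unlinks skbs
 * while walking; "match()" is a hypothetical predicate.
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	spin_lock_bh(&list->lock);
 *	skb_queue_walk_safe(list, skb, tmp) {
 *		if (match(skb)) {
 *			__skb_unlink(skb, list);
 *			kfree_skb(skb);
 *		}
 *	}
 *	spin_unlock_bh(&list->lock);
 */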
extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
					   int *peeked, int *off, int *err);
extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
					 int noblock, int *err);
extern unsigned int datagram_poll(struct file *file, struct socket *sock,
				  struct poll_table_struct *wait);
extern int skb_copy_datagram_iovec(const struct sk_buff *from,
				   int offset, struct iovec *to,
				   int size);
extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
					    int hlen,
					    struct iovec *iov);
extern int skb_copy_datagram_from_iovec(struct sk_buff *skb,
					int offset,
					const struct iovec *from,
					int from_offset,
					int len);
extern int skb_copy_datagram_const_iovec(const struct sk_buff *from,
					 int offset,
					 const struct iovec *to,
					 int to_offset,
					 int size);
extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern void skb_free_datagram_locked(struct sock *sk,
				     struct sk_buff *skb);
extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
			     unsigned int flags);
extern __wsum skb_checksum(const struct sk_buff *skb, int offset,
			   int len, __wsum csum);
extern int skb_copy_bits(const struct sk_buff *skb, int offset,
			 void *to, int len);
extern int skb_store_bits(struct sk_buff *skb, int offset,
			  const void *from, int len);
extern __wsum skb_copy_and_csum_bits(const struct sk_buff *skb,
				     int offset, u8 *to, int len,
				     __wsum csum);
extern int skb_splice_bits(struct sk_buff *skb,
			   unsigned int offset,
			   struct pipe_inode_info *pipe,
			   unsigned int len,
			   unsigned int flags);
extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void skb_split(struct sk_buff *skb,
		      struct sk_buff *skb1, const u32 len);
extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
		     int shiftlen);

extern struct sk_buff *skb_segment(struct sk_buff *skb,
				   netdev_features_t features);

static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
				       int len, void *buffer)
{
	int hlen = skb_headlen(skb);

	if (hlen - offset >= len)
		return skb->data + offset;

	if (skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}
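
/*
 * Example (illustrative sketch): safely reading a TCP header that may
 * be split across fragments, given "offset" bytes from skb->data to the
 * header. The stack buffer is only used when the header is not linear.
 *
 *	struct tcphdr _tcph;
 *	const struct tcphdr *th;
 *
 *	th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
 *	if (!th)
 *		return;		// packet too short, drop or ignore
 */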
static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}

extern void skb_init(void);

static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
	return skb->tstamp;
}

/**
 * skb_get_timestamp - get timestamp from a skb
 * @skb: skb to get stamp from
 * @stamp: pointer to struct timeval to store stamp in
 *
 * Timestamps are stored in the skb as offsets to a base timestamp.
 * This function converts the offset back to a struct timeval and stores
 * it in @stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
				     struct timeval *stamp)
{
	*stamp = ktime_to_timeval(skb->tstamp);
}

static inline void skb_get_timestampns(const struct sk_buff *skb,
				       struct timespec *stamp)
{
	*stamp = ktime_to_timespec(skb->tstamp);
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return ktime_set(0, 0);
}

extern void skb_timestamping_init(void);

#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING

extern void skb_clone_tx_timestamp(struct sk_buff *skb);
extern bool skb_defer_rx_timestamp(struct sk_buff *skb);

#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */

static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}

static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
	return false;
}

#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */

/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * PHY drivers may accept clones of transmitted packets for
 * timestamping via their phy_driver.txtstamp method. These drivers
 * must call this function to return the skb back to the stack, with
 * or without a timestamp.
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps, may be NULL if not available
 *
 */
void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps);

/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb: the original outgoing packet
 * @hwtstamps: hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamping information (if non-NULL) or
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket. Errors are silently ignored.
 */
extern void skb_tstamp_tx(struct sk_buff *orig_skb,
			  struct skb_shared_hwtstamps *hwtstamps);

static inline void sw_tx_timestamp(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
	    !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		skb_tstamp_tx(skb, NULL);
}

/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC Drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * @skb: A socket buffer.
 */
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
	skb_clone_tx_timestamp(skb);
	sw_tx_timestamp(skb);
}
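
/*
 * Example (illustrative sketch): the tail of a driver's
 * ndo_start_xmit() routine. skb_tx_timestamp() must run before the
 * hardware can complete (and free) the buffer; the posting helper is a
 * hypothetical driver detail.
 *
 *	skb_tx_timestamp(skb);
 *	mydev_post_to_hw(priv, skb);	// hypothetical hardware kick
 *	return NETDEV_TX_OK;
 */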
/**
 * skb_complete_wifi_ack - deliver skb with wifi status
 *
 * @skb: the original outgoing packet
 * @acked: ack status
 *
 */
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);

extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
extern __sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return skb->ip_summed & CHECKSUM_UNNECESSARY;
}

/**
 * skb_checksum_complete - Calculate checksum of an entire packet
 * @skb: packet to process
 *
 * This function calculates the checksum over the entire packet plus
 * the value of skb->csum. The latter can be used to supply the
 * checksum of a pseudo header as used by TCP/UDP. It returns the
 * checksum.
 *
 * For protocols that contain complete checksums such as ICMP/TCP/UDP,
 * this function can be used to verify the checksum on received
 * packets. In that case the function should return zero if the
 * checksum is correct. In particular, this function will return zero
 * if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
 * hardware has already verified the correctness of the checksum.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}
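
/*
 * Example (illustrative sketch): validating a received UDP packet by
 * first seeding skb->csum with the pseudo-header sum, in the spirit of
 * what the UDP receive path does. "iph" is the IPv4 header pointer and
 * the error label is hypothetical.
 *
 *	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
 *				       skb->len, IPPROTO_UDP, 0);
 *	if (skb_checksum_complete(skb))
 *		goto csum_error;	// bad checksum, drop the packet
 */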
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
	if (skb)
		atomic_inc(&skb->users);
}
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
	if (skb)
		kfree_skb(skb);
}
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(skb->nfct_reasm);
	skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

/* Note: This doesn't put any conntrack or bridge info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->nfct = src->nfct;
	nf_conntrack_get(src->nfct);
	dst->nfctinfo = src->nfctinfo;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	dst->nfct_reasm = src->nfct_reasm;
	nf_conntrack_get_reasm(src->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	dst->nf_bridge = src->nf_bridge;
	nf_bridge_get(src->nf_bridge);
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(dst->nfct);
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(dst->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(dst->nf_bridge);
#endif
	__nf_copy(dst, src);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}

static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
	return skb->queue_mapping != 0;
}

extern u16 __skb_tx_hash(const struct net_device *dev,
			 const struct sk_buff *skb,
			 unsigned int num_tx_queues);

#ifdef CONFIG_XFRM
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return skb->sp;
}
#else
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return NULL;
}
#endif

static inline bool skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

static inline bool skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);
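
/*
 * Example (illustrative sketch): a transmit path that cannot hand GSO
 * buffers to its hardware might segment them first. Error handling and
 * the per-segment transmit loop are abbreviated.
 *
 *	if (skb_is_gso(skb)) {
 *		struct sk_buff *segs;
 *
 *		segs = skb_segment(skb, features);
 *		if (IS_ERR(segs))
 *			goto drop;
 *		// transmit each resulting segment in turn
 *	}
 */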
static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set. */
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
	    unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}

static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one. Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * Fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper, to document places where we make this assertion.
 */
static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);

static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
{
	if (irqs_disabled())
		return false;

	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
		return false;

	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
		return false;

	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
	if (skb_end_offset(skb) < skb_size)
		return false;

	if (skb_shared(skb) || skb_cloned(skb))
		return false;

	return true;
}

/**
 * skb_head_is_locked - Determine if the skb->head is locked down
 * @skb: skb to check
 *
 * The head on skbs built around a head frag can be removed if they are
 * not cloned. This function returns true if the skb head is locked down
 * due to either being allocated via kmalloc, or by being a clone with
 * multiple references to the head.
 */
static inline bool skb_head_is_locked(const struct sk_buff *skb)
{
	return !skb->head_frag || skb_cloned(skb);
}
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */