/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <[email protected]>
 *		Florian La Roche, <[email protected]>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/cache.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>

/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3

/* Round X up to the next SMP_CACHE_BYTES boundary. */
#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
/* Usable payload in an allocation of X bytes, once the skb_shared_info
 * that trails the packet data has been accounted for.
 */
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* return minimum truesize of one skb containing X bytes of data */
#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

/* A. Checksumming of received packets by device.
 *
 * NONE: device failed to checksum this packet.
 *	skb->csum is undefined.
 *
 * UNNECESSARY: device parsed the packet and would have verified the
 *	checksum. skb->csum is undefined.
 *	  It is a bad option, but, unfortunately, many vendors do this.
 *	  Apparently with the secret goal to sell you a new device, when you
 *	  will add a new protocol to your host. F.e. IPv6. 8)
 *
 * COMPLETE: the most generic way. Device supplied checksum of _all_
 *	the packet as seen by netif_rx in skb->csum.
 *	NOTE: Even if a device supports only some protocols, but
 *	is able to produce some skb->csum, it MUST use COMPLETE,
 *	not UNNECESSARY.
 *
 * PARTIAL: identical to the case for output below. This may occur
 *	on a packet received directly from another Linux OS, e.g.,
 *	a virtualised Linux kernel on the same host. The packet can
 *	be treated in the same way as UNNECESSARY, except that on
 *	output (i.e., forwarding) the checksum must be filled in
 *	by the OS or the hardware.
 *
 * B. Checksumming on output.
 *
 * NONE: skb is checksummed by protocol or csum is not required.
 *
 * PARTIAL: device is required to csum packet as seen by hard_start_xmit
 *	from skb->csum_start to the end and to record the checksum
 *	at skb->csum_start + skb->csum_offset.
 *
 *	Device must show its capabilities in dev->features, set
 *	at device setup time.
 *	NETIF_F_HW_CSUM	- it is a clever device, it is able to checksum
 *			  everything.
 *	NETIF_F_IP_CSUM - device is dumb, it is able to csum only
 *			  TCP/UDP over IPv4. Sigh. Vendors like this
 *			  way for an unknown reason. Though, see comment above
 *			  about CHECKSUM_UNNECESSARY. 8)
 *	NETIF_F_IPV6_CSUM - about as dumb as the last one but does IPv6 instead.
 *
 * UNNECESSARY: device will do per-protocol specific csum. Protocol drivers
 *	that do not want net to perform the checksum calculation should use
 *	this flag in their outgoing skbs.
 *	NETIF_F_FCOE_CRC - this indicates the device can do FCoE FC CRC
 *			  offload. Correspondingly, the FCoE protocol driver
 *			  stack should use CHECKSUM_UNNECESSARY.
 *
 * Any questions?
 *	No questions, good.		--ANK
 */

struct net_device;
struct scatterlist;
struct pipe_inode_info;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
/* Connection-tracking reference; only its refcount is visible here. */
struct nf_conntrack {
	atomic_t use;
};
#endif

#ifdef CONFIG_BRIDGE_NETFILTER
/* Saved state for a frame traversing a netfilter-enabled bridge. */
struct nf_bridge_info {
	atomic_t		use;
	unsigned int		mask;
	struct net_device	*physindev;
	struct net_device	*physoutdev;
	unsigned long		data[32 / sizeof(unsigned long)];
};
#endif

struct sk_buff_head {
	/* These two members must be first: the queue head doubles as the
	 * list terminator and is cast to a struct sk_buff by the list
	 * helpers below. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow 64K frame to be packed as single skb without frag_list we
 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
 * buffers which do not start on a page boundary.
 *
 * Since GRO uses frags we allocate at least 16 regardless of page
 * size.
 */
#if (65536/PAGE_SIZE + 1) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif

typedef struct skb_frag_struct skb_frag_t;

/* One page fragment of an skb's paged (non-linear) data. */
struct skb_frag_struct {
	struct {
		struct page *p;
	} page;
/* With 64-bit longs or 64K pages a single fragment can exceed 64K,
 * so offset/size need 32 bits; otherwise 16 bits suffice and keep the
 * struct small. */
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
	__u32 size;
#else
	__u16 page_offset;
	__u16 size;
#endif
};

/* Accessors for the fragment length; use these instead of touching
 * frag->size directly so its width can vary per-config (see above). */
static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->size;
}

static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->size = size;
}

static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->size += delta;
}

static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->size -= delta;
}

#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since arbitrary point in time
 * @syststamp:	hwtstamp transformed to system time base
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp. The relation between the different kinds of time
 * stamps is as follows:
 *
 * syststamp and tstamp can be compared against each other in
 * arbitrary combinations.  The accuracy of a
 * syststamp/tstamp/"syststamp from other device" comparison is
 * limited by the accuracy of the transformation into system time
 * base. This depends on the device driver and its underlying
 * hardware.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 */
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
	ktime_t	syststamp;
};

/* Definitions for tx_flags in struct skb_shared_info */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

	/* generate software time stamp */
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

	/* ensure the originating sk reference is available on driver level */
	SKBTX_DRV_NEEDS_SK_REF = 1 << 3,

	/* device driver supports TX zero-copy buffers */
	SKBTX_DEV_ZEROCOPY = 1 << 4,

	/* generate wifi status information (where possible) */
	SKBTX_WIFI_STATUS = 1 << 5,
};

/*
 * The callback notifies userspace to release buffers when skb DMA is done in
 * lower device, the skb last reference should be 0 when calling this.
 * The ctx field is used to track device context.
 * The desc field is used to track userspace buffer index.
 */
struct ubuf_info {
	void		(*callback)(struct ubuf_info *);
	void		*ctx;
	unsigned long	desc;
};

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	unsigned char	nr_frags;
	__u8		tx_flags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short	gso_type;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;
	__be32		ip6_frag_id;

	/*
	 * Warning : all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor */
	void		*destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)


enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,

	SKB_GSO_FCOE = 1 << 5,
};

/* On 64-bit hosts, tail/end/header marks are stored as offsets from
 * skb->head instead of raw pointers, which shrinks struct sk_buff. */
#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
    defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
#endif

/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@tstamp: Time we arrived
 *	@sk: Socket we are owned by
 *	@dev: Device we arrived on/are leaving by
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@_skb_refdst: destination entry (with norefcount bit)
 *	@sp: the security path, used for xfrm
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@hdr_len: writable header length of cloned skb
 *	@csum: Checksum (must include start/offset pair)
 *	@csum_start: Offset from skb->head where checksumming should start
 *	@csum_offset: Offset from csum_start where checksum should be stored
 *	@priority: Packet queueing priority
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@ip_summed: Driver fed us an IP checksum
 *	@nohdr: Payload reference only, must not modify header
 *	@nfctinfo: Relationship of this skb to the connection
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ipvs_property: skbuff is owned by ipvs
 *	@peeked: this packet has been seen already, so stats have been
 *		done for it, don't do them again
 *	@nf_trace: netfilter packet trace flag
 *	@protocol: Packet protocol from driver
 *	@destructor: Destruct function
 *	@nfct: Associated connection, if any
 *	@nfct_reasm: netfilter conntrack re-assembly pointer
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@skb_iif: ifindex of device we arrived on
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@rxhash: the packet hash computed on receive
 *	@queue_mapping: Queue mapping for multiqueue devices
 *	@ndisc_nodetype: router type (from link layer)
 *	@ooo_okay: allow the mapping of a socket to a queue to be changed
 *	@l4_rxhash: indicate rxhash is a canonical 4-tuple hash over transport
 *		ports.
 *	@wifi_acked_valid: wifi_acked was set
 *	@wifi_acked: whether frame was acked on wifi or not
 *	@no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
 *	@dma_cookie: a cookie to one of several possible DMA operations
 *		done by skb DMA functions
 *	@secmark: security marking
 *	@mark: Generic packet mark
 *	@dropcount: total number of sk_receive_queue overflows
 *	@vlan_tci: vlan tag control information
 *	@transport_header: Transport layer header
 *	@network_header: Network layer header
 *	@mac_header: Link layer header
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@truesize: Buffer size
 *	@users: User count - see {datagram,tcp}.c
 */

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	ktime_t			tstamp;

	struct sock		*sk;
	struct net_device	*dev;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	unsigned long		_skb_refdst;
#ifdef CONFIG_XFRM
	struct sec_path		*sp;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;
	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	kmemcheck_bitfield_begin(flags1);
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	__u8			pkt_type:3,
				fclone:2,
				ipvs_property:1,
				peeked:1,
				nf_trace:1;
	kmemcheck_bitfield_end(flags1);
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack	*nfct;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif

	int			skb_iif;

	__u32			rxhash;

	__u16			vlan_tci;

#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif

	__u16			queue_mapping;
	kmemcheck_bitfield_begin(flags2);
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif
	__u8			ooo_okay:1;
	__u8			l4_rxhash:1;
	__u8			wifi_acked_valid:1;
	__u8			wifi_acked:1;
	__u8			no_fcs:1;
	__u8			head_frag:1;
	/* 8/10 bit hole (depending on ndisc_nodetype presence) */
	kmemcheck_bitfield_end(flags2);

#ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif
	union {
		__u32		mark;
		__u32		dropcount;
		__u32		avail_size;
	};

	sk_buff_data_t		transport_header;
	sk_buff_data_t		network_header;
	sk_buff_data_t		mac_header;
	/* These elements must be at the end, see alloc_skb() for details. */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	atomic_t		users;
};

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>


/*
 * skb might have a dst pointer attached, refcounted or not.
 * _skb_refdst low order bit is set if refcount was _not_ taken
 */
#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)

/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of reference taken or not.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check we still are in a
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}

extern void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst);

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

extern void kfree_skb(struct sk_buff *skb);
extern void consume_skb(struct sk_buff *skb);
extern void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;
extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int fclone, int node);
extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, 1, NUMA_NO_NODE);
}

extern void skb_recycle(struct sk_buff *skb);
extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);

extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
extern struct sk_buff *__pskb_copy(struct sk_buff *skb,
				   int headroom, gfp_t gfp_mask);

extern int	       pskb_expand_head(struct sk_buff *skb,
					int nhead, int ntail,
					gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
					    unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				       int newheadroom, int newtailroom,
				       gfp_t priority);
extern int	       skb_to_sgvec(struct sk_buff *skb,
				    struct scatterlist *sg, int offset,
				    int len);
extern int	       skb_cow_data(struct sk_buff *skb, int tailbits,
				    struct sk_buff **trailer);
extern int	       skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	consume_skb(a)

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int getfrag(void *from, char *to, int offset,
			int len, int odd, struct sk_buff *skb),
			void *from, int length);

/* Iterator state for skb_seq_read(); initialise with
 * skb_prepare_seq_read() and tear down with skb_abort_seq_read(). */
struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

extern void	      skb_prepare_seq_read(struct sk_buff *skb,
					   unsigned int from, unsigned int to,
					   struct skb_seq_state *st);
extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
				   struct skb_seq_state *st);
extern void	      skb_abort_seq_read(struct skb_seq_state *st);

extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
				    unsigned int to, struct ts_config *config,
				    struct ts_state *state);

extern void __skb_get_rxhash(struct sk_buff *skb);
static inline __u32 skb_get_rxhash(struct sk_buff *skb)
{
	/* 0 doubles as the "not yet computed" marker, so the hash is
	 * computed lazily on first use. */
	if (!skb->rxhash)
		__skb_get_rxhash(skb);

	return skb->rxhash;
}

/* skb->end is an offset from skb->head on 64-bit builds and a raw
 * pointer otherwise; these accessors hide the difference. */
#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end - skb->head;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
690 */ 691 static inline bool skb_queue_is_last(const struct sk_buff_head *list, 692 const struct sk_buff *skb) 693 { 694 return skb->next == (struct sk_buff *)list; 695 } 696 697 /** 698 * skb_queue_is_first - check if skb is the first entry in the queue 699 * @list: queue head 700 * @skb: buffer 701 * 702 * Returns true if @skb is the first buffer on the list. 703 */ 704 static inline bool skb_queue_is_first(const struct sk_buff_head *list, 705 const struct sk_buff *skb) 706 { 707 return skb->prev == (struct sk_buff *)list; 708 } 709 710 /** 711 * skb_queue_next - return the next packet in the queue 712 * @list: queue head 713 * @skb: current buffer 714 * 715 * Return the next packet in @list after @skb. It is only valid to 716 * call this if skb_queue_is_last() evaluates to false. 717 */ 718 static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list, 719 const struct sk_buff *skb) 720 { 721 /* This BUG_ON may seem severe, but if we just return then we 722 * are going to dereference garbage. 723 */ 724 BUG_ON(skb_queue_is_last(list, skb)); 725 return skb->next; 726 } 727 728 /** 729 * skb_queue_prev - return the prev packet in the queue 730 * @list: queue head 731 * @skb: current buffer 732 * 733 * Return the prev packet in @list before @skb. It is only valid to 734 * call this if skb_queue_is_first() evaluates to false. 735 */ 736 static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list, 737 const struct sk_buff *skb) 738 { 739 /* This BUG_ON may seem severe, but if we just return then we 740 * are going to dereference garbage. 741 */ 742 BUG_ON(skb_queue_is_first(list, skb)); 743 return skb->prev; 744 } 745 746 /** 747 * skb_get - reference buffer 748 * @skb: buffer to reference 749 * 750 * Makes another reference to a socket buffer and returns a pointer 751 * to the buffer. 
752 */ 753 static inline struct sk_buff *skb_get(struct sk_buff *skb) 754 { 755 atomic_inc(&skb->users); 756 return skb; 757 } 758 759 /* 760 * If users == 1, we are the only owner and are can avoid redundant 761 * atomic change. 762 */ 763 764 /** 765 * skb_cloned - is the buffer a clone 766 * @skb: buffer to check 767 * 768 * Returns true if the buffer was generated with skb_clone() and is 769 * one of multiple shared copies of the buffer. Cloned buffers are 770 * shared data so must not be written to under normal circumstances. 771 */ 772 static inline int skb_cloned(const struct sk_buff *skb) 773 { 774 return skb->cloned && 775 (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1; 776 } 777 778 /** 779 * skb_header_cloned - is the header a clone 780 * @skb: buffer to check 781 * 782 * Returns true if modifying the header part of the buffer requires 783 * the data to be copied. 784 */ 785 static inline int skb_header_cloned(const struct sk_buff *skb) 786 { 787 int dataref; 788 789 if (!skb->cloned) 790 return 0; 791 792 dataref = atomic_read(&skb_shinfo(skb)->dataref); 793 dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT); 794 return dataref != 1; 795 } 796 797 /** 798 * skb_header_release - release reference to header 799 * @skb: buffer to operate on 800 * 801 * Drop a reference to the header part of the buffer. This is done 802 * by acquiring a payload reference. You must not read from the header 803 * part of skb->data after this. 804 */ 805 static inline void skb_header_release(struct sk_buff *skb) 806 { 807 BUG_ON(skb->nohdr); 808 skb->nohdr = 1; 809 atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref); 810 } 811 812 /** 813 * skb_shared - is the buffer shared 814 * @skb: buffer to check 815 * 816 * Returns true if more than one person has a reference to this 817 * buffer. 
818 */ 819 static inline int skb_shared(const struct sk_buff *skb) 820 { 821 return atomic_read(&skb->users) != 1; 822 } 823 824 /** 825 * skb_share_check - check if buffer is shared and if so clone it 826 * @skb: buffer to check 827 * @pri: priority for memory allocation 828 * 829 * If the buffer is shared the buffer is cloned and the old copy 830 * drops a reference. A new clone with a single reference is returned. 831 * If the buffer is not shared the original buffer is returned. When 832 * being called from interrupt status or with spinlocks held pri must 833 * be GFP_ATOMIC. 834 * 835 * NULL is returned on a memory allocation failure. 836 */ 837 static inline struct sk_buff *skb_share_check(struct sk_buff *skb, 838 gfp_t pri) 839 { 840 might_sleep_if(pri & __GFP_WAIT); 841 if (skb_shared(skb)) { 842 struct sk_buff *nskb = skb_clone(skb, pri); 843 kfree_skb(skb); 844 skb = nskb; 845 } 846 return skb; 847 } 848 849 /* 850 * Copy shared buffers into a new sk_buff. We effectively do COW on 851 * packets to handle cases where we have a local reader and forward 852 * and a couple of other messy ones. The normal one is tcpdumping 853 * a packet thats being forwarded. 854 */ 855 856 /** 857 * skb_unshare - make a copy of a shared buffer 858 * @skb: buffer to check 859 * @pri: priority for memory allocation 860 * 861 * If the socket buffer is a clone then this function creates a new 862 * copy of the data, drops a reference count on the old copy and returns 863 * the new copy with the reference count at 1. If the buffer is not a clone 864 * the original buffer is returned. When called with a spinlock held or 865 * from interrupt state @pri must be %GFP_ATOMIC 866 * 867 * %NULL is returned on a memory allocation failure. 
868 */ 869 static inline struct sk_buff *skb_unshare(struct sk_buff *skb, 870 gfp_t pri) 871 { 872 might_sleep_if(pri & __GFP_WAIT); 873 if (skb_cloned(skb)) { 874 struct sk_buff *nskb = skb_copy(skb, pri); 875 kfree_skb(skb); /* Free our shared copy */ 876 skb = nskb; 877 } 878 return skb; 879 } 880 881 /** 882 * skb_peek - peek at the head of an &sk_buff_head 883 * @list_: list to peek at 884 * 885 * Peek an &sk_buff. Unlike most other operations you _MUST_ 886 * be careful with this one. A peek leaves the buffer on the 887 * list and someone else may run off with it. You must hold 888 * the appropriate locks or have a private queue to do this. 889 * 890 * Returns %NULL for an empty list or a pointer to the head element. 891 * The reference count is not incremented and the reference is therefore 892 * volatile. Use with caution. 893 */ 894 static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_) 895 { 896 struct sk_buff *skb = list_->next; 897 898 if (skb == (struct sk_buff *)list_) 899 skb = NULL; 900 return skb; 901 } 902 903 /** 904 * skb_peek_next - peek skb following the given one from a queue 905 * @skb: skb to start from 906 * @list_: list to peek at 907 * 908 * Returns %NULL when the end of the list is met or a pointer to the 909 * next element. The reference count is not incremented and the 910 * reference is therefore volatile. Use with caution. 911 */ 912 static inline struct sk_buff *skb_peek_next(struct sk_buff *skb, 913 const struct sk_buff_head *list_) 914 { 915 struct sk_buff *next = skb->next; 916 917 if (next == (struct sk_buff *)list_) 918 next = NULL; 919 return next; 920 } 921 922 /** 923 * skb_peek_tail - peek at the tail of an &sk_buff_head 924 * @list_: list to peek at 925 * 926 * Peek an &sk_buff. Unlike most other operations you _MUST_ 927 * be careful with this one. A peek leaves the buffer on the 928 * list and someone else may run off with it. 
You must hold 929 * the appropriate locks or have a private queue to do this. 930 * 931 * Returns %NULL for an empty list or a pointer to the tail element. 932 * The reference count is not incremented and the reference is therefore 933 * volatile. Use with caution. 934 */ 935 static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_) 936 { 937 struct sk_buff *skb = list_->prev; 938 939 if (skb == (struct sk_buff *)list_) 940 skb = NULL; 941 return skb; 942 943 } 944 945 /** 946 * skb_queue_len - get queue length 947 * @list_: list to measure 948 * 949 * Return the length of an &sk_buff queue. 950 */ 951 static inline __u32 skb_queue_len(const struct sk_buff_head *list_) 952 { 953 return list_->qlen; 954 } 955 956 /** 957 * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head 958 * @list: queue to initialize 959 * 960 * This initializes only the list and queue length aspects of 961 * an sk_buff_head object. This allows to initialize the list 962 * aspects of an sk_buff_head without reinitializing things like 963 * the spinlock. It can also be used for on-stack sk_buff_head 964 * objects where the spinlock is known to not be used. 965 */ 966 static inline void __skb_queue_head_init(struct sk_buff_head *list) 967 { 968 list->prev = list->next = (struct sk_buff *)list; 969 list->qlen = 0; 970 } 971 972 /* 973 * This function creates a split out lock class for each invocation; 974 * this is needed for now since a whole lot of users of the skb-queue 975 * infrastructure in drivers have different locking usage (in hardirq) 976 * than the networking core (in softirq only). In the long run either the 977 * network layer or drivers should need annotation to consolidate the 978 * main types of usage into 3 classes. 
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

/* As skb_queue_head_init(), but with a caller-supplied lockdep class. */
static inline void skb_queue_head_init_class(struct sk_buff_head *list,
		struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}

/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);

/* Link @newsk between @prev and @next (no locking, updates qlen). */
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev = prev->next = newsk;
	list->qlen++;
}

/* Splice the whole of @list between @prev and @next; does not touch qlen. */
static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

/**
 *	skb_queue_splice - join two skb lists, this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	__skb_queue_after - queue a buffer after a given element in a list
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
1097 */ 1098 static inline void __skb_queue_after(struct sk_buff_head *list, 1099 struct sk_buff *prev, 1100 struct sk_buff *newsk) 1101 { 1102 __skb_insert(newsk, prev, prev->next, list); 1103 } 1104 1105 extern void skb_append(struct sk_buff *old, struct sk_buff *newsk, 1106 struct sk_buff_head *list); 1107 1108 static inline void __skb_queue_before(struct sk_buff_head *list, 1109 struct sk_buff *next, 1110 struct sk_buff *newsk) 1111 { 1112 __skb_insert(newsk, next->prev, next, list); 1113 } 1114 1115 /** 1116 * __skb_queue_head - queue a buffer at the list head 1117 * @list: list to use 1118 * @newsk: buffer to queue 1119 * 1120 * Queue a buffer at the start of a list. This function takes no locks 1121 * and you must therefore hold required locks before calling it. 1122 * 1123 * A buffer cannot be placed on two lists at the same time. 1124 */ 1125 extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk); 1126 static inline void __skb_queue_head(struct sk_buff_head *list, 1127 struct sk_buff *newsk) 1128 { 1129 __skb_queue_after(list, (struct sk_buff *)list, newsk); 1130 } 1131 1132 /** 1133 * __skb_queue_tail - queue a buffer at the list tail 1134 * @list: list to use 1135 * @newsk: buffer to queue 1136 * 1137 * Queue a buffer at the end of a list. This function takes no locks 1138 * and you must therefore hold required locks before calling it. 1139 * 1140 * A buffer cannot be placed on two lists at the same time. 1141 */ 1142 extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk); 1143 static inline void __skb_queue_tail(struct sk_buff_head *list, 1144 struct sk_buff *newsk) 1145 { 1146 __skb_queue_before(list, (struct sk_buff *)list, newsk); 1147 } 1148 1149 /* 1150 * remove sk_buff from list. _Must_ be called atomically, and with 1151 * the list known.. 
 */
extern void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);

/* Unlink @skb from @list and poison its link pointers with NULL. */
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next = skb->next;
	prev = skb->prev;
	skb->next = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
1190 */ 1191 extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list); 1192 static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list) 1193 { 1194 struct sk_buff *skb = skb_peek_tail(list); 1195 if (skb) 1196 __skb_unlink(skb, list); 1197 return skb; 1198 } 1199 1200 1201 static inline bool skb_is_nonlinear(const struct sk_buff *skb) 1202 { 1203 return skb->data_len; 1204 } 1205 1206 static inline unsigned int skb_headlen(const struct sk_buff *skb) 1207 { 1208 return skb->len - skb->data_len; 1209 } 1210 1211 static inline int skb_pagelen(const struct sk_buff *skb) 1212 { 1213 int i, len = 0; 1214 1215 for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) 1216 len += skb_frag_size(&skb_shinfo(skb)->frags[i]); 1217 return len + skb_headlen(skb); 1218 } 1219 1220 /** 1221 * __skb_fill_page_desc - initialise a paged fragment in an skb 1222 * @skb: buffer containing fragment to be initialised 1223 * @i: paged fragment index to initialise 1224 * @page: the page to use for this fragment 1225 * @off: the offset to the data with @page 1226 * @size: the length of the data 1227 * 1228 * Initialises the @i'th fragment of @skb to point to &size bytes at 1229 * offset @off within @page. 1230 * 1231 * Does not take any additional reference on the fragment. 
1232 */ 1233 static inline void __skb_fill_page_desc(struct sk_buff *skb, int i, 1234 struct page *page, int off, int size) 1235 { 1236 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1237 1238 frag->page.p = page; 1239 frag->page_offset = off; 1240 skb_frag_size_set(frag, size); 1241 } 1242 1243 /** 1244 * skb_fill_page_desc - initialise a paged fragment in an skb 1245 * @skb: buffer containing fragment to be initialised 1246 * @i: paged fragment index to initialise 1247 * @page: the page to use for this fragment 1248 * @off: the offset to the data with @page 1249 * @size: the length of the data 1250 * 1251 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of 1252 * @skb to point to &size bytes at offset @off within @page. In 1253 * addition updates @skb such that @i is the last fragment. 1254 * 1255 * Does not take any additional reference on the fragment. 1256 */ 1257 static inline void skb_fill_page_desc(struct sk_buff *skb, int i, 1258 struct page *page, int off, int size) 1259 { 1260 __skb_fill_page_desc(skb, i, page, off, size); 1261 skb_shinfo(skb)->nr_frags = i + 1; 1262 } 1263 1264 extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, 1265 int off, int size, unsigned int truesize); 1266 1267 #define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags) 1268 #define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frag_list(skb)) 1269 #define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb)) 1270 1271 #ifdef NET_SKBUFF_DATA_USES_OFFSET 1272 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb) 1273 { 1274 return skb->head + skb->tail; 1275 } 1276 1277 static inline void skb_reset_tail_pointer(struct sk_buff *skb) 1278 { 1279 skb->tail = skb->data - skb->head; 1280 } 1281 1282 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) 1283 { 1284 skb_reset_tail_pointer(skb); 1285 skb->tail += offset; 1286 } 1287 #else /* NET_SKBUFF_DATA_USES_OFFSET */ 1288 static inline unsigned char 
*skb_tail_pointer(const struct sk_buff *skb) 1289 { 1290 return skb->tail; 1291 } 1292 1293 static inline void skb_reset_tail_pointer(struct sk_buff *skb) 1294 { 1295 skb->tail = skb->data; 1296 } 1297 1298 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) 1299 { 1300 skb->tail = skb->data + offset; 1301 } 1302 1303 #endif /* NET_SKBUFF_DATA_USES_OFFSET */ 1304 1305 /* 1306 * Add data to an sk_buff 1307 */ 1308 extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len); 1309 static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len) 1310 { 1311 unsigned char *tmp = skb_tail_pointer(skb); 1312 SKB_LINEAR_ASSERT(skb); 1313 skb->tail += len; 1314 skb->len += len; 1315 return tmp; 1316 } 1317 1318 extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len); 1319 static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len) 1320 { 1321 skb->data -= len; 1322 skb->len += len; 1323 return skb->data; 1324 } 1325 1326 extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len); 1327 static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len) 1328 { 1329 skb->len -= len; 1330 BUG_ON(skb->len < skb->data_len); 1331 return skb->data += len; 1332 } 1333 1334 static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len) 1335 { 1336 return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len); 1337 } 1338 1339 extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta); 1340 1341 static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len) 1342 { 1343 if (len > skb_headlen(skb) && 1344 !__pskb_pull_tail(skb, len - skb_headlen(skb))) 1345 return NULL; 1346 skb->len -= len; 1347 return skb->data += len; 1348 } 1349 1350 static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len) 1351 { 1352 return unlikely(len > skb->len) ? 
NULL : __pskb_pull(skb, len); 1353 } 1354 1355 static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len) 1356 { 1357 if (likely(len <= skb_headlen(skb))) 1358 return 1; 1359 if (unlikely(len > skb->len)) 1360 return 0; 1361 return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL; 1362 } 1363 1364 /** 1365 * skb_headroom - bytes at buffer head 1366 * @skb: buffer to check 1367 * 1368 * Return the number of bytes of free space at the head of an &sk_buff. 1369 */ 1370 static inline unsigned int skb_headroom(const struct sk_buff *skb) 1371 { 1372 return skb->data - skb->head; 1373 } 1374 1375 /** 1376 * skb_tailroom - bytes at buffer end 1377 * @skb: buffer to check 1378 * 1379 * Return the number of bytes of free space at the tail of an sk_buff 1380 */ 1381 static inline int skb_tailroom(const struct sk_buff *skb) 1382 { 1383 return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail; 1384 } 1385 1386 /** 1387 * skb_availroom - bytes at buffer end 1388 * @skb: buffer to check 1389 * 1390 * Return the number of bytes of free space at the tail of an sk_buff 1391 * allocated by sk_stream_alloc() 1392 */ 1393 static inline int skb_availroom(const struct sk_buff *skb) 1394 { 1395 return skb_is_nonlinear(skb) ? 0 : skb->avail_size - skb->len; 1396 } 1397 1398 /** 1399 * skb_reserve - adjust headroom 1400 * @skb: buffer to alter 1401 * @len: bytes to move 1402 * 1403 * Increase the headroom of an empty &sk_buff by reducing the tail 1404 * room. This is only allowed for an empty buffer. 
1405 */ 1406 static inline void skb_reserve(struct sk_buff *skb, int len) 1407 { 1408 skb->data += len; 1409 skb->tail += len; 1410 } 1411 1412 static inline void skb_reset_mac_len(struct sk_buff *skb) 1413 { 1414 skb->mac_len = skb->network_header - skb->mac_header; 1415 } 1416 1417 #ifdef NET_SKBUFF_DATA_USES_OFFSET 1418 static inline unsigned char *skb_transport_header(const struct sk_buff *skb) 1419 { 1420 return skb->head + skb->transport_header; 1421 } 1422 1423 static inline void skb_reset_transport_header(struct sk_buff *skb) 1424 { 1425 skb->transport_header = skb->data - skb->head; 1426 } 1427 1428 static inline void skb_set_transport_header(struct sk_buff *skb, 1429 const int offset) 1430 { 1431 skb_reset_transport_header(skb); 1432 skb->transport_header += offset; 1433 } 1434 1435 static inline unsigned char *skb_network_header(const struct sk_buff *skb) 1436 { 1437 return skb->head + skb->network_header; 1438 } 1439 1440 static inline void skb_reset_network_header(struct sk_buff *skb) 1441 { 1442 skb->network_header = skb->data - skb->head; 1443 } 1444 1445 static inline void skb_set_network_header(struct sk_buff *skb, const int offset) 1446 { 1447 skb_reset_network_header(skb); 1448 skb->network_header += offset; 1449 } 1450 1451 static inline unsigned char *skb_mac_header(const struct sk_buff *skb) 1452 { 1453 return skb->head + skb->mac_header; 1454 } 1455 1456 static inline int skb_mac_header_was_set(const struct sk_buff *skb) 1457 { 1458 return skb->mac_header != ~0U; 1459 } 1460 1461 static inline void skb_reset_mac_header(struct sk_buff *skb) 1462 { 1463 skb->mac_header = skb->data - skb->head; 1464 } 1465 1466 static inline void skb_set_mac_header(struct sk_buff *skb, const int offset) 1467 { 1468 skb_reset_mac_header(skb); 1469 skb->mac_header += offset; 1470 } 1471 1472 #else /* NET_SKBUFF_DATA_USES_OFFSET */ 1473 1474 static inline unsigned char *skb_transport_header(const struct sk_buff *skb) 1475 { 1476 return skb->transport_header; 1477 } 

/*
 * Pointer-based header accessors, used when NET_SKBUFF_DATA_USES_OFFSET
 * is not defined: the header fields hold plain pointers into the skb's
 * linear data rather than offsets from skb->head.
 */
static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb->transport_header = skb->data + offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb->network_header = skb->data + offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->mac_header;
}

/* In the pointer variant an unset MAC header is a NULL pointer. */
static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != NULL;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb->mac_header = skb->data + offset;
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 * Move the MAC header so it immediately precedes the current data
 * pointer, copying the old header contents there (memmove: the old and
 * new locations may overlap).  Only acts if a MAC header was set.
 */
static inline void skb_mac_header_rebuild(struct sk_buff *skb)
{
	if (skb_mac_header_was_set(skb)) {
		const unsigned char *old_mac = skb_mac_header(skb);

		skb_set_mac_header(skb, -skb->mac_len);
		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
	}
}

/* Checksum start expressed as an offset from skb->data. */
static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
	return skb->csum_start - skb_headroom(skb);
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

/* Length of the network header, i.e. the gap up to the transport header. */
static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}
1550 1551 static inline int skb_network_offset(const struct sk_buff *skb) 1552 { 1553 return skb_network_header(skb) - skb->data; 1554 } 1555 1556 static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len) 1557 { 1558 return pskb_may_pull(skb, skb_network_offset(skb) + len); 1559 } 1560 1561 /* 1562 * CPUs often take a performance hit when accessing unaligned memory 1563 * locations. The actual performance hit varies, it can be small if the 1564 * hardware handles it or large if we have to take an exception and fix it 1565 * in software. 1566 * 1567 * Since an ethernet header is 14 bytes network drivers often end up with 1568 * the IP header at an unaligned offset. The IP header can be aligned by 1569 * shifting the start of the packet by 2 bytes. Drivers should do this 1570 * with: 1571 * 1572 * skb_reserve(skb, NET_IP_ALIGN); 1573 * 1574 * The downside to this alignment of the IP header is that the DMA is now 1575 * unaligned. On some architectures the cost of an unaligned DMA is high 1576 * and this cost outweighs the gains made by aligning the IP header. 1577 * 1578 * Since this trade off varies between architectures, we allow NET_IP_ALIGN 1579 * to be overridden. 1580 */ 1581 #ifndef NET_IP_ALIGN 1582 #define NET_IP_ALIGN 2 1583 #endif 1584 1585 /* 1586 * The networking layer reserves some headroom in skb data (via 1587 * dev_alloc_skb). This is used to avoid having to reallocate skb data when 1588 * the header has to grow. In the default case, if the header has to grow 1589 * 32 bytes or less we avoid the reallocation. 1590 * 1591 * Unfortunately this headroom changes the DMA alignment of the resulting 1592 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive 1593 * on some architectures. An architecture can override this value, 1594 * perhaps setting it to a cacheline in size (since that will maintain 1595 * cacheline alignment of the DMA). It must be a power of 2. 
1596 * 1597 * Various parts of the networking layer expect at least 32 bytes of 1598 * headroom, you should not reduce this. 1599 * 1600 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS) 1601 * to reduce average number of cache lines per packet. 1602 * get_rps_cpus() for example only access one 64 bytes aligned block : 1603 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8) 1604 */ 1605 #ifndef NET_SKB_PAD 1606 #define NET_SKB_PAD max(32, L1_CACHE_BYTES) 1607 #endif 1608 1609 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len); 1610 1611 static inline void __skb_trim(struct sk_buff *skb, unsigned int len) 1612 { 1613 if (unlikely(skb_is_nonlinear(skb))) { 1614 WARN_ON(1); 1615 return; 1616 } 1617 skb->len = len; 1618 skb_set_tail_pointer(skb, len); 1619 } 1620 1621 extern void skb_trim(struct sk_buff *skb, unsigned int len); 1622 1623 static inline int __pskb_trim(struct sk_buff *skb, unsigned int len) 1624 { 1625 if (skb->data_len) 1626 return ___pskb_trim(skb, len); 1627 __skb_trim(skb, len); 1628 return 0; 1629 } 1630 1631 static inline int pskb_trim(struct sk_buff *skb, unsigned int len) 1632 { 1633 return (len < skb->len) ? __pskb_trim(skb, len) : 0; 1634 } 1635 1636 /** 1637 * pskb_trim_unique - remove end from a paged unique (not cloned) buffer 1638 * @skb: buffer to alter 1639 * @len: new length 1640 * 1641 * This is identical to pskb_trim except that the caller knows that 1642 * the skb is not cloned so we should never get an error due to out- 1643 * of-memory. 1644 */ 1645 static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len) 1646 { 1647 int err = pskb_trim(skb, len); 1648 BUG_ON(err); 1649 } 1650 1651 /** 1652 * skb_orphan - orphan a buffer 1653 * @skb: buffer to orphan 1654 * 1655 * If a buffer currently has an owner then we call the owner's 1656 * destructor function and make the @skb unowned. The buffer continues 1657 * to exist but is no longer charged to its former owner. 
1658 */ 1659 static inline void skb_orphan(struct sk_buff *skb) 1660 { 1661 if (skb->destructor) 1662 skb->destructor(skb); 1663 skb->destructor = NULL; 1664 skb->sk = NULL; 1665 } 1666 1667 /** 1668 * __skb_queue_purge - empty a list 1669 * @list: list to empty 1670 * 1671 * Delete all buffers on an &sk_buff list. Each buffer is removed from 1672 * the list and one reference dropped. This function does not take the 1673 * list lock and the caller must hold the relevant locks to use it. 1674 */ 1675 extern void skb_queue_purge(struct sk_buff_head *list); 1676 static inline void __skb_queue_purge(struct sk_buff_head *list) 1677 { 1678 struct sk_buff *skb; 1679 while ((skb = __skb_dequeue(list)) != NULL) 1680 kfree_skb(skb); 1681 } 1682 1683 /** 1684 * __dev_alloc_skb - allocate an skbuff for receiving 1685 * @length: length to allocate 1686 * @gfp_mask: get_free_pages mask, passed to alloc_skb 1687 * 1688 * Allocate a new &sk_buff and assign it a usage count of one. The 1689 * buffer has unspecified headroom built in. Users should allocate 1690 * the headroom they think they need without accounting for the 1691 * built in space. The built in space is used for optimisations. 1692 * 1693 * %NULL is returned if there is no free memory. 1694 */ 1695 static inline struct sk_buff *__dev_alloc_skb(unsigned int length, 1696 gfp_t gfp_mask) 1697 { 1698 struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask); 1699 if (likely(skb)) 1700 skb_reserve(skb, NET_SKB_PAD); 1701 return skb; 1702 } 1703 1704 extern struct sk_buff *dev_alloc_skb(unsigned int length); 1705 1706 extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev, 1707 unsigned int length, gfp_t gfp_mask); 1708 1709 /** 1710 * netdev_alloc_skb - allocate an skbuff for rx on a specific device 1711 * @dev: network device to receive on 1712 * @length: length to allocate 1713 * 1714 * Allocate a new &sk_buff and assign it a usage count of one. The 1715 * buffer has unspecified headroom built in. 
Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

/*
 * As __netdev_alloc_skb(), but additionally reserves NET_IP_ALIGN bytes
 * of headroom so that the IP header ends up aligned (when NET_IP_ALIGN
 * is non-zero on this architecture).
 */
static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length, gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}

/**
 * skb_frag_page - retrieve the page referred to by a paged fragment
 * @frag: the paged fragment
 *
 * Returns the &struct page associated with @frag.
 */
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
	return frag->page.p;
}

/**
 * __skb_frag_ref - take an additional reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Takes an additional reference on the paged fragment @frag.
 */
static inline void __skb_frag_ref(skb_frag_t *frag)
{
	get_page(skb_frag_page(frag));
}

/**
 * skb_frag_ref - take an additional reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset.
 *
 * Takes an additional reference on the @f'th paged fragment of @skb.
1772 */ 1773 static inline void skb_frag_ref(struct sk_buff *skb, int f) 1774 { 1775 __skb_frag_ref(&skb_shinfo(skb)->frags[f]); 1776 } 1777 1778 /** 1779 * __skb_frag_unref - release a reference on a paged fragment. 1780 * @frag: the paged fragment 1781 * 1782 * Releases a reference on the paged fragment @frag. 1783 */ 1784 static inline void __skb_frag_unref(skb_frag_t *frag) 1785 { 1786 put_page(skb_frag_page(frag)); 1787 } 1788 1789 /** 1790 * skb_frag_unref - release a reference on a paged fragment of an skb. 1791 * @skb: the buffer 1792 * @f: the fragment offset 1793 * 1794 * Releases a reference on the @f'th paged fragment of @skb. 1795 */ 1796 static inline void skb_frag_unref(struct sk_buff *skb, int f) 1797 { 1798 __skb_frag_unref(&skb_shinfo(skb)->frags[f]); 1799 } 1800 1801 /** 1802 * skb_frag_address - gets the address of the data contained in a paged fragment 1803 * @frag: the paged fragment buffer 1804 * 1805 * Returns the address of the data within @frag. The page must already 1806 * be mapped. 1807 */ 1808 static inline void *skb_frag_address(const skb_frag_t *frag) 1809 { 1810 return page_address(skb_frag_page(frag)) + frag->page_offset; 1811 } 1812 1813 /** 1814 * skb_frag_address_safe - gets the address of the data contained in a paged fragment 1815 * @frag: the paged fragment buffer 1816 * 1817 * Returns the address of the data within @frag. Checks that the page 1818 * is mapped and returns %NULL otherwise. 1819 */ 1820 static inline void *skb_frag_address_safe(const skb_frag_t *frag) 1821 { 1822 void *ptr = page_address(skb_frag_page(frag)); 1823 if (unlikely(!ptr)) 1824 return NULL; 1825 1826 return ptr + frag->page_offset; 1827 } 1828 1829 /** 1830 * __skb_frag_set_page - sets the page contained in a paged fragment 1831 * @frag: the paged fragment 1832 * @page: the page to set 1833 * 1834 * Sets the fragment @frag to contain @page. 
 */
static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
{
	frag->page.p = page;
}

/**
 * skb_frag_set_page - sets the page contained in a paged fragment of an skb
 * @skb: the buffer
 * @f: the fragment offset
 * @page: the page to set
 *
 * Sets the @f'th fragment of @skb to contain @page.
 */
static inline void skb_frag_set_page(struct sk_buff *skb, int f,
				     struct page *page)
{
	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
}

/**
 * skb_frag_dma_map - maps a paged fragment via the DMA API
 * @dev: the device to map the fragment to
 * @frag: the paged fragment to map
 * @offset: the offset within the fragment (starting at the
 *          fragment's own offset)
 * @size: the number of bytes to map
 * @dir: the direction of the mapping (%DMA_* value of
 *       enum dma_data_direction)
 *
 * Maps the page associated with @frag to @device.
 */
static inline dma_addr_t skb_frag_dma_map(struct device *dev,
					  const skb_frag_t *frag,
					  size_t offset, size_t size,
					  enum dma_data_direction dir)
{
	return dma_map_page(dev, skb_frag_page(frag),
			    frag->page_offset + offset, size, dir);
}

/* Wrapper around __pskb_copy() that preserves the skb's current headroom. */
static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
					gfp_t gfp_mask)
{
	return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
}

/**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
 *	@len: length up to which to write
 *
 *	Returns true if modifying the header part of the cloned buffer
 *	does not require the data to be copied.
1888 */ 1889 static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len) 1890 { 1891 return !skb_header_cloned(skb) && 1892 skb_headroom(skb) + len <= skb->hdr_len; 1893 } 1894 1895 static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom, 1896 int cloned) 1897 { 1898 int delta = 0; 1899 1900 if (headroom < NET_SKB_PAD) 1901 headroom = NET_SKB_PAD; 1902 if (headroom > skb_headroom(skb)) 1903 delta = headroom - skb_headroom(skb); 1904 1905 if (delta || cloned) 1906 return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0, 1907 GFP_ATOMIC); 1908 return 0; 1909 } 1910 1911 /** 1912 * skb_cow - copy header of skb when it is required 1913 * @skb: buffer to cow 1914 * @headroom: needed headroom 1915 * 1916 * If the skb passed lacks sufficient headroom or its data part 1917 * is shared, data is reallocated. If reallocation fails, an error 1918 * is returned and original skb is not changed. 1919 * 1920 * The result is skb with writable area skb->head...skb->tail 1921 * and at least @headroom of space at head. 1922 */ 1923 static inline int skb_cow(struct sk_buff *skb, unsigned int headroom) 1924 { 1925 return __skb_cow(skb, headroom, skb_cloned(skb)); 1926 } 1927 1928 /** 1929 * skb_cow_head - skb_cow but only making the head writable 1930 * @skb: buffer to cow 1931 * @headroom: needed headroom 1932 * 1933 * This function is identical to skb_cow except that we replace the 1934 * skb_cloned check by skb_header_cloned. It should be used when 1935 * you only need to push on some header and do not need to modify 1936 * the data. 1937 */ 1938 static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom) 1939 { 1940 return __skb_cow(skb, headroom, skb_header_cloned(skb)); 1941 } 1942 1943 /** 1944 * skb_padto - pad an skbuff up to a minimal size 1945 * @skb: buffer to pad 1946 * @len: minimal length 1947 * 1948 * Pads up a buffer to ensure the trailing bytes exist and are 1949 * blanked. 
If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */

static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

/*
 * Append @copy bytes of user data to @skb.  When no hardware checksum is
 * in progress (CHECKSUM_NONE) the checksum is folded into skb->csum while
 * copying; otherwise a plain copy_from_user() is used.  On a failed copy
 * the skb is trimmed back to its original length and -EFAULT is returned.
 */
static inline int skb_add_data(struct sk_buff *skb,
			       char __user *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
							copy, 0, &err);
		if (!err) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

/*
 * True if (@page, @off) directly continues the last fragment of @skb,
 * i.e. the new data can be merged into frag i-1 instead of consuming a
 * new fragment slot.  Always false when there are no fragments yet.
 */
static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
				    const struct page *page, int off)
{
	if (i) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == skb_frag_page(frag) &&
		       off == frag->page_offset + skb_frag_size(frag);
	}
	return false;
}

/* Pull all paged data into the linear area; 0 on success, -ENOMEM on failure. */
static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
2017 */ 2018 static inline int skb_linearize_cow(struct sk_buff *skb) 2019 { 2020 return skb_is_nonlinear(skb) || skb_cloned(skb) ? 2021 __skb_linearize(skb) : 0; 2022 } 2023 2024 /** 2025 * skb_postpull_rcsum - update checksum for received skb after pull 2026 * @skb: buffer to update 2027 * @start: start of data before pull 2028 * @len: length of data pulled 2029 * 2030 * After doing a pull on a received packet, you need to call this to 2031 * update the CHECKSUM_COMPLETE checksum, or set ip_summed to 2032 * CHECKSUM_NONE so that it can be recomputed from scratch. 2033 */ 2034 2035 static inline void skb_postpull_rcsum(struct sk_buff *skb, 2036 const void *start, unsigned int len) 2037 { 2038 if (skb->ip_summed == CHECKSUM_COMPLETE) 2039 skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0)); 2040 } 2041 2042 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); 2043 2044 /** 2045 * pskb_trim_rcsum - trim received skb and update checksum 2046 * @skb: buffer to trim 2047 * @len: new length 2048 * 2049 * This is exactly the same as pskb_trim except that it ensures the 2050 * checksum of received packets are still valid after the operation. 
 */

static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	/* A CHECKSUM_COMPLETE value covers the bytes being trimmed away,
	 * so it is no longer valid; fall back to CHECKSUM_NONE. */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}

/* Iterate over a queue front-to-back; @skb must not be unlinked inside. */
#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;					\
		     skb != (struct sk_buff *)(queue);				\
		     skb = skb->next)

/* As skb_queue_walk(), but safe against removal of the current @skb. */
#define skb_queue_walk_safe(queue, skb, tmp)					\
		for (skb = (queue)->next, tmp = skb->next;			\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)

/* Continue walking from the current value of @skb. */
#define skb_queue_walk_from(queue, skb)						\
		for (; skb != (struct sk_buff *)(queue);			\
		     skb = skb->next)

#define skb_queue_walk_from_safe(queue, skb, tmp)				\
		for (tmp = skb->next;						\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)

/* Iterate over a queue back-to-front. */
#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;					\
		     skb != (struct sk_buff *)(queue);				\
		     skb = skb->prev)

#define skb_queue_reverse_walk_safe(queue, skb, tmp)				\
		for (skb = (queue)->prev, tmp = skb->prev;			\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->prev)

#define skb_queue_reverse_walk_from_safe(queue, skb, tmp)			\
		for (tmp = skb->prev;						\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->prev)

static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

/* Push @frag onto the front of @skb's frag_list. */
static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
{
	frag->next = skb_shinfo(skb)->frag_list;
	skb_shinfo(skb)->frag_list = frag;
}

/* Iterate over every skb on @skb's frag_list. */
#define skb_walk_frags(skb, iter)	\
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)

extern struct sk_buff
*__skb_recv_datagram(struct sock *sk, unsigned flags, 2116 int *peeked, int *off, int *err); 2117 extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, 2118 int noblock, int *err); 2119 extern unsigned int datagram_poll(struct file *file, struct socket *sock, 2120 struct poll_table_struct *wait); 2121 extern int skb_copy_datagram_iovec(const struct sk_buff *from, 2122 int offset, struct iovec *to, 2123 int size); 2124 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, 2125 int hlen, 2126 struct iovec *iov); 2127 extern int skb_copy_datagram_from_iovec(struct sk_buff *skb, 2128 int offset, 2129 const struct iovec *from, 2130 int from_offset, 2131 int len); 2132 extern int skb_copy_datagram_const_iovec(const struct sk_buff *from, 2133 int offset, 2134 const struct iovec *to, 2135 int to_offset, 2136 int size); 2137 extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb); 2138 extern void skb_free_datagram_locked(struct sock *sk, 2139 struct sk_buff *skb); 2140 extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, 2141 unsigned int flags); 2142 extern __wsum skb_checksum(const struct sk_buff *skb, int offset, 2143 int len, __wsum csum); 2144 extern int skb_copy_bits(const struct sk_buff *skb, int offset, 2145 void *to, int len); 2146 extern int skb_store_bits(struct sk_buff *skb, int offset, 2147 const void *from, int len); 2148 extern __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, 2149 int offset, u8 *to, int len, 2150 __wsum csum); 2151 extern int skb_splice_bits(struct sk_buff *skb, 2152 unsigned int offset, 2153 struct pipe_inode_info *pipe, 2154 unsigned int len, 2155 unsigned int flags); 2156 extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); 2157 extern void skb_split(struct sk_buff *skb, 2158 struct sk_buff *skb1, const u32 len); 2159 extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, 2160 int shiftlen); 2161 2162 extern struct sk_buff *skb_segment(struct 
sk_buff *skb, 2163 netdev_features_t features); 2164 2165 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, 2166 int len, void *buffer) 2167 { 2168 int hlen = skb_headlen(skb); 2169 2170 if (hlen - offset >= len) 2171 return skb->data + offset; 2172 2173 if (skb_copy_bits(skb, offset, buffer, len) < 0) 2174 return NULL; 2175 2176 return buffer; 2177 } 2178 2179 static inline void skb_copy_from_linear_data(const struct sk_buff *skb, 2180 void *to, 2181 const unsigned int len) 2182 { 2183 memcpy(to, skb->data, len); 2184 } 2185 2186 static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb, 2187 const int offset, void *to, 2188 const unsigned int len) 2189 { 2190 memcpy(to, skb->data + offset, len); 2191 } 2192 2193 static inline void skb_copy_to_linear_data(struct sk_buff *skb, 2194 const void *from, 2195 const unsigned int len) 2196 { 2197 memcpy(skb->data, from, len); 2198 } 2199 2200 static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb, 2201 const int offset, 2202 const void *from, 2203 const unsigned int len) 2204 { 2205 memcpy(skb->data + offset, from, len); 2206 } 2207 2208 extern void skb_init(void); 2209 2210 static inline ktime_t skb_get_ktime(const struct sk_buff *skb) 2211 { 2212 return skb->tstamp; 2213 } 2214 2215 /** 2216 * skb_get_timestamp - get timestamp from a skb 2217 * @skb: skb to get stamp from 2218 * @stamp: pointer to struct timeval to store stamp in 2219 * 2220 * Timestamps are stored in the skb as offsets to a base timestamp. 2221 * This function converts the offset back to a struct timeval and stores 2222 * it in stamp. 
2223 */ 2224 static inline void skb_get_timestamp(const struct sk_buff *skb, 2225 struct timeval *stamp) 2226 { 2227 *stamp = ktime_to_timeval(skb->tstamp); 2228 } 2229 2230 static inline void skb_get_timestampns(const struct sk_buff *skb, 2231 struct timespec *stamp) 2232 { 2233 *stamp = ktime_to_timespec(skb->tstamp); 2234 } 2235 2236 static inline void __net_timestamp(struct sk_buff *skb) 2237 { 2238 skb->tstamp = ktime_get_real(); 2239 } 2240 2241 static inline ktime_t net_timedelta(ktime_t t) 2242 { 2243 return ktime_sub(ktime_get_real(), t); 2244 } 2245 2246 static inline ktime_t net_invalid_timestamp(void) 2247 { 2248 return ktime_set(0, 0); 2249 } 2250 2251 extern void skb_timestamping_init(void); 2252 2253 #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING 2254 2255 extern void skb_clone_tx_timestamp(struct sk_buff *skb); 2256 extern bool skb_defer_rx_timestamp(struct sk_buff *skb); 2257 2258 #else /* CONFIG_NETWORK_PHY_TIMESTAMPING */ 2259 2260 static inline void skb_clone_tx_timestamp(struct sk_buff *skb) 2261 { 2262 } 2263 2264 static inline bool skb_defer_rx_timestamp(struct sk_buff *skb) 2265 { 2266 return false; 2267 } 2268 2269 #endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */ 2270 2271 /** 2272 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps 2273 * 2274 * PHY drivers may accept clones of transmitted packets for 2275 * timestamping via their phy_driver.txtstamp method. These drivers 2276 * must call this function to return the skb back to the stack, with 2277 * or without a timestamp. 
2278 * 2279 * @skb: clone of the the original outgoing packet 2280 * @hwtstamps: hardware time stamps, may be NULL if not available 2281 * 2282 */ 2283 void skb_complete_tx_timestamp(struct sk_buff *skb, 2284 struct skb_shared_hwtstamps *hwtstamps); 2285 2286 /** 2287 * skb_tstamp_tx - queue clone of skb with send time stamps 2288 * @orig_skb: the original outgoing packet 2289 * @hwtstamps: hardware time stamps, may be NULL if not available 2290 * 2291 * If the skb has a socket associated, then this function clones the 2292 * skb (thus sharing the actual data and optional structures), stores 2293 * the optional hardware time stamping information (if non NULL) or 2294 * generates a software time stamp (otherwise), then queues the clone 2295 * to the error queue of the socket. Errors are silently ignored. 2296 */ 2297 extern void skb_tstamp_tx(struct sk_buff *orig_skb, 2298 struct skb_shared_hwtstamps *hwtstamps); 2299 2300 static inline void sw_tx_timestamp(struct sk_buff *skb) 2301 { 2302 if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP && 2303 !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) 2304 skb_tstamp_tx(skb, NULL); 2305 } 2306 2307 /** 2308 * skb_tx_timestamp() - Driver hook for transmit timestamping 2309 * 2310 * Ethernet MAC Drivers should call this function in their hard_xmit() 2311 * function immediately before giving the sk_buff to the MAC hardware. 2312 * 2313 * @skb: A socket buffer. 
2314 */ 2315 static inline void skb_tx_timestamp(struct sk_buff *skb) 2316 { 2317 skb_clone_tx_timestamp(skb); 2318 sw_tx_timestamp(skb); 2319 } 2320 2321 /** 2322 * skb_complete_wifi_ack - deliver skb with wifi status 2323 * 2324 * @skb: the original outgoing packet 2325 * @acked: ack status 2326 * 2327 */ 2328 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked); 2329 2330 extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len); 2331 extern __sum16 __skb_checksum_complete(struct sk_buff *skb); 2332 2333 static inline int skb_csum_unnecessary(const struct sk_buff *skb) 2334 { 2335 return skb->ip_summed & CHECKSUM_UNNECESSARY; 2336 } 2337 2338 /** 2339 * skb_checksum_complete - Calculate checksum of an entire packet 2340 * @skb: packet to process 2341 * 2342 * This function calculates the checksum over the entire packet plus 2343 * the value of skb->csum. The latter can be used to supply the 2344 * checksum of a pseudo header as used by TCP/UDP. It returns the 2345 * checksum. 2346 * 2347 * For protocols that contain complete checksums such as ICMP/TCP/UDP, 2348 * this function can be used to verify that checksum on received 2349 * packets. In that case the function should return zero if the 2350 * checksum is correct. In particular, this function will return zero 2351 * if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the 2352 * hardware has already verified the correctness of the checksum. 2353 */ 2354 static inline __sum16 skb_checksum_complete(struct sk_buff *skb) 2355 { 2356 return skb_csum_unnecessary(skb) ? 
2357 0 : __skb_checksum_complete(skb); 2358 } 2359 2360 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 2361 extern void nf_conntrack_destroy(struct nf_conntrack *nfct); 2362 static inline void nf_conntrack_put(struct nf_conntrack *nfct) 2363 { 2364 if (nfct && atomic_dec_and_test(&nfct->use)) 2365 nf_conntrack_destroy(nfct); 2366 } 2367 static inline void nf_conntrack_get(struct nf_conntrack *nfct) 2368 { 2369 if (nfct) 2370 atomic_inc(&nfct->use); 2371 } 2372 #endif 2373 #ifdef NET_SKBUFF_NF_DEFRAG_NEEDED 2374 static inline void nf_conntrack_get_reasm(struct sk_buff *skb) 2375 { 2376 if (skb) 2377 atomic_inc(&skb->users); 2378 } 2379 static inline void nf_conntrack_put_reasm(struct sk_buff *skb) 2380 { 2381 if (skb) 2382 kfree_skb(skb); 2383 } 2384 #endif 2385 #ifdef CONFIG_BRIDGE_NETFILTER 2386 static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge) 2387 { 2388 if (nf_bridge && atomic_dec_and_test(&nf_bridge->use)) 2389 kfree(nf_bridge); 2390 } 2391 static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge) 2392 { 2393 if (nf_bridge) 2394 atomic_inc(&nf_bridge->use); 2395 } 2396 #endif /* CONFIG_BRIDGE_NETFILTER */ 2397 static inline void nf_reset(struct sk_buff *skb) 2398 { 2399 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 2400 nf_conntrack_put(skb->nfct); 2401 skb->nfct = NULL; 2402 #endif 2403 #ifdef NET_SKBUFF_NF_DEFRAG_NEEDED 2404 nf_conntrack_put_reasm(skb->nfct_reasm); 2405 skb->nfct_reasm = NULL; 2406 #endif 2407 #ifdef CONFIG_BRIDGE_NETFILTER 2408 nf_bridge_put(skb->nf_bridge); 2409 skb->nf_bridge = NULL; 2410 #endif 2411 } 2412 2413 /* Note: This doesn't put any conntrack and bridge info in dst. 
*/ 2414 static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src) 2415 { 2416 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 2417 dst->nfct = src->nfct; 2418 nf_conntrack_get(src->nfct); 2419 dst->nfctinfo = src->nfctinfo; 2420 #endif 2421 #ifdef NET_SKBUFF_NF_DEFRAG_NEEDED 2422 dst->nfct_reasm = src->nfct_reasm; 2423 nf_conntrack_get_reasm(src->nfct_reasm); 2424 #endif 2425 #ifdef CONFIG_BRIDGE_NETFILTER 2426 dst->nf_bridge = src->nf_bridge; 2427 nf_bridge_get(src->nf_bridge); 2428 #endif 2429 } 2430 2431 static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) 2432 { 2433 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 2434 nf_conntrack_put(dst->nfct); 2435 #endif 2436 #ifdef NET_SKBUFF_NF_DEFRAG_NEEDED 2437 nf_conntrack_put_reasm(dst->nfct_reasm); 2438 #endif 2439 #ifdef CONFIG_BRIDGE_NETFILTER 2440 nf_bridge_put(dst->nf_bridge); 2441 #endif 2442 __nf_copy(dst, src); 2443 } 2444 2445 #ifdef CONFIG_NETWORK_SECMARK 2446 static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from) 2447 { 2448 to->secmark = from->secmark; 2449 } 2450 2451 static inline void skb_init_secmark(struct sk_buff *skb) 2452 { 2453 skb->secmark = 0; 2454 } 2455 #else 2456 static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from) 2457 { } 2458 2459 static inline void skb_init_secmark(struct sk_buff *skb) 2460 { } 2461 #endif 2462 2463 static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping) 2464 { 2465 skb->queue_mapping = queue_mapping; 2466 } 2467 2468 static inline u16 skb_get_queue_mapping(const struct sk_buff *skb) 2469 { 2470 return skb->queue_mapping; 2471 } 2472 2473 static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from) 2474 { 2475 to->queue_mapping = from->queue_mapping; 2476 } 2477 2478 static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue) 2479 { 2480 skb->queue_mapping = 
rx_queue + 1; 2481 } 2482 2483 static inline u16 skb_get_rx_queue(const struct sk_buff *skb) 2484 { 2485 return skb->queue_mapping - 1; 2486 } 2487 2488 static inline bool skb_rx_queue_recorded(const struct sk_buff *skb) 2489 { 2490 return skb->queue_mapping != 0; 2491 } 2492 2493 extern u16 __skb_tx_hash(const struct net_device *dev, 2494 const struct sk_buff *skb, 2495 unsigned int num_tx_queues); 2496 2497 #ifdef CONFIG_XFRM 2498 static inline struct sec_path *skb_sec_path(struct sk_buff *skb) 2499 { 2500 return skb->sp; 2501 } 2502 #else 2503 static inline struct sec_path *skb_sec_path(struct sk_buff *skb) 2504 { 2505 return NULL; 2506 } 2507 #endif 2508 2509 static inline bool skb_is_gso(const struct sk_buff *skb) 2510 { 2511 return skb_shinfo(skb)->gso_size; 2512 } 2513 2514 static inline bool skb_is_gso_v6(const struct sk_buff *skb) 2515 { 2516 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; 2517 } 2518 2519 extern void __skb_warn_lro_forwarding(const struct sk_buff *skb); 2520 2521 static inline bool skb_warn_if_lro(const struct sk_buff *skb) 2522 { 2523 /* LRO sets gso_size but not gso_type, whereas if GSO is really 2524 * wanted then gso_type will be set. */ 2525 const struct skb_shared_info *shinfo = skb_shinfo(skb); 2526 2527 if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 && 2528 unlikely(shinfo->gso_type == 0)) { 2529 __skb_warn_lro_forwarding(skb); 2530 return true; 2531 } 2532 return false; 2533 } 2534 2535 static inline void skb_forward_csum(struct sk_buff *skb) 2536 { 2537 /* Unfortunately we don't support this one. Any brave souls? */ 2538 if (skb->ip_summed == CHECKSUM_COMPLETE) 2539 skb->ip_summed = CHECKSUM_NONE; 2540 } 2541 2542 /** 2543 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE 2544 * @skb: skb to check 2545 * 2546 * fresh skbs have their ip_summed set to CHECKSUM_NONE. 2547 * Instead of forcing ip_summed to CHECKSUM_NONE, we can 2548 * use this helper, to document places where we make this assertion. 
2549 */ 2550 static inline void skb_checksum_none_assert(const struct sk_buff *skb) 2551 { 2552 #ifdef DEBUG 2553 BUG_ON(skb->ip_summed != CHECKSUM_NONE); 2554 #endif 2555 } 2556 2557 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off); 2558 2559 static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size) 2560 { 2561 if (irqs_disabled()) 2562 return false; 2563 2564 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) 2565 return false; 2566 2567 if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE) 2568 return false; 2569 2570 skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD); 2571 if (skb_end_offset(skb) < skb_size) 2572 return false; 2573 2574 if (skb_shared(skb) || skb_cloned(skb)) 2575 return false; 2576 2577 return true; 2578 } 2579 2580 /** 2581 * skb_head_is_locked - Determine if the skb->head is locked down 2582 * @skb: skb to check 2583 * 2584 * The head on skbs build around a head frag can be removed if they are 2585 * not cloned. This function returns true if the skb head is locked down 2586 * due to either being allocated via kmalloc, or by being a clone with 2587 * multiple references to the head. 2588 */ 2589 static inline bool skb_head_is_locked(const struct sk_buff *skb) 2590 { 2591 return !skb->head_frag || skb_cloned(skb); 2592 } 2593 #endif /* __KERNEL__ */ 2594 #endif /* _LINUX_SKBUFF_H */ 2595