1 /* 2 * Definitions for the 'struct sk_buff' memory handlers. 3 * 4 * Authors: 5 * Alan Cox, <[email protected]> 6 * Florian La Roche, <[email protected]> 7 * 8 * This program is free software; you can redistribute it and/or 9 * modify it under the terms of the GNU General Public License 10 * as published by the Free Software Foundation; either version 11 * 2 of the License, or (at your option) any later version. 12 */ 13 14 #ifndef _LINUX_SKBUFF_H 15 #define _LINUX_SKBUFF_H 16 17 #include <linux/kernel.h> 18 #include <linux/kmemcheck.h> 19 #include <linux/compiler.h> 20 #include <linux/time.h> 21 #include <linux/bug.h> 22 #include <linux/cache.h> 23 #include <linux/rbtree.h> 24 #include <linux/socket.h> 25 26 #include <linux/atomic.h> 27 #include <asm/types.h> 28 #include <linux/spinlock.h> 29 #include <linux/net.h> 30 #include <linux/textsearch.h> 31 #include <net/checksum.h> 32 #include <linux/rcupdate.h> 33 #include <linux/hrtimer.h> 34 #include <linux/dma-mapping.h> 35 #include <linux/netdev_features.h> 36 #include <linux/sched.h> 37 #include <net/flow_dissector.h> 38 #include <linux/splice.h> 39 #include <linux/in6.h> 40 #include <net/flow.h> 41 42 /* The interface for checksum offload between the stack and networking drivers 43 * is as follows... 44 * 45 * A. IP checksum related features 46 * 47 * Drivers advertise checksum offload capabilities in the features of a device. 48 * From the stack's point of view these are capabilities offered by the driver, 49 * a driver typically only advertises features that it is capable of offloading 50 * to its device. 51 * 52 * The checksum related features are: 53 * 54 * NETIF_F_HW_CSUM - The driver (or its device) is able to compute one 55 * IP (one's complement) checksum for any combination 56 * of protocols or protocol layering. The checksum is 57 * computed and set in a packet per the CHECKSUM_PARTIAL 58 * interface (see below). 59 * 60 * NETIF_F_IP_CSUM - Driver (device) is only able to checksum plain 61 * TCP or UDP packets over IPv4. These are specifically 62 * unencapsulated packets of the form IPv4|TCP or 63 * IPv4|UDP where the Protocol field in the IPv4 header 64 * is TCP or UDP. The IPv4 header may contain IP options 65 * This feature cannot be set in features for a device 66 * with NETIF_F_HW_CSUM also set. This feature is being 67 * DEPRECATED (see below). 68 * 69 * NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain 70 * TCP or UDP packets over IPv6. These are specifically 71 * unencapsulated packets of the form IPv6|TCP or 72 * IPv4|UDP where the Next Header field in the IPv6 73 * header is either TCP or UDP. IPv6 extension headers 74 * are not supported with this feature. This feature 75 * cannot be set in features for a device with 76 * NETIF_F_HW_CSUM also set. This feature is being 77 * DEPRECATED (see below). 78 * 79 * NETIF_F_RXCSUM - Driver (device) performs receive checksum offload. 80 * This flag is used only used to disable the RX checksum 81 * feature for a device. The stack will accept receive 82 * checksum indication in packets received on a device 83 * regardless of whether NETIF_F_RXCSUM is set. 84 * 85 * B. Checksumming of received packets by device. Indication of checksum 86 * verification is in set skb->ip_summed. Possible values are: 87 * 88 * CHECKSUM_NONE: 89 * 90 * Device did not checksum this packet e.g. due to lack of capabilities. 91 * The packet contains full (though not verified) checksum in packet but 92 * not in skb->csum. Thus, skb->csum is undefined in this case. 93 * 94 * CHECKSUM_UNNECESSARY: 95 * 96 * The hardware you're dealing with doesn't calculate the full checksum 97 * (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums 98 * for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY 99 * if their checksums are okay. skb->csum is still undefined in this case 100 * though. A driver or device must never modify the checksum field in the 101 * packet even if checksum is verified. 102 * 103 * CHECKSUM_UNNECESSARY is applicable to following protocols: 104 * TCP: IPv6 and IPv4. 105 * UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a 106 * zero UDP checksum for either IPv4 or IPv6, the networking stack 107 * may perform further validation in this case. 108 * GRE: only if the checksum is present in the header. 109 * SCTP: indicates the CRC in SCTP header has been validated. 110 * 111 * skb->csum_level indicates the number of consecutive checksums found in 112 * the packet minus one that have been verified as CHECKSUM_UNNECESSARY. 113 * For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet 114 * and a device is able to verify the checksums for UDP (possibly zero), 115 * GRE (checksum flag is set), and TCP-- skb->csum_level would be set to 116 * two. If the device were only able to verify the UDP checksum and not 117 * GRE, either because it doesn't support GRE checksum of because GRE 118 * checksum is bad, skb->csum_level would be set to zero (TCP checksum is 119 * not considered in this case). 120 * 121 * CHECKSUM_COMPLETE: 122 * 123 * This is the most generic way. The device supplied checksum of the _whole_ 124 * packet as seen by netif_rx() and fills out in skb->csum. Meaning, the 125 * hardware doesn't need to parse L3/L4 headers to implement this. 126 * 127 * Note: Even if device supports only some protocols, but is able to produce 128 * skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY. 129 * 130 * CHECKSUM_PARTIAL: 131 * 132 * A checksum is set up to be offloaded to a device as described in the 133 * output description for CHECKSUM_PARTIAL. This may occur on a packet 134 * received directly from another Linux OS, e.g., a virtualized Linux kernel 135 * on the same host, or it may be set in the input path in GRO or remote 136 * checksum offload. For the purposes of checksum verification, the checksum 137 * referred to by skb->csum_start + skb->csum_offset and any preceding 138 * checksums in the packet are considered verified. Any checksums in the 139 * packet that are after the checksum being offloaded are not considered to 140 * be verified. 141 * 142 * C. Checksumming on transmit for non-GSO. The stack requests checksum offload 143 * in the skb->ip_summed for a packet. Values are: 144 * 145 * CHECKSUM_PARTIAL: 146 * 147 * The driver is required to checksum the packet as seen by hard_start_xmit() 148 * from skb->csum_start up to the end, and to record/write the checksum at 149 * offset skb->csum_start + skb->csum_offset. A driver may verify that the 150 * csum_start and csum_offset values are valid values given the length and 151 * offset of the packet, however they should not attempt to validate that the 152 * checksum refers to a legitimate transport layer checksum-- it is the 153 * purview of the stack to validate that csum_start and csum_offset are set 154 * correctly. 155 * 156 * When the stack requests checksum offload for a packet, the driver MUST 157 * ensure that the checksum is set correctly. A driver can either offload the 158 * checksum calculation to the device, or call skb_checksum_help (in the case 159 * that the device does not support offload for a particular checksum). 160 * 161 * NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM are being deprecated in favor of 162 * NETIF_F_HW_CSUM. New devices should use NETIF_F_HW_CSUM to indicate 163 * checksum offload capability. If a device has limited checksum capabilities 164 * (for instance can only perform NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM as 165 * described above) a helper function can be called to resolve 166 * CHECKSUM_PARTIAL. The helper functions are skb_csum_off_chk*. The helper 167 * function takes a spec argument that describes the protocol layer that is 168 * supported for checksum offload and can be called for each packet. If a 169 * packet does not match the specification for offload, skb_checksum_help 170 * is called to resolve the checksum. 171 * 172 * CHECKSUM_NONE: 173 * 174 * The skb was already checksummed by the protocol, or a checksum is not 175 * required. 176 * 177 * CHECKSUM_UNNECESSARY: 178 * 179 * This has the same meaning on as CHECKSUM_NONE for checksum offload on 180 * output. 181 * 182 * CHECKSUM_COMPLETE: 183 * Not used in checksum output. If a driver observes a packet with this value 184 * set in skbuff, if should treat as CHECKSUM_NONE being set. 185 * 186 * D. Non-IP checksum (CRC) offloads 187 * 188 * NETIF_F_SCTP_CRC - This feature indicates that a device is capable of 189 * offloading the SCTP CRC in a packet. To perform this offload the stack 190 * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset 191 * accordingly. Note the there is no indication in the skbuff that the 192 * CHECKSUM_PARTIAL refers to an SCTP checksum, a driver that supports 193 * both IP checksum offload and SCTP CRC offload must verify which offload 194 * is configured for a packet presumably by inspecting packet headers. 195 * 196 * NETIF_F_FCOE_CRC - This feature indicates that a device is capable of 197 * offloading the FCOE CRC in a packet. To perform this offload the stack 198 * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset 199 * accordingly. Note the there is no indication in the skbuff that the 200 * CHECKSUM_PARTIAL refers to an FCOE checksum, a driver that supports 201 * both IP checksum offload and FCOE CRC offload must verify which offload 202 * is configured for a packet presumably by inspecting packet headers. 203 * 204 * E. Checksumming on output with GSO. 205 * 206 * In the case of a GSO packet (skb_is_gso(skb) is true), checksum offload 207 * is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the 208 * gso_type is SKB_GSO_TCPV4 or SKB_GSO_TCPV6, TCP checksum offload as 209 * part of the GSO operation is implied. If a checksum is being offloaded 210 * with GSO then ip_summed is CHECKSUM_PARTIAL, csum_start and csum_offset 211 * are set to refer to the outermost checksum being offload (two offloaded 212 * checksums are possible with UDP encapsulation). 213 */ 214 215 /* Don't change this without changing skb_csum_unnecessary! */ 216 #define CHECKSUM_NONE 0 217 #define CHECKSUM_UNNECESSARY 1 218 #define CHECKSUM_COMPLETE 2 219 #define CHECKSUM_PARTIAL 3 220 221 /* Maximum value in skb->csum_level */ 222 #define SKB_MAX_CSUM_LEVEL 3 223 224 #define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES) 225 #define SKB_WITH_OVERHEAD(X) \ 226 ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) 227 #define SKB_MAX_ORDER(X, ORDER) \ 228 SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X)) 229 #define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0)) 230 #define SKB_MAX_ALLOC (SKB_MAX_ORDER(0, 2)) 231 232 /* return minimum truesize of one skb containing X bytes of data */ 233 #define SKB_TRUESIZE(X) ((X) + \ 234 SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \ 235 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) 236 237 struct net_device; 238 struct scatterlist; 239 struct pipe_inode_info; 240 struct iov_iter; 241 struct napi_struct; 242 243 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 244 struct nf_conntrack { 245 atomic_t use; 246 }; 247 #endif 248 249 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 250 struct nf_bridge_info { 251 atomic_t use; 252 enum { 253 BRNF_PROTO_UNCHANGED, 254 BRNF_PROTO_8021Q, 255 BRNF_PROTO_PPPOE 256 } orig_proto:8; 257 u8 pkt_otherhost:1; 258 u8 in_prerouting:1; 259 u8 bridged_dnat:1; 260 __u16 frag_max_size; 261 struct net_device *physindev; 262 263 /* always valid & non-NULL from FORWARD on, for physdev match */ 264 struct net_device *physoutdev; 265 union { 266 /* prerouting: detect dnat in orig/reply direction */ 267 __be32 ipv4_daddr; 268 struct in6_addr ipv6_daddr; 269 270 /* after prerouting + nat detected: store original source 271 * mac since neigh resolution overwrites it, only used while 272 * skb is out in neigh layer. 273 */ 274 char neigh_header[8]; 275 }; 276 }; 277 #endif 278 279 struct sk_buff_head { 280 /* These two members must be first. */ 281 struct sk_buff *next; 282 struct sk_buff *prev; 283 284 __u32 qlen; 285 spinlock_t lock; 286 }; 287 288 struct sk_buff; 289 290 /* To allow 64K frame to be packed as single skb without frag_list we 291 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for 292 * buffers which do not start on a page boundary. 293 * 294 * Since GRO uses frags we allocate at least 16 regardless of page 295 * size. 296 */ 297 #if (65536/PAGE_SIZE + 1) < 16 298 #define MAX_SKB_FRAGS 16UL 299 #else 300 #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1) 301 #endif 302 extern int sysctl_max_skb_frags; 303 304 typedef struct skb_frag_struct skb_frag_t; 305 306 struct skb_frag_struct { 307 struct { 308 struct page *p; 309 } page; 310 #if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) 311 __u32 page_offset; 312 __u32 size; 313 #else 314 __u16 page_offset; 315 __u16 size; 316 #endif 317 }; 318 319 static inline unsigned int skb_frag_size(const skb_frag_t *frag) 320 { 321 return frag->size; 322 } 323 324 static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size) 325 { 326 frag->size = size; 327 } 328 329 static inline void skb_frag_size_add(skb_frag_t *frag, int delta) 330 { 331 frag->size += delta; 332 } 333 334 static inline void skb_frag_size_sub(skb_frag_t *frag, int delta) 335 { 336 frag->size -= delta; 337 } 338 339 #define HAVE_HW_TIME_STAMP 340 341 /** 342 * struct skb_shared_hwtstamps - hardware time stamps 343 * @hwtstamp: hardware time stamp transformed into duration 344 * since arbitrary point in time 345 * 346 * Software time stamps generated by ktime_get_real() are stored in 347 * skb->tstamp. 348 * 349 * hwtstamps can only be compared against other hwtstamps from 350 * the same device. 351 * 352 * This structure is attached to packets as part of the 353 * &skb_shared_info. Use skb_hwtstamps() to get a pointer. 354 */ 355 struct skb_shared_hwtstamps { 356 ktime_t hwtstamp; 357 }; 358 359 /* Definitions for tx_flags in struct skb_shared_info */ 360 enum { 361 /* generate hardware time stamp */ 362 SKBTX_HW_TSTAMP = 1 << 0, 363 364 /* generate software time stamp when queueing packet to NIC */ 365 SKBTX_SW_TSTAMP = 1 << 1, 366 367 /* device driver is going to provide hardware time stamp */ 368 SKBTX_IN_PROGRESS = 1 << 2, 369 370 /* device driver supports TX zero-copy buffers */ 371 SKBTX_DEV_ZEROCOPY = 1 << 3, 372 373 /* generate wifi status information (where possible) */ 374 SKBTX_WIFI_STATUS = 1 << 4, 375 376 /* This indicates at least one fragment might be overwritten 377 * (as in vmsplice(), sendfile() ...) 378 * If we need to compute a TX checksum, we'll need to copy 379 * all frags to avoid possible bad checksum 380 */ 381 SKBTX_SHARED_FRAG = 1 << 5, 382 383 /* generate software time stamp when entering packet scheduling */ 384 SKBTX_SCHED_TSTAMP = 1 << 6, 385 }; 386 387 #define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \ 388 SKBTX_SCHED_TSTAMP) 389 #define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP) 390 391 /* 392 * The callback notifies userspace to release buffers when skb DMA is done in 393 * lower device, the skb last reference should be 0 when calling this. 394 * The zerocopy_success argument is true if zero copy transmit occurred, 395 * false on data copy or out of memory error caused by data copy attempt. 396 * The ctx field is used to track device context. 397 * The desc field is used to track userspace buffer index. 398 */ 399 struct ubuf_info { 400 void (*callback)(struct ubuf_info *, bool zerocopy_success); 401 void *ctx; 402 unsigned long desc; 403 }; 404 405 /* This data is invariant across clones and lives at 406 * the end of the header data, ie. at skb->end. 407 */ 408 struct skb_shared_info { 409 unsigned char nr_frags; 410 __u8 tx_flags; 411 unsigned short gso_size; 412 /* Warning: this field is not always filled in (UFO)! */ 413 unsigned short gso_segs; 414 unsigned short gso_type; 415 struct sk_buff *frag_list; 416 struct skb_shared_hwtstamps hwtstamps; 417 u32 tskey; 418 __be32 ip6_frag_id; 419 420 /* 421 * Warning : all fields before dataref are cleared in __alloc_skb() 422 */ 423 atomic_t dataref; 424 425 /* Intermediate layers must ensure that destructor_arg 426 * remains valid until skb destructor */ 427 void * destructor_arg; 428 429 /* must be last field, see pskb_expand_head() */ 430 skb_frag_t frags[MAX_SKB_FRAGS]; 431 }; 432 433 /* We divide dataref into two halves. The higher 16 bits hold references 434 * to the payload part of skb->data. The lower 16 bits hold references to 435 * the entire skb->data. A clone of a headerless skb holds the length of 436 * the header in skb->hdr_len. 437 * 438 * All users must obey the rule that the skb->data reference count must be 439 * greater than or equal to the payload reference count. 440 * 441 * Holding a reference to the payload part means that the user does not 442 * care about modifications to the header part of skb->data. 443 */ 444 #define SKB_DATAREF_SHIFT 16 445 #define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1) 446 447 448 enum { 449 SKB_FCLONE_UNAVAILABLE, /* skb has no fclone (from head_cache) */ 450 SKB_FCLONE_ORIG, /* orig skb (from fclone_cache) */ 451 SKB_FCLONE_CLONE, /* companion fclone skb (from fclone_cache) */ 452 }; 453 454 enum { 455 SKB_GSO_TCPV4 = 1 << 0, 456 SKB_GSO_UDP = 1 << 1, 457 458 /* This indicates the skb is from an untrusted source. */ 459 SKB_GSO_DODGY = 1 << 2, 460 461 /* This indicates the tcp segment has CWR set. */ 462 SKB_GSO_TCP_ECN = 1 << 3, 463 464 SKB_GSO_TCP_FIXEDID = 1 << 4, 465 466 SKB_GSO_TCPV6 = 1 << 5, 467 468 SKB_GSO_FCOE = 1 << 6, 469 470 SKB_GSO_GRE = 1 << 7, 471 472 SKB_GSO_GRE_CSUM = 1 << 8, 473 474 SKB_GSO_IPXIP4 = 1 << 9, 475 476 SKB_GSO_IPXIP6 = 1 << 10, 477 478 SKB_GSO_UDP_TUNNEL = 1 << 11, 479 480 SKB_GSO_UDP_TUNNEL_CSUM = 1 << 12, 481 482 SKB_GSO_PARTIAL = 1 << 13, 483 484 SKB_GSO_TUNNEL_REMCSUM = 1 << 14, 485 }; 486 487 #if BITS_PER_LONG > 32 488 #define NET_SKBUFF_DATA_USES_OFFSET 1 489 #endif 490 491 #ifdef NET_SKBUFF_DATA_USES_OFFSET 492 typedef unsigned int sk_buff_data_t; 493 #else 494 typedef unsigned char *sk_buff_data_t; 495 #endif 496 497 /** 498 * struct skb_mstamp - multi resolution time stamps 499 * @stamp_us: timestamp in us resolution 500 * @stamp_jiffies: timestamp in jiffies 501 */ 502 struct skb_mstamp { 503 union { 504 u64 v64; 505 struct { 506 u32 stamp_us; 507 u32 stamp_jiffies; 508 }; 509 }; 510 }; 511 512 /** 513 * skb_mstamp_get - get current timestamp 514 * @cl: place to store timestamps 515 */ 516 static inline void skb_mstamp_get(struct skb_mstamp *cl) 517 { 518 u64 val = local_clock(); 519 520 do_div(val, NSEC_PER_USEC); 521 cl->stamp_us = (u32)val; 522 cl->stamp_jiffies = (u32)jiffies; 523 } 524 525 /** 526 * skb_mstamp_delta - compute the difference in usec between two skb_mstamp 527 * @t1: pointer to newest sample 528 * @t0: pointer to oldest sample 529 */ 530 static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, 531 const struct skb_mstamp *t0) 532 { 533 s32 delta_us = t1->stamp_us - t0->stamp_us; 534 u32 delta_jiffies = t1->stamp_jiffies - t0->stamp_jiffies; 535 536 /* If delta_us is negative, this might be because interval is too big, 537 * or local_clock() drift is too big : fallback using jiffies. 538 */ 539 if (delta_us <= 0 || 540 delta_jiffies >= (INT_MAX / (USEC_PER_SEC / HZ))) 541 542 delta_us = jiffies_to_usecs(delta_jiffies); 543 544 return delta_us; 545 } 546 547 static inline bool skb_mstamp_after(const struct skb_mstamp *t1, 548 const struct skb_mstamp *t0) 549 { 550 s32 diff = t1->stamp_jiffies - t0->stamp_jiffies; 551 552 if (!diff) 553 diff = t1->stamp_us - t0->stamp_us; 554 return diff > 0; 555 } 556 557 /** 558 * struct sk_buff - socket buffer 559 * @next: Next buffer in list 560 * @prev: Previous buffer in list 561 * @tstamp: Time we arrived/left 562 * @rbnode: RB tree node, alternative to next/prev for netem/tcp 563 * @sk: Socket we are owned by 564 * @dev: Device we arrived on/are leaving by 565 * @cb: Control buffer. Free for use by every layer. Put private vars here 566 * @_skb_refdst: destination entry (with norefcount bit) 567 * @sp: the security path, used for xfrm 568 * @len: Length of actual data 569 * @data_len: Data length 570 * @mac_len: Length of link layer header 571 * @hdr_len: writable header length of cloned skb 572 * @csum: Checksum (must include start/offset pair) 573 * @csum_start: Offset from skb->head where checksumming should start 574 * @csum_offset: Offset from csum_start where checksum should be stored 575 * @priority: Packet queueing priority 576 * @ignore_df: allow local fragmentation 577 * @cloned: Head may be cloned (check refcnt to be sure) 578 * @ip_summed: Driver fed us an IP checksum 579 * @nohdr: Payload reference only, must not modify header 580 * @nfctinfo: Relationship of this skb to the connection 581 * @pkt_type: Packet class 582 * @fclone: skbuff clone status 583 * @ipvs_property: skbuff is owned by ipvs 584 * @peeked: this packet has been seen already, so stats have been 585 * done for it, don't do them again 586 * @nf_trace: netfilter packet trace flag 587 * @protocol: Packet protocol from driver 588 * @destructor: Destruct function 589 * @nfct: Associated connection, if any 590 * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c 591 * @skb_iif: ifindex of device we arrived on 592 * @tc_index: Traffic control index 593 * @tc_verd: traffic control verdict 594 * @hash: the packet hash 595 * @queue_mapping: Queue mapping for multiqueue devices 596 * @xmit_more: More SKBs are pending for this queue 597 * @ndisc_nodetype: router type (from link layer) 598 * @ooo_okay: allow the mapping of a socket to a queue to be changed 599 * @l4_hash: indicate hash is a canonical 4-tuple hash over transport 600 * ports. 601 * @sw_hash: indicates hash was computed in software stack 602 * @wifi_acked_valid: wifi_acked was set 603 * @wifi_acked: whether frame was acked on wifi or not 604 * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS 605 * @napi_id: id of the NAPI struct this skb came from 606 * @secmark: security marking 607 * @offload_fwd_mark: fwding offload mark 608 * @mark: Generic packet mark 609 * @vlan_proto: vlan encapsulation protocol 610 * @vlan_tci: vlan tag control information 611 * @inner_protocol: Protocol (encapsulation) 612 * @inner_transport_header: Inner transport layer header (encapsulation) 613 * @inner_network_header: Network layer header (encapsulation) 614 * @inner_mac_header: Link layer header (encapsulation) 615 * @transport_header: Transport layer header 616 * @network_header: Network layer header 617 * @mac_header: Link layer header 618 * @tail: Tail pointer 619 * @end: End pointer 620 * @head: Head of buffer 621 * @data: Data head pointer 622 * @truesize: Buffer size 623 * @users: User count - see {datagram,tcp}.c 624 */ 625 626 struct sk_buff { 627 union { 628 struct { 629 /* These two members must be first. */ 630 struct sk_buff *next; 631 struct sk_buff *prev; 632 633 union { 634 ktime_t tstamp; 635 struct skb_mstamp skb_mstamp; 636 }; 637 }; 638 struct rb_node rbnode; /* used in netem & tcp stack */ 639 }; 640 struct sock *sk; 641 struct net_device *dev; 642 643 /* 644 * This is the control buffer. It is free to use for every 645 * layer. Please put your private variables there. If you 646 * want to keep them across layers you have to do a skb_clone() 647 * first. This is owned by whoever has the skb queued ATM. 648 */ 649 char cb[48] __aligned(8); 650 651 unsigned long _skb_refdst; 652 void (*destructor)(struct sk_buff *skb); 653 #ifdef CONFIG_XFRM 654 struct sec_path *sp; 655 #endif 656 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 657 struct nf_conntrack *nfct; 658 #endif 659 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 660 struct nf_bridge_info *nf_bridge; 661 #endif 662 unsigned int len, 663 data_len; 664 __u16 mac_len, 665 hdr_len; 666 667 /* Following fields are _not_ copied in __copy_skb_header() 668 * Note that queue_mapping is here mostly to fill a hole. 669 */ 670 kmemcheck_bitfield_begin(flags1); 671 __u16 queue_mapping; 672 __u8 cloned:1, 673 nohdr:1, 674 fclone:2, 675 peeked:1, 676 head_frag:1, 677 xmit_more:1; 678 /* one bit hole */ 679 kmemcheck_bitfield_end(flags1); 680 681 /* fields enclosed in headers_start/headers_end are copied 682 * using a single memcpy() in __copy_skb_header() 683 */ 684 /* private: */ 685 __u32 headers_start[0]; 686 /* public: */ 687 688 /* if you move pkt_type around you also must adapt those constants */ 689 #ifdef __BIG_ENDIAN_BITFIELD 690 #define PKT_TYPE_MAX (7 << 5) 691 #else 692 #define PKT_TYPE_MAX 7 693 #endif 694 #define PKT_TYPE_OFFSET() offsetof(struct sk_buff, __pkt_type_offset) 695 696 __u8 __pkt_type_offset[0]; 697 __u8 pkt_type:3; 698 __u8 pfmemalloc:1; 699 __u8 ignore_df:1; 700 __u8 nfctinfo:3; 701 702 __u8 nf_trace:1; 703 __u8 ip_summed:2; 704 __u8 ooo_okay:1; 705 __u8 l4_hash:1; 706 __u8 sw_hash:1; 707 __u8 wifi_acked_valid:1; 708 __u8 wifi_acked:1; 709 710 __u8 no_fcs:1; 711 /* Indicates the inner headers are valid in the skbuff. */ 712 __u8 encapsulation:1; 713 __u8 encap_hdr_csum:1; 714 __u8 csum_valid:1; 715 __u8 csum_complete_sw:1; 716 __u8 csum_level:2; 717 __u8 csum_bad:1; 718 719 #ifdef CONFIG_IPV6_NDISC_NODETYPE 720 __u8 ndisc_nodetype:2; 721 #endif 722 __u8 ipvs_property:1; 723 __u8 inner_protocol_type:1; 724 __u8 remcsum_offload:1; 725 /* 3 or 5 bit hole */ 726 727 #ifdef CONFIG_NET_SCHED 728 __u16 tc_index; /* traffic control index */ 729 #ifdef CONFIG_NET_CLS_ACT 730 __u16 tc_verd; /* traffic control verdict */ 731 #endif 732 #endif 733 734 union { 735 __wsum csum; 736 struct { 737 __u16 csum_start; 738 __u16 csum_offset; 739 }; 740 }; 741 __u32 priority; 742 int skb_iif; 743 __u32 hash; 744 __be16 vlan_proto; 745 __u16 vlan_tci; 746 #if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS) 747 union { 748 unsigned int napi_id; 749 unsigned int sender_cpu; 750 }; 751 #endif 752 union { 753 #ifdef CONFIG_NETWORK_SECMARK 754 __u32 secmark; 755 #endif 756 #ifdef CONFIG_NET_SWITCHDEV 757 __u32 offload_fwd_mark; 758 #endif 759 }; 760 761 union { 762 __u32 mark; 763 __u32 reserved_tailroom; 764 }; 765 766 union { 767 __be16 inner_protocol; 768 __u8 inner_ipproto; 769 }; 770 771 __u16 inner_transport_header; 772 __u16 inner_network_header; 773 __u16 inner_mac_header; 774 775 __be16 protocol; 776 __u16 transport_header; 777 __u16 network_header; 778 __u16 mac_header; 779 780 /* private: */ 781 __u32 headers_end[0]; 782 /* public: */ 783 784 /* These elements must be at the end, see alloc_skb() for details. */ 785 sk_buff_data_t tail; 786 sk_buff_data_t end; 787 unsigned char *head, 788 *data; 789 unsigned int truesize; 790 atomic_t users; 791 }; 792 793 #ifdef __KERNEL__ 794 /* 795 * Handling routines are only of interest to the kernel 796 */ 797 #include <linux/slab.h> 798 799 800 #define SKB_ALLOC_FCLONE 0x01 801 #define SKB_ALLOC_RX 0x02 802 #define SKB_ALLOC_NAPI 0x04 803 804 /* Returns true if the skb was allocated from PFMEMALLOC reserves */ 805 static inline bool skb_pfmemalloc(const struct sk_buff *skb) 806 { 807 return unlikely(skb->pfmemalloc); 808 } 809 810 /* 811 * skb might have a dst pointer attached, refcounted or not. 812 * _skb_refdst low order bit is set if refcount was _not_ taken 813 */ 814 #define SKB_DST_NOREF 1UL 815 #define SKB_DST_PTRMASK ~(SKB_DST_NOREF) 816 817 /** 818 * skb_dst - returns skb dst_entry 819 * @skb: buffer 820 * 821 * Returns skb dst_entry, regardless of reference taken or not. 822 */ 823 static inline struct dst_entry *skb_dst(const struct sk_buff *skb) 824 { 825 /* If refdst was not refcounted, check we still are in a 826 * rcu_read_lock section 827 */ 828 WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) && 829 !rcu_read_lock_held() && 830 !rcu_read_lock_bh_held()); 831 return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK); 832 } 833 834 /** 835 * skb_dst_set - sets skb dst 836 * @skb: buffer 837 * @dst: dst entry 838 * 839 * Sets skb dst, assuming a reference was taken on dst and should 840 * be released by skb_dst_drop() 841 */ 842 static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst) 843 { 844 skb->_skb_refdst = (unsigned long)dst; 845 } 846 847 /** 848 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference 849 * @skb: buffer 850 * @dst: dst entry 851 * 852 * Sets skb dst, assuming a reference was not taken on dst. 853 * If dst entry is cached, we do not take reference and dst_release 854 * will be avoided by refdst_drop. If dst entry is not cached, we take 855 * reference, so that last dst_release can destroy the dst immediately. 856 */ 857 static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst) 858 { 859 WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); 860 skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF; 861 } 862 863 /** 864 * skb_dst_is_noref - Test if skb dst isn't refcounted 865 * @skb: buffer 866 */ 867 static inline bool skb_dst_is_noref(const struct sk_buff *skb) 868 { 869 return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb); 870 } 871 872 static inline struct rtable *skb_rtable(const struct sk_buff *skb) 873 { 874 return (struct rtable *)skb_dst(skb); 875 } 876 877 void kfree_skb(struct sk_buff *skb); 878 void kfree_skb_list(struct sk_buff *segs); 879 void skb_tx_error(struct sk_buff *skb); 880 void consume_skb(struct sk_buff *skb); 881 void __kfree_skb(struct sk_buff *skb); 882 extern struct kmem_cache *skbuff_head_cache; 883 884 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen); 885 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, 886 bool *fragstolen, int *delta_truesize); 887 888 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags, 889 int node); 890 struct sk_buff *__build_skb(void *data, unsigned int frag_size); 891 struct sk_buff *build_skb(void *data, unsigned int frag_size); 892 static inline struct sk_buff *alloc_skb(unsigned int size, 893 gfp_t priority) 894 { 895 return __alloc_skb(size, priority, 0, NUMA_NO_NODE); 896 } 897 898 struct sk_buff *alloc_skb_with_frags(unsigned long header_len, 899 unsigned long data_len, 900 int max_page_order, 901 int *errcode, 902 gfp_t gfp_mask); 903 904 /* Layout of fast clones : [skb1][skb2][fclone_ref] */ 905 struct sk_buff_fclones { 906 struct sk_buff skb1; 907 908 struct sk_buff skb2; 909 910 atomic_t fclone_ref; 911 }; 912 913 /** 914 * skb_fclone_busy - check if fclone is busy 915 * @skb: buffer 916 * 917 * Returns true if skb is a fast clone, and its clone is not freed. 918 * Some drivers call skb_orphan() in their ndo_start_xmit(), 919 * so we also check that this didnt happen. 920 */ 921 static inline bool skb_fclone_busy(const struct sock *sk, 922 const struct sk_buff *skb) 923 { 924 const struct sk_buff_fclones *fclones; 925 926 fclones = container_of(skb, struct sk_buff_fclones, skb1); 927 928 return skb->fclone == SKB_FCLONE_ORIG && 929 atomic_read(&fclones->fclone_ref) > 1 && 930 fclones->skb2.sk == sk; 931 } 932 933 static inline struct sk_buff *alloc_skb_fclone(unsigned int size, 934 gfp_t priority) 935 { 936 return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE); 937 } 938 939 struct sk_buff *__alloc_skb_head(gfp_t priority, int node); 940 static inline struct sk_buff *alloc_skb_head(gfp_t priority) 941 { 942 return __alloc_skb_head(priority, -1); 943 } 944 945 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); 946 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask); 947 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority); 948 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority); 949 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, 950 gfp_t gfp_mask, bool fclone); 951 static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, 952 gfp_t gfp_mask) 953 { 954 return __pskb_copy_fclone(skb, headroom, gfp_mask, false); 955 } 956 957 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask); 958 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, 959 unsigned int headroom); 960 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom, 961 int newtailroom, gfp_t priority); 962 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, 963 int offset, int len); 964 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, 965 int len); 966 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer); 967 int skb_pad(struct sk_buff *skb, int pad); 968 #define dev_kfree_skb(a) consume_skb(a) 969 970 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, 971 int getfrag(void *from, char *to, int offset, 972 int len, int odd, struct sk_buff *skb), 973 void *from, int length); 974 975 int skb_append_pagefrags(struct sk_buff *skb, struct page *page, 976 int offset, size_t size); 977 978 struct skb_seq_state { 979 __u32 lower_offset; 980 __u32 upper_offset; 981 __u32 frag_idx; 982 __u32 stepped_offset; 983 struct sk_buff *root_skb; 984 struct sk_buff *cur_skb; 985 __u8 *frag_data; 986 }; 987 988 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 989 unsigned int to, struct skb_seq_state *st); 990 unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 991 struct skb_seq_state *st); 992 void skb_abort_seq_read(struct skb_seq_state *st); 993 994 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, 995 unsigned int to, struct ts_config *config); 996 997 /* 998 * Packet hash types specify the type of hash in skb_set_hash. 999 * 1000 * Hash types refer to the protocol layer addresses which are used to 1001 * construct a packet's hash. The hashes are used to differentiate or identify 1002 * flows of the protocol layer for the hash type. Hash types are either 1003 * layer-2 (L2), layer-3 (L3), or layer-4 (L4). 1004 * 1005 * Properties of hashes: 1006 * 1007 * 1) Two packets in different flows have different hash values 1008 * 2) Two packets in the same flow should have the same hash value 1009 * 1010 * A hash at a higher layer is considered to be more specific. A driver should 1011 * set the most specific hash possible. 1012 * 1013 * A driver cannot indicate a more specific hash than the layer at which a hash 1014 * was computed. For instance an L3 hash cannot be set as an L4 hash. 1015 * 1016 * A driver may indicate a hash level which is less specific than the 1017 * actual layer the hash was computed on. For instance, a hash computed 1018 * at L4 may be considered an L3 hash. This should only be done if the 1019 * driver can't unambiguously determine that the HW computed the hash at 1020 * the higher layer. Note that the "should" in the second property above 1021 * permits this. 1022 */ 1023 enum pkt_hash_types { 1024 PKT_HASH_TYPE_NONE, /* Undefined type */ 1025 PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */ 1026 PKT_HASH_TYPE_L3, /* Input: src_IP, dst_IP */ 1027 PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */ 1028 }; 1029 1030 static inline void skb_clear_hash(struct sk_buff *skb) 1031 { 1032 skb->hash = 0; 1033 skb->sw_hash = 0; 1034 skb->l4_hash = 0; 1035 } 1036 1037 static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb) 1038 { 1039 if (!skb->l4_hash) 1040 skb_clear_hash(skb); 1041 } 1042 1043 static inline void 1044 __skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4) 1045 { 1046 skb->l4_hash = is_l4; 1047 skb->sw_hash = is_sw; 1048 skb->hash = hash; 1049 } 1050 1051 static inline void 1052 skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type) 1053 { 1054 /* Used by drivers to set hash from HW */ 1055 __skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4); 1056 } 1057 1058 static inline void 1059 __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4) 1060 { 1061 __skb_set_hash(skb, hash, true, is_l4); 1062 } 1063 1064 void __skb_get_hash(struct sk_buff *skb); 1065 u32 skb_get_poff(const struct sk_buff *skb); 1066 u32 __skb_get_poff(const struct sk_buff *skb, void *data, 1067 const struct flow_keys *keys, int hlen); 1068 __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto, 1069 void *data, int hlen_proto); 1070 1071 static inline __be32 skb_flow_get_ports(const struct sk_buff *skb, 1072 int thoff, u8 ip_proto) 1073 { 1074 return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0); 1075 } 1076 1077 void skb_flow_dissector_init(struct flow_dissector *flow_dissector, 1078 const struct flow_dissector_key *key, 1079 unsigned int key_count); 1080 1081 bool __skb_flow_dissect(const struct sk_buff *skb, 1082 struct flow_dissector *flow_dissector, 1083 void *target_container, 1084 void *data, __be16 proto, int nhoff, int hlen, 1085 unsigned int flags); 1086 1087 static inline bool skb_flow_dissect(const struct sk_buff *skb, 1088 struct flow_dissector *flow_dissector, 1089 void *target_container, unsigned int flags) 1090 { 1091 return __skb_flow_dissect(skb, flow_dissector, target_container, 1092 NULL, 0, 0, 0, flags); 1093 } 1094 1095 static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb, 1096 struct flow_keys *flow, 1097 unsigned int flags) 1098 { 1099 memset(flow, 0, sizeof(*flow)); 1100 return __skb_flow_dissect(skb, &flow_keys_dissector, flow, 1101 NULL, 0, 0, 0, flags); 1102 } 1103 1104 static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow, 1105 void *data, __be16 proto, 1106 int nhoff, int hlen, 1107 unsigned int flags) 1108 { 1109 memset(flow, 0, sizeof(*flow)); 1110 return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow, 1111 data, proto, nhoff, hlen, flags); 1112 } 1113 1114 static inline __u32 skb_get_hash(struct sk_buff *skb) 1115 { 1116 if (!skb->l4_hash && !skb->sw_hash) 1117 __skb_get_hash(skb); 1118 1119 return skb->hash; 1120 } 1121 1122 __u32 __skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6); 1123 1124 static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6) 1125 { 1126 if (!skb->l4_hash && !skb->sw_hash) { 1127 struct flow_keys keys; 1128 __u32 hash = __get_hash_from_flowi6(fl6, &keys); 1129 1130 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys)); 1131 } 1132 1133 return skb->hash; 1134 } 1135 1136 __u32 __skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl); 1137 1138 static inline __u32 skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl4) 1139 { 1140 if (!skb->l4_hash && !skb->sw_hash) { 1141 struct flow_keys keys; 1142 __u32 hash = __get_hash_from_flowi4(fl4, &keys); 1143 1144 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys)); 1145 } 1146 1147 return skb->hash; 1148 } 1149 1150 __u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb); 1151 1152 static inline __u32 skb_get_hash_raw(const struct sk_buff *skb) 1153 { 1154 return skb->hash; 1155 } 1156 1157 static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from) 1158 { 1159 to->hash = from->hash; 1160 to->sw_hash = from->sw_hash; 1161 to->l4_hash = from->l4_hash; 1162 }; 1163 1164 #ifdef NET_SKBUFF_DATA_USES_OFFSET 1165 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) 1166 { 1167 return skb->head + skb->end; 1168 } 1169 1170 static inline unsigned int skb_end_offset(const struct sk_buff *skb) 1171 { 1172 return skb->end; 1173 } 1174 #else 1175 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) 1176 { 1177 return skb->end; 1178 } 1179 1180 static inline unsigned int skb_end_offset(const struct sk_buff *skb) 1181 { 1182 return skb->end - skb->head; 1183 } 1184 #endif 1185 1186 /* Internal */ 1187 #define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB))) 1188 1189 static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb) 1190 { 1191 return &skb_shinfo(skb)->hwtstamps; 1192 } 1193 1194 /** 1195 * skb_queue_empty - check if a queue is empty 1196 * @list: queue head 1197 * 1198 * Returns true if the queue is empty, false otherwise. 1199 */ 1200 static inline int skb_queue_empty(const struct sk_buff_head *list) 1201 { 1202 return list->next == (const struct sk_buff *) list; 1203 } 1204 1205 /** 1206 * skb_queue_is_last - check if skb is the last entry in the queue 1207 * @list: queue head 1208 * @skb: buffer 1209 * 1210 * Returns true if @skb is the last buffer on the list. 1211 */ 1212 static inline bool skb_queue_is_last(const struct sk_buff_head *list, 1213 const struct sk_buff *skb) 1214 { 1215 return skb->next == (const struct sk_buff *) list; 1216 } 1217 1218 /** 1219 * skb_queue_is_first - check if skb is the first entry in the queue 1220 * @list: queue head 1221 * @skb: buffer 1222 * 1223 * Returns true if @skb is the first buffer on the list. 1224 */ 1225 static inline bool skb_queue_is_first(const struct sk_buff_head *list, 1226 const struct sk_buff *skb) 1227 { 1228 return skb->prev == (const struct sk_buff *) list; 1229 } 1230 1231 /** 1232 * skb_queue_next - return the next packet in the queue 1233 * @list: queue head 1234 * @skb: current buffer 1235 * 1236 * Return the next packet in @list after @skb. It is only valid to 1237 * call this if skb_queue_is_last() evaluates to false. 1238 */ 1239 static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list, 1240 const struct sk_buff *skb) 1241 { 1242 /* This BUG_ON may seem severe, but if we just return then we 1243 * are going to dereference garbage. 1244 */ 1245 BUG_ON(skb_queue_is_last(list, skb)); 1246 return skb->next; 1247 } 1248 1249 /** 1250 * skb_queue_prev - return the prev packet in the queue 1251 * @list: queue head 1252 * @skb: current buffer 1253 * 1254 * Return the prev packet in @list before @skb. It is only valid to 1255 * call this if skb_queue_is_first() evaluates to false. 1256 */ 1257 static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list, 1258 const struct sk_buff *skb) 1259 { 1260 /* This BUG_ON may seem severe, but if we just return then we 1261 * are going to dereference garbage. 1262 */ 1263 BUG_ON(skb_queue_is_first(list, skb)); 1264 return skb->prev; 1265 } 1266 1267 /** 1268 * skb_get - reference buffer 1269 * @skb: buffer to reference 1270 * 1271 * Makes another reference to a socket buffer and returns a pointer 1272 * to the buffer. 1273 */ 1274 static inline struct sk_buff *skb_get(struct sk_buff *skb) 1275 { 1276 atomic_inc(&skb->users); 1277 return skb; 1278 } 1279 1280 /* 1281 * If users == 1, we are the only owner and are can avoid redundant 1282 * atomic change. 1283 */ 1284 1285 /** 1286 * skb_cloned - is the buffer a clone 1287 * @skb: buffer to check 1288 * 1289 * Returns true if the buffer was generated with skb_clone() and is 1290 * one of multiple shared copies of the buffer. Cloned buffers are 1291 * shared data so must not be written to under normal circumstances. 1292 */ 1293 static inline int skb_cloned(const struct sk_buff *skb) 1294 { 1295 return skb->cloned && 1296 (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1; 1297 } 1298 1299 static inline int skb_unclone(struct sk_buff *skb, gfp_t pri) 1300 { 1301 might_sleep_if(gfpflags_allow_blocking(pri)); 1302 1303 if (skb_cloned(skb)) 1304 return pskb_expand_head(skb, 0, 0, pri); 1305 1306 return 0; 1307 } 1308 1309 /** 1310 * skb_header_cloned - is the header a clone 1311 * @skb: buffer to check 1312 * 1313 * Returns true if modifying the header part of the buffer requires 1314 * the data to be copied. 1315 */ 1316 static inline int skb_header_cloned(const struct sk_buff *skb) 1317 { 1318 int dataref; 1319 1320 if (!skb->cloned) 1321 return 0; 1322 1323 dataref = atomic_read(&skb_shinfo(skb)->dataref); 1324 dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT); 1325 return dataref != 1; 1326 } 1327 1328 static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri) 1329 { 1330 might_sleep_if(gfpflags_allow_blocking(pri)); 1331 1332 if (skb_header_cloned(skb)) 1333 return pskb_expand_head(skb, 0, 0, pri); 1334 1335 return 0; 1336 } 1337 1338 /** 1339 * skb_header_release - release reference to header 1340 * @skb: buffer to operate on 1341 * 1342 * Drop a reference to the header part of the buffer. This is done 1343 * by acquiring a payload reference. You must not read from the header 1344 * part of skb->data after this. 1345 * Note : Check if you can use __skb_header_release() instead. 1346 */ 1347 static inline void skb_header_release(struct sk_buff *skb) 1348 { 1349 BUG_ON(skb->nohdr); 1350 skb->nohdr = 1; 1351 atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref); 1352 } 1353 1354 /** 1355 * __skb_header_release - release reference to header 1356 * @skb: buffer to operate on 1357 * 1358 * Variant of skb_header_release() assuming skb is private to caller. 1359 * We can avoid one atomic operation. 1360 */ 1361 static inline void __skb_header_release(struct sk_buff *skb) 1362 { 1363 skb->nohdr = 1; 1364 atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT)); 1365 } 1366 1367 1368 /** 1369 * skb_shared - is the buffer shared 1370 * @skb: buffer to check 1371 * 1372 * Returns true if more than one person has a reference to this 1373 * buffer. 1374 */ 1375 static inline int skb_shared(const struct sk_buff *skb) 1376 { 1377 return atomic_read(&skb->users) != 1; 1378 } 1379 1380 /** 1381 * skb_share_check - check if buffer is shared and if so clone it 1382 * @skb: buffer to check 1383 * @pri: priority for memory allocation 1384 * 1385 * If the buffer is shared the buffer is cloned and the old copy 1386 * drops a reference. A new clone with a single reference is returned. 1387 * If the buffer is not shared the original buffer is returned. When 1388 * being called from interrupt status or with spinlocks held pri must 1389 * be GFP_ATOMIC. 1390 * 1391 * NULL is returned on a memory allocation failure. 1392 */ 1393 static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri) 1394 { 1395 might_sleep_if(gfpflags_allow_blocking(pri)); 1396 if (skb_shared(skb)) { 1397 struct sk_buff *nskb = skb_clone(skb, pri); 1398 1399 if (likely(nskb)) 1400 consume_skb(skb); 1401 else 1402 kfree_skb(skb); 1403 skb = nskb; 1404 } 1405 return skb; 1406 } 1407 1408 /* 1409 * Copy shared buffers into a new sk_buff. We effectively do COW on 1410 * packets to handle cases where we have a local reader and forward 1411 * and a couple of other messy ones. The normal one is tcpdumping 1412 * a packet thats being forwarded. 1413 */ 1414 1415 /** 1416 * skb_unshare - make a copy of a shared buffer 1417 * @skb: buffer to check 1418 * @pri: priority for memory allocation 1419 * 1420 * If the socket buffer is a clone then this function creates a new 1421 * copy of the data, drops a reference count on the old copy and returns 1422 * the new copy with the reference count at 1. If the buffer is not a clone 1423 * the original buffer is returned. When called with a spinlock held or 1424 * from interrupt state @pri must be %GFP_ATOMIC 1425 * 1426 * %NULL is returned on a memory allocation failure. 1427 */ 1428 static inline struct sk_buff *skb_unshare(struct sk_buff *skb, 1429 gfp_t pri) 1430 { 1431 might_sleep_if(gfpflags_allow_blocking(pri)); 1432 if (skb_cloned(skb)) { 1433 struct sk_buff *nskb = skb_copy(skb, pri); 1434 1435 /* Free our shared copy */ 1436 if (likely(nskb)) 1437 consume_skb(skb); 1438 else 1439 kfree_skb(skb); 1440 skb = nskb; 1441 } 1442 return skb; 1443 } 1444 1445 /** 1446 * skb_peek - peek at the head of an &sk_buff_head 1447 * @list_: list to peek at 1448 * 1449 * Peek an &sk_buff. Unlike most other operations you _MUST_ 1450 * be careful with this one. A peek leaves the buffer on the 1451 * list and someone else may run off with it. You must hold 1452 * the appropriate locks or have a private queue to do this. 1453 * 1454 * Returns %NULL for an empty list or a pointer to the head element. 1455 * The reference count is not incremented and the reference is therefore 1456 * volatile. Use with caution. 1457 */ 1458 static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_) 1459 { 1460 struct sk_buff *skb = list_->next; 1461 1462 if (skb == (struct sk_buff *)list_) 1463 skb = NULL; 1464 return skb; 1465 } 1466 1467 /** 1468 * skb_peek_next - peek skb following the given one from a queue 1469 * @skb: skb to start from 1470 * @list_: list to peek at 1471 * 1472 * Returns %NULL when the end of the list is met or a pointer to the 1473 * next element. The reference count is not incremented and the 1474 * reference is therefore volatile. Use with caution. 1475 */ 1476 static inline struct sk_buff *skb_peek_next(struct sk_buff *skb, 1477 const struct sk_buff_head *list_) 1478 { 1479 struct sk_buff *next = skb->next; 1480 1481 if (next == (struct sk_buff *)list_) 1482 next = NULL; 1483 return next; 1484 } 1485 1486 /** 1487 * skb_peek_tail - peek at the tail of an &sk_buff_head 1488 * @list_: list to peek at 1489 * 1490 * Peek an &sk_buff. Unlike most other operations you _MUST_ 1491 * be careful with this one. A peek leaves the buffer on the 1492 * list and someone else may run off with it. You must hold 1493 * the appropriate locks or have a private queue to do this. 1494 * 1495 * Returns %NULL for an empty list or a pointer to the tail element. 1496 * The reference count is not incremented and the reference is therefore 1497 * volatile. Use with caution. 1498 */ 1499 static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_) 1500 { 1501 struct sk_buff *skb = list_->prev; 1502 1503 if (skb == (struct sk_buff *)list_) 1504 skb = NULL; 1505 return skb; 1506 1507 } 1508 1509 /** 1510 * skb_queue_len - get queue length 1511 * @list_: list to measure 1512 * 1513 * Return the length of an &sk_buff queue. 1514 */ 1515 static inline __u32 skb_queue_len(const struct sk_buff_head *list_) 1516 { 1517 return list_->qlen; 1518 } 1519 1520 /** 1521 * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head 1522 * @list: queue to initialize 1523 * 1524 * This initializes only the list and queue length aspects of 1525 * an sk_buff_head object. This allows to initialize the list 1526 * aspects of an sk_buff_head without reinitializing things like 1527 * the spinlock. It can also be used for on-stack sk_buff_head 1528 * objects where the spinlock is known to not be used. 1529 */ 1530 static inline void __skb_queue_head_init(struct sk_buff_head *list) 1531 { 1532 list->prev = list->next = (struct sk_buff *)list; 1533 list->qlen = 0; 1534 } 1535 1536 /* 1537 * This function creates a split out lock class for each invocation; 1538 * this is needed for now since a whole lot of users of the skb-queue 1539 * infrastructure in drivers have different locking usage (in hardirq) 1540 * than the networking core (in softirq only). In the long run either the 1541 * network layer or drivers should need annotation to consolidate the 1542 * main types of usage into 3 classes. 1543 */ 1544 static inline void skb_queue_head_init(struct sk_buff_head *list) 1545 { 1546 spin_lock_init(&list->lock); 1547 __skb_queue_head_init(list); 1548 } 1549 1550 static inline void skb_queue_head_init_class(struct sk_buff_head *list, 1551 struct lock_class_key *class) 1552 { 1553 skb_queue_head_init(list); 1554 lockdep_set_class(&list->lock, class); 1555 } 1556 1557 /* 1558 * Insert an sk_buff on a list. 1559 * 1560 * The "__skb_xxxx()" functions are the non-atomic ones that 1561 * can only be called with interrupts disabled. 1562 */ 1563 void skb_insert(struct sk_buff *old, struct sk_buff *newsk, 1564 struct sk_buff_head *list); 1565 static inline void __skb_insert(struct sk_buff *newsk, 1566 struct sk_buff *prev, struct sk_buff *next, 1567 struct sk_buff_head *list) 1568 { 1569 newsk->next = next; 1570 newsk->prev = prev; 1571 next->prev = prev->next = newsk; 1572 list->qlen++; 1573 } 1574 1575 static inline void __skb_queue_splice(const struct sk_buff_head *list, 1576 struct sk_buff *prev, 1577 struct sk_buff *next) 1578 { 1579 struct sk_buff *first = list->next; 1580 struct sk_buff *last = list->prev; 1581 1582 first->prev = prev; 1583 prev->next = first; 1584 1585 last->next = next; 1586 next->prev = last; 1587 } 1588 1589 /** 1590 * skb_queue_splice - join two skb lists, this is designed for stacks 1591 * @list: the new list to add 1592 * @head: the place to add it in the first list 1593 */ 1594 static inline void skb_queue_splice(const struct sk_buff_head *list, 1595 struct sk_buff_head *head) 1596 { 1597 if (!skb_queue_empty(list)) { 1598 __skb_queue_splice(list, (struct sk_buff *) head, head->next); 1599 head->qlen += list->qlen; 1600 } 1601 } 1602 1603 /** 1604 * skb_queue_splice_init - join two skb lists and reinitialise the emptied list 1605 * @list: the new list to add 1606 * @head: the place to add it in the first list 1607 * 1608 * The list at @list is reinitialised 1609 */ 1610 static inline void skb_queue_splice_init(struct sk_buff_head *list, 1611 struct sk_buff_head *head) 1612 { 1613 if (!skb_queue_empty(list)) { 1614 __skb_queue_splice(list, (struct sk_buff *) head, head->next); 1615 head->qlen += list->qlen; 1616 __skb_queue_head_init(list); 1617 } 1618 } 1619 1620 /** 1621 * skb_queue_splice_tail - join two skb lists, each list being a queue 1622 * @list: the new list to add 1623 * @head: the place to add it in the first list 1624 */ 1625 static inline void skb_queue_splice_tail(const struct sk_buff_head *list, 1626 struct sk_buff_head *head) 1627 { 1628 if (!skb_queue_empty(list)) { 1629 __skb_queue_splice(list, head->prev, (struct sk_buff *) head); 1630 head->qlen += list->qlen; 1631 } 1632 } 1633 1634 /** 1635 * skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list 1636 * @list: the new list to add 1637 * @head: the place to add it in the first list 1638 * 1639 * Each of the lists is a queue. 1640 * The list at @list is reinitialised 1641 */ 1642 static inline void skb_queue_splice_tail_init(struct sk_buff_head *list, 1643 struct sk_buff_head *head) 1644 { 1645 if (!skb_queue_empty(list)) { 1646 __skb_queue_splice(list, head->prev, (struct sk_buff *) head); 1647 head->qlen += list->qlen; 1648 __skb_queue_head_init(list); 1649 } 1650 } 1651 1652 /** 1653 * __skb_queue_after - queue a buffer at the list head 1654 * @list: list to use 1655 * @prev: place after this buffer 1656 * @newsk: buffer to queue 1657 * 1658 * Queue a buffer int the middle of a list. This function takes no locks 1659 * and you must therefore hold required locks before calling it. 1660 * 1661 * A buffer cannot be placed on two lists at the same time. 1662 */ 1663 static inline void __skb_queue_after(struct sk_buff_head *list, 1664 struct sk_buff *prev, 1665 struct sk_buff *newsk) 1666 { 1667 __skb_insert(newsk, prev, prev->next, list); 1668 } 1669 1670 void skb_append(struct sk_buff *old, struct sk_buff *newsk, 1671 struct sk_buff_head *list); 1672 1673 static inline void __skb_queue_before(struct sk_buff_head *list, 1674 struct sk_buff *next, 1675 struct sk_buff *newsk) 1676 { 1677 __skb_insert(newsk, next->prev, next, list); 1678 } 1679 1680 /** 1681 * __skb_queue_head - queue a buffer at the list head 1682 * @list: list to use 1683 * @newsk: buffer to queue 1684 * 1685 * Queue a buffer at the start of a list. This function takes no locks 1686 * and you must therefore hold required locks before calling it. 1687 * 1688 * A buffer cannot be placed on two lists at the same time. 1689 */ 1690 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk); 1691 static inline void __skb_queue_head(struct sk_buff_head *list, 1692 struct sk_buff *newsk) 1693 { 1694 __skb_queue_after(list, (struct sk_buff *)list, newsk); 1695 } 1696 1697 /** 1698 * __skb_queue_tail - queue a buffer at the list tail 1699 * @list: list to use 1700 * @newsk: buffer to queue 1701 * 1702 * Queue a buffer at the end of a list. This function takes no locks 1703 * and you must therefore hold required locks before calling it. 1704 * 1705 * A buffer cannot be placed on two lists at the same time. 1706 */ 1707 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk); 1708 static inline void __skb_queue_tail(struct sk_buff_head *list, 1709 struct sk_buff *newsk) 1710 { 1711 __skb_queue_before(list, (struct sk_buff *)list, newsk); 1712 } 1713 1714 /* 1715 * remove sk_buff from list. _Must_ be called atomically, and with 1716 * the list known.. 1717 */ 1718 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list); 1719 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) 1720 { 1721 struct sk_buff *next, *prev; 1722 1723 list->qlen--; 1724 next = skb->next; 1725 prev = skb->prev; 1726 skb->next = skb->prev = NULL; 1727 next->prev = prev; 1728 prev->next = next; 1729 } 1730 1731 /** 1732 * __skb_dequeue - remove from the head of the queue 1733 * @list: list to dequeue from 1734 * 1735 * Remove the head of the list. This function does not take any locks 1736 * so must be used with appropriate locks held only. The head item is 1737 * returned or %NULL if the list is empty. 1738 */ 1739 struct sk_buff *skb_dequeue(struct sk_buff_head *list); 1740 static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list) 1741 { 1742 struct sk_buff *skb = skb_peek(list); 1743 if (skb) 1744 __skb_unlink(skb, list); 1745 return skb; 1746 } 1747 1748 /** 1749 * __skb_dequeue_tail - remove from the tail of the queue 1750 * @list: list to dequeue from 1751 * 1752 * Remove the tail of the list. This function does not take any locks 1753 * so must be used with appropriate locks held only. The tail item is 1754 * returned or %NULL if the list is empty. 1755 */ 1756 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list); 1757 static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list) 1758 { 1759 struct sk_buff *skb = skb_peek_tail(list); 1760 if (skb) 1761 __skb_unlink(skb, list); 1762 return skb; 1763 } 1764 1765 1766 static inline bool skb_is_nonlinear(const struct sk_buff *skb) 1767 { 1768 return skb->data_len; 1769 } 1770 1771 static inline unsigned int skb_headlen(const struct sk_buff *skb) 1772 { 1773 return skb->len - skb->data_len; 1774 } 1775 1776 static inline int skb_pagelen(const struct sk_buff *skb) 1777 { 1778 int i, len = 0; 1779 1780 for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) 1781 len += skb_frag_size(&skb_shinfo(skb)->frags[i]); 1782 return len + skb_headlen(skb); 1783 } 1784 1785 /** 1786 * __skb_fill_page_desc - initialise a paged fragment in an skb 1787 * @skb: buffer containing fragment to be initialised 1788 * @i: paged fragment index to initialise 1789 * @page: the page to use for this fragment 1790 * @off: the offset to the data with @page 1791 * @size: the length of the data 1792 * 1793 * Initialises the @i'th fragment of @skb to point to &size bytes at 1794 * offset @off within @page. 1795 * 1796 * Does not take any additional reference on the fragment. 1797 */ 1798 static inline void __skb_fill_page_desc(struct sk_buff *skb, int i, 1799 struct page *page, int off, int size) 1800 { 1801 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1802 1803 /* 1804 * Propagate page pfmemalloc to the skb if we can. The problem is 1805 * that not all callers have unique ownership of the page but rely 1806 * on page_is_pfmemalloc doing the right thing(tm). 1807 */ 1808 frag->page.p = page; 1809 frag->page_offset = off; 1810 skb_frag_size_set(frag, size); 1811 1812 page = compound_head(page); 1813 if (page_is_pfmemalloc(page)) 1814 skb->pfmemalloc = true; 1815 } 1816 1817 /** 1818 * skb_fill_page_desc - initialise a paged fragment in an skb 1819 * @skb: buffer containing fragment to be initialised 1820 * @i: paged fragment index to initialise 1821 * @page: the page to use for this fragment 1822 * @off: the offset to the data with @page 1823 * @size: the length of the data 1824 * 1825 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of 1826 * @skb to point to @size bytes at offset @off within @page. In 1827 * addition updates @skb such that @i is the last fragment. 1828 * 1829 * Does not take any additional reference on the fragment. 1830 */ 1831 static inline void skb_fill_page_desc(struct sk_buff *skb, int i, 1832 struct page *page, int off, int size) 1833 { 1834 __skb_fill_page_desc(skb, i, page, off, size); 1835 skb_shinfo(skb)->nr_frags = i + 1; 1836 } 1837 1838 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, 1839 int size, unsigned int truesize); 1840 1841 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size, 1842 unsigned int truesize); 1843 1844 #define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags) 1845 #define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frag_list(skb)) 1846 #define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb)) 1847 1848 #ifdef NET_SKBUFF_DATA_USES_OFFSET 1849 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb) 1850 { 1851 return skb->head + skb->tail; 1852 } 1853 1854 static inline void skb_reset_tail_pointer(struct sk_buff *skb) 1855 { 1856 skb->tail = skb->data - skb->head; 1857 } 1858 1859 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) 1860 { 1861 skb_reset_tail_pointer(skb); 1862 skb->tail += offset; 1863 } 1864 1865 #else /* NET_SKBUFF_DATA_USES_OFFSET */ 1866 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb) 1867 { 1868 return skb->tail; 1869 } 1870 1871 static inline void skb_reset_tail_pointer(struct sk_buff *skb) 1872 { 1873 skb->tail = skb->data; 1874 } 1875 1876 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) 1877 { 1878 skb->tail = skb->data + offset; 1879 } 1880 1881 #endif /* NET_SKBUFF_DATA_USES_OFFSET */ 1882 1883 /* 1884 * Add data to an sk_buff 1885 */ 1886 unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len); 1887 unsigned char *skb_put(struct sk_buff *skb, unsigned int len); 1888 static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len) 1889 { 1890 unsigned char *tmp = skb_tail_pointer(skb); 1891 SKB_LINEAR_ASSERT(skb); 1892 skb->tail += len; 1893 skb->len += len; 1894 return tmp; 1895 } 1896 1897 unsigned char *skb_push(struct sk_buff *skb, unsigned int len); 1898 static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len) 1899 { 1900 skb->data -= len; 1901 skb->len += len; 1902 return skb->data; 1903 } 1904 1905 unsigned char *skb_pull(struct sk_buff *skb, unsigned int len); 1906 static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len) 1907 { 1908 skb->len -= len; 1909 BUG_ON(skb->len < skb->data_len); 1910 return skb->data += len; 1911 } 1912 1913 static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len) 1914 { 1915 return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len); 1916 } 1917 1918 unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta); 1919 1920 static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len) 1921 { 1922 if (len > skb_headlen(skb) && 1923 !__pskb_pull_tail(skb, len - skb_headlen(skb))) 1924 return NULL; 1925 skb->len -= len; 1926 return skb->data += len; 1927 } 1928 1929 static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len) 1930 { 1931 return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len); 1932 } 1933 1934 static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len) 1935 { 1936 if (likely(len <= skb_headlen(skb))) 1937 return 1; 1938 if (unlikely(len > skb->len)) 1939 return 0; 1940 return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL; 1941 } 1942 1943 /** 1944 * skb_headroom - bytes at buffer head 1945 * @skb: buffer to check 1946 * 1947 * Return the number of bytes of free space at the head of an &sk_buff. 1948 */ 1949 static inline unsigned int skb_headroom(const struct sk_buff *skb) 1950 { 1951 return skb->data - skb->head; 1952 } 1953 1954 /** 1955 * skb_tailroom - bytes at buffer end 1956 * @skb: buffer to check 1957 * 1958 * Return the number of bytes of free space at the tail of an sk_buff 1959 */ 1960 static inline int skb_tailroom(const struct sk_buff *skb) 1961 { 1962 return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail; 1963 } 1964 1965 /** 1966 * skb_availroom - bytes at buffer end 1967 * @skb: buffer to check 1968 * 1969 * Return the number of bytes of free space at the tail of an sk_buff 1970 * allocated by sk_stream_alloc() 1971 */ 1972 static inline int skb_availroom(const struct sk_buff *skb) 1973 { 1974 if (skb_is_nonlinear(skb)) 1975 return 0; 1976 1977 return skb->end - skb->tail - skb->reserved_tailroom; 1978 } 1979 1980 /** 1981 * skb_reserve - adjust headroom 1982 * @skb: buffer to alter 1983 * @len: bytes to move 1984 * 1985 * Increase the headroom of an empty &sk_buff by reducing the tail 1986 * room. This is only allowed for an empty buffer. 1987 */ 1988 static inline void skb_reserve(struct sk_buff *skb, int len) 1989 { 1990 skb->data += len; 1991 skb->tail += len; 1992 } 1993 1994 /** 1995 * skb_tailroom_reserve - adjust reserved_tailroom 1996 * @skb: buffer to alter 1997 * @mtu: maximum amount of headlen permitted 1998 * @needed_tailroom: minimum amount of reserved_tailroom 1999 * 2000 * Set reserved_tailroom so that headlen can be as large as possible but 2001 * not larger than mtu and tailroom cannot be smaller than 2002 * needed_tailroom. 2003 * The required headroom should already have been reserved before using 2004 * this function. 2005 */ 2006 static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu, 2007 unsigned int needed_tailroom) 2008 { 2009 SKB_LINEAR_ASSERT(skb); 2010 if (mtu < skb_tailroom(skb) - needed_tailroom) 2011 /* use at most mtu */ 2012 skb->reserved_tailroom = skb_tailroom(skb) - mtu; 2013 else 2014 /* use up to all available space */ 2015 skb->reserved_tailroom = needed_tailroom; 2016 } 2017 2018 #define ENCAP_TYPE_ETHER 0 2019 #define ENCAP_TYPE_IPPROTO 1 2020 2021 static inline void skb_set_inner_protocol(struct sk_buff *skb, 2022 __be16 protocol) 2023 { 2024 skb->inner_protocol = protocol; 2025 skb->inner_protocol_type = ENCAP_TYPE_ETHER; 2026 } 2027 2028 static inline void skb_set_inner_ipproto(struct sk_buff *skb, 2029 __u8 ipproto) 2030 { 2031 skb->inner_ipproto = ipproto; 2032 skb->inner_protocol_type = ENCAP_TYPE_IPPROTO; 2033 } 2034 2035 static inline void skb_reset_inner_headers(struct sk_buff *skb) 2036 { 2037 skb->inner_mac_header = skb->mac_header; 2038 skb->inner_network_header = skb->network_header; 2039 skb->inner_transport_header = skb->transport_header; 2040 } 2041 2042 static inline void skb_reset_mac_len(struct sk_buff *skb) 2043 { 2044 skb->mac_len = skb->network_header - skb->mac_header; 2045 } 2046 2047 static inline unsigned char *skb_inner_transport_header(const struct sk_buff 2048 *skb) 2049 { 2050 return skb->head + skb->inner_transport_header; 2051 } 2052 2053 static inline int skb_inner_transport_offset(const struct sk_buff *skb) 2054 { 2055 return skb_inner_transport_header(skb) - skb->data; 2056 } 2057 2058 static inline void skb_reset_inner_transport_header(struct sk_buff *skb) 2059 { 2060 skb->inner_transport_header = skb->data - skb->head; 2061 } 2062 2063 static inline void skb_set_inner_transport_header(struct sk_buff *skb, 2064 const int offset) 2065 { 2066 skb_reset_inner_transport_header(skb); 2067 skb->inner_transport_header += offset; 2068 } 2069 2070 static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb) 2071 { 2072 return skb->head + skb->inner_network_header; 2073 } 2074 2075 static inline void skb_reset_inner_network_header(struct sk_buff *skb) 2076 { 2077 skb->inner_network_header = skb->data - skb->head; 2078 } 2079 2080 static inline void skb_set_inner_network_header(struct sk_buff *skb, 2081 const int offset) 2082 { 2083 skb_reset_inner_network_header(skb); 2084 skb->inner_network_header += offset; 2085 } 2086 2087 static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb) 2088 { 2089 return skb->head + skb->inner_mac_header; 2090 } 2091 2092 static inline void skb_reset_inner_mac_header(struct sk_buff *skb) 2093 { 2094 skb->inner_mac_header = skb->data - skb->head; 2095 } 2096 2097 static inline void skb_set_inner_mac_header(struct sk_buff *skb, 2098 const int offset) 2099 { 2100 skb_reset_inner_mac_header(skb); 2101 skb->inner_mac_header += offset; 2102 } 2103 static inline bool skb_transport_header_was_set(const struct sk_buff *skb) 2104 { 2105 return skb->transport_header != (typeof(skb->transport_header))~0U; 2106 } 2107 2108 static inline unsigned char *skb_transport_header(const struct sk_buff *skb) 2109 { 2110 return skb->head + skb->transport_header; 2111 } 2112 2113 static inline void skb_reset_transport_header(struct sk_buff *skb) 2114 { 2115 skb->transport_header = skb->data - skb->head; 2116 } 2117 2118 static inline void skb_set_transport_header(struct sk_buff *skb, 2119 const int offset) 2120 { 2121 skb_reset_transport_header(skb); 2122 skb->transport_header += offset; 2123 } 2124 2125 static inline unsigned char *skb_network_header(const struct sk_buff *skb) 2126 { 2127 return skb->head + skb->network_header; 2128 } 2129 2130 static inline void skb_reset_network_header(struct sk_buff *skb) 2131 { 2132 skb->network_header = skb->data - skb->head; 2133 } 2134 2135 static inline void skb_set_network_header(struct sk_buff *skb, const int offset) 2136 { 2137 skb_reset_network_header(skb); 2138 skb->network_header += offset; 2139 } 2140 2141 static inline unsigned char *skb_mac_header(const struct sk_buff *skb) 2142 { 2143 return skb->head + skb->mac_header; 2144 } 2145 2146 static inline int skb_mac_header_was_set(const struct sk_buff *skb) 2147 { 2148 return skb->mac_header != (typeof(skb->mac_header))~0U; 2149 } 2150 2151 static inline void skb_reset_mac_header(struct sk_buff *skb) 2152 { 2153 skb->mac_header = skb->data - skb->head; 2154 } 2155 2156 static inline void skb_set_mac_header(struct sk_buff *skb, const int offset) 2157 { 2158 skb_reset_mac_header(skb); 2159 skb->mac_header += offset; 2160 } 2161 2162 static inline void skb_pop_mac_header(struct sk_buff *skb) 2163 { 2164 skb->mac_header = skb->network_header; 2165 } 2166 2167 static inline void skb_probe_transport_header(struct sk_buff *skb, 2168 const int offset_hint) 2169 { 2170 struct flow_keys keys; 2171 2172 if (skb_transport_header_was_set(skb)) 2173 return; 2174 else if (skb_flow_dissect_flow_keys(skb, &keys, 0)) 2175 skb_set_transport_header(skb, keys.control.thoff); 2176 else 2177 skb_set_transport_header(skb, offset_hint); 2178 } 2179 2180 static inline void skb_mac_header_rebuild(struct sk_buff *skb) 2181 { 2182 if (skb_mac_header_was_set(skb)) { 2183 const unsigned char *old_mac = skb_mac_header(skb); 2184 2185 skb_set_mac_header(skb, -skb->mac_len); 2186 memmove(skb_mac_header(skb), old_mac, skb->mac_len); 2187 } 2188 } 2189 2190 static inline int skb_checksum_start_offset(const struct sk_buff *skb) 2191 { 2192 return skb->csum_start - skb_headroom(skb); 2193 } 2194 2195 static inline unsigned char *skb_checksum_start(const struct sk_buff *skb) 2196 { 2197 return skb->head + skb->csum_start; 2198 } 2199 2200 static inline int skb_transport_offset(const struct sk_buff *skb) 2201 { 2202 return skb_transport_header(skb) - skb->data; 2203 } 2204 2205 static inline u32 skb_network_header_len(const struct sk_buff *skb) 2206 { 2207 return skb->transport_header - skb->network_header; 2208 } 2209 2210 static inline u32 skb_inner_network_header_len(const struct sk_buff *skb) 2211 { 2212 return skb->inner_transport_header - skb->inner_network_header; 2213 } 2214 2215 static inline int skb_network_offset(const struct sk_buff *skb) 2216 { 2217 return skb_network_header(skb) - skb->data; 2218 } 2219 2220 static inline int skb_inner_network_offset(const struct sk_buff *skb) 2221 { 2222 return skb_inner_network_header(skb) - skb->data; 2223 } 2224 2225 static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len) 2226 { 2227 return pskb_may_pull(skb, skb_network_offset(skb) + len); 2228 } 2229 2230 /* 2231 * CPUs often take a performance hit when accessing unaligned memory 2232 * locations. The actual performance hit varies, it can be small if the 2233 * hardware handles it or large if we have to take an exception and fix it 2234 * in software. 2235 * 2236 * Since an ethernet header is 14 bytes network drivers often end up with 2237 * the IP header at an unaligned offset. The IP header can be aligned by 2238 * shifting the start of the packet by 2 bytes. Drivers should do this 2239 * with: 2240 * 2241 * skb_reserve(skb, NET_IP_ALIGN); 2242 * 2243 * The downside to this alignment of the IP header is that the DMA is now 2244 * unaligned. On some architectures the cost of an unaligned DMA is high 2245 * and this cost outweighs the gains made by aligning the IP header. 2246 * 2247 * Since this trade off varies between architectures, we allow NET_IP_ALIGN 2248 * to be overridden. 2249 */ 2250 #ifndef NET_IP_ALIGN 2251 #define NET_IP_ALIGN 2 2252 #endif 2253 2254 /* 2255 * The networking layer reserves some headroom in skb data (via 2256 * dev_alloc_skb). This is used to avoid having to reallocate skb data when 2257 * the header has to grow. In the default case, if the header has to grow 2258 * 32 bytes or less we avoid the reallocation. 2259 * 2260 * Unfortunately this headroom changes the DMA alignment of the resulting 2261 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive 2262 * on some architectures. An architecture can override this value, 2263 * perhaps setting it to a cacheline in size (since that will maintain 2264 * cacheline alignment of the DMA). It must be a power of 2. 2265 * 2266 * Various parts of the networking layer expect at least 32 bytes of 2267 * headroom, you should not reduce this. 2268 * 2269 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS) 2270 * to reduce average number of cache lines per packet. 2271 * get_rps_cpus() for example only access one 64 bytes aligned block : 2272 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8) 2273 */ 2274 #ifndef NET_SKB_PAD 2275 #define NET_SKB_PAD max(32, L1_CACHE_BYTES) 2276 #endif 2277 2278 int ___pskb_trim(struct sk_buff *skb, unsigned int len); 2279 2280 static inline void __skb_trim(struct sk_buff *skb, unsigned int len) 2281 { 2282 if (unlikely(skb_is_nonlinear(skb))) { 2283 WARN_ON(1); 2284 return; 2285 } 2286 skb->len = len; 2287 skb_set_tail_pointer(skb, len); 2288 } 2289 2290 void skb_trim(struct sk_buff *skb, unsigned int len); 2291 2292 static inline int __pskb_trim(struct sk_buff *skb, unsigned int len) 2293 { 2294 if (skb->data_len) 2295 return ___pskb_trim(skb, len); 2296 __skb_trim(skb, len); 2297 return 0; 2298 } 2299 2300 static inline int pskb_trim(struct sk_buff *skb, unsigned int len) 2301 { 2302 return (len < skb->len) ? __pskb_trim(skb, len) : 0; 2303 } 2304 2305 /** 2306 * pskb_trim_unique - remove end from a paged unique (not cloned) buffer 2307 * @skb: buffer to alter 2308 * @len: new length 2309 * 2310 * This is identical to pskb_trim except that the caller knows that 2311 * the skb is not cloned so we should never get an error due to out- 2312 * of-memory. 2313 */ 2314 static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len) 2315 { 2316 int err = pskb_trim(skb, len); 2317 BUG_ON(err); 2318 } 2319 2320 /** 2321 * skb_orphan - orphan a buffer 2322 * @skb: buffer to orphan 2323 * 2324 * If a buffer currently has an owner then we call the owner's 2325 * destructor function and make the @skb unowned. The buffer continues 2326 * to exist but is no longer charged to its former owner. 2327 */ 2328 static inline void skb_orphan(struct sk_buff *skb) 2329 { 2330 if (skb->destructor) { 2331 skb->destructor(skb); 2332 skb->destructor = NULL; 2333 skb->sk = NULL; 2334 } else { 2335 BUG_ON(skb->sk); 2336 } 2337 } 2338 2339 /** 2340 * skb_orphan_frags - orphan the frags contained in a buffer 2341 * @skb: buffer to orphan frags from 2342 * @gfp_mask: allocation mask for replacement pages 2343 * 2344 * For each frag in the SKB which needs a destructor (i.e. has an 2345 * owner) create a copy of that frag and release the original 2346 * page by calling the destructor. 2347 */ 2348 static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask) 2349 { 2350 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY))) 2351 return 0; 2352 return skb_copy_ubufs(skb, gfp_mask); 2353 } 2354 2355 /** 2356 * __skb_queue_purge - empty a list 2357 * @list: list to empty 2358 * 2359 * Delete all buffers on an &sk_buff list. Each buffer is removed from 2360 * the list and one reference dropped. This function does not take the 2361 * list lock and the caller must hold the relevant locks to use it. 2362 */ 2363 void skb_queue_purge(struct sk_buff_head *list); 2364 static inline void __skb_queue_purge(struct sk_buff_head *list) 2365 { 2366 struct sk_buff *skb; 2367 while ((skb = __skb_dequeue(list)) != NULL) 2368 kfree_skb(skb); 2369 } 2370 2371 void *netdev_alloc_frag(unsigned int fragsz); 2372 2373 struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length, 2374 gfp_t gfp_mask); 2375 2376 /** 2377 * netdev_alloc_skb - allocate an skbuff for rx on a specific device 2378 * @dev: network device to receive on 2379 * @length: length to allocate 2380 * 2381 * Allocate a new &sk_buff and assign it a usage count of one. The 2382 * buffer has unspecified headroom built in. Users should allocate 2383 * the headroom they think they need without accounting for the 2384 * built in space. The built in space is used for optimisations. 2385 * 2386 * %NULL is returned if there is no free memory. Although this function 2387 * allocates memory it can be called from an interrupt. 2388 */ 2389 static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev, 2390 unsigned int length) 2391 { 2392 return __netdev_alloc_skb(dev, length, GFP_ATOMIC); 2393 } 2394 2395 /* legacy helper around __netdev_alloc_skb() */ 2396 static inline struct sk_buff *__dev_alloc_skb(unsigned int length, 2397 gfp_t gfp_mask) 2398 { 2399 return __netdev_alloc_skb(NULL, length, gfp_mask); 2400 } 2401 2402 /* legacy helper around netdev_alloc_skb() */ 2403 static inline struct sk_buff *dev_alloc_skb(unsigned int length) 2404 { 2405 return netdev_alloc_skb(NULL, length); 2406 } 2407 2408 2409 static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev, 2410 unsigned int length, gfp_t gfp) 2411 { 2412 struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp); 2413 2414 if (NET_IP_ALIGN && skb) 2415 skb_reserve(skb, NET_IP_ALIGN); 2416 return skb; 2417 } 2418 2419 static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev, 2420 unsigned int length) 2421 { 2422 return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC); 2423 } 2424 2425 static inline void skb_free_frag(void *addr) 2426 { 2427 __free_page_frag(addr); 2428 } 2429 2430 void *napi_alloc_frag(unsigned int fragsz); 2431 struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, 2432 unsigned int length, gfp_t gfp_mask); 2433 static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi, 2434 unsigned int length) 2435 { 2436 return __napi_alloc_skb(napi, length, GFP_ATOMIC); 2437 } 2438 void napi_consume_skb(struct sk_buff *skb, int budget); 2439 2440 void __kfree_skb_flush(void); 2441 void __kfree_skb_defer(struct sk_buff *skb); 2442 2443 /** 2444 * __dev_alloc_pages - allocate page for network Rx 2445 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx 2446 * @order: size of the allocation 2447 * 2448 * Allocate a new page. 2449 * 2450 * %NULL is returned if there is no free memory. 2451 */ 2452 static inline struct page *__dev_alloc_pages(gfp_t gfp_mask, 2453 unsigned int order) 2454 { 2455 /* This piece of code contains several assumptions. 2456 * 1. This is for device Rx, therefor a cold page is preferred. 2457 * 2. The expectation is the user wants a compound page. 2458 * 3. If requesting a order 0 page it will not be compound 2459 * due to the check to see if order has a value in prep_new_page 2460 * 4. __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to 2461 * code in gfp_to_alloc_flags that should be enforcing this. 2462 */ 2463 gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC; 2464 2465 return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order); 2466 } 2467 2468 static inline struct page *dev_alloc_pages(unsigned int order) 2469 { 2470 return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order); 2471 } 2472 2473 /** 2474 * __dev_alloc_page - allocate a page for network Rx 2475 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx 2476 * 2477 * Allocate a new page. 2478 * 2479 * %NULL is returned if there is no free memory. 2480 */ 2481 static inline struct page *__dev_alloc_page(gfp_t gfp_mask) 2482 { 2483 return __dev_alloc_pages(gfp_mask, 0); 2484 } 2485 2486 static inline struct page *dev_alloc_page(void) 2487 { 2488 return dev_alloc_pages(0); 2489 } 2490 2491 /** 2492 * skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page 2493 * @page: The page that was allocated from skb_alloc_page 2494 * @skb: The skb that may need pfmemalloc set 2495 */ 2496 static inline void skb_propagate_pfmemalloc(struct page *page, 2497 struct sk_buff *skb) 2498 { 2499 if (page_is_pfmemalloc(page)) 2500 skb->pfmemalloc = true; 2501 } 2502 2503 /** 2504 * skb_frag_page - retrieve the page referred to by a paged fragment 2505 * @frag: the paged fragment 2506 * 2507 * Returns the &struct page associated with @frag. 2508 */ 2509 static inline struct page *skb_frag_page(const skb_frag_t *frag) 2510 { 2511 return frag->page.p; 2512 } 2513 2514 /** 2515 * __skb_frag_ref - take an addition reference on a paged fragment. 2516 * @frag: the paged fragment 2517 * 2518 * Takes an additional reference on the paged fragment @frag. 2519 */ 2520 static inline void __skb_frag_ref(skb_frag_t *frag) 2521 { 2522 get_page(skb_frag_page(frag)); 2523 } 2524 2525 /** 2526 * skb_frag_ref - take an addition reference on a paged fragment of an skb. 2527 * @skb: the buffer 2528 * @f: the fragment offset. 2529 * 2530 * Takes an additional reference on the @f'th paged fragment of @skb. 2531 */ 2532 static inline void skb_frag_ref(struct sk_buff *skb, int f) 2533 { 2534 __skb_frag_ref(&skb_shinfo(skb)->frags[f]); 2535 } 2536 2537 /** 2538 * __skb_frag_unref - release a reference on a paged fragment. 2539 * @frag: the paged fragment 2540 * 2541 * Releases a reference on the paged fragment @frag. 2542 */ 2543 static inline void __skb_frag_unref(skb_frag_t *frag) 2544 { 2545 put_page(skb_frag_page(frag)); 2546 } 2547 2548 /** 2549 * skb_frag_unref - release a reference on a paged fragment of an skb. 2550 * @skb: the buffer 2551 * @f: the fragment offset 2552 * 2553 * Releases a reference on the @f'th paged fragment of @skb. 2554 */ 2555 static inline void skb_frag_unref(struct sk_buff *skb, int f) 2556 { 2557 __skb_frag_unref(&skb_shinfo(skb)->frags[f]); 2558 } 2559 2560 /** 2561 * skb_frag_address - gets the address of the data contained in a paged fragment 2562 * @frag: the paged fragment buffer 2563 * 2564 * Returns the address of the data within @frag. The page must already 2565 * be mapped. 2566 */ 2567 static inline void *skb_frag_address(const skb_frag_t *frag) 2568 { 2569 return page_address(skb_frag_page(frag)) + frag->page_offset; 2570 } 2571 2572 /** 2573 * skb_frag_address_safe - gets the address of the data contained in a paged fragment 2574 * @frag: the paged fragment buffer 2575 * 2576 * Returns the address of the data within @frag. Checks that the page 2577 * is mapped and returns %NULL otherwise. 2578 */ 2579 static inline void *skb_frag_address_safe(const skb_frag_t *frag) 2580 { 2581 void *ptr = page_address(skb_frag_page(frag)); 2582 if (unlikely(!ptr)) 2583 return NULL; 2584 2585 return ptr + frag->page_offset; 2586 } 2587 2588 /** 2589 * __skb_frag_set_page - sets the page contained in a paged fragment 2590 * @frag: the paged fragment 2591 * @page: the page to set 2592 * 2593 * Sets the fragment @frag to contain @page. 2594 */ 2595 static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page) 2596 { 2597 frag->page.p = page; 2598 } 2599 2600 /** 2601 * skb_frag_set_page - sets the page contained in a paged fragment of an skb 2602 * @skb: the buffer 2603 * @f: the fragment offset 2604 * @page: the page to set 2605 * 2606 * Sets the @f'th fragment of @skb to contain @page. 2607 */ 2608 static inline void skb_frag_set_page(struct sk_buff *skb, int f, 2609 struct page *page) 2610 { 2611 __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page); 2612 } 2613 2614 bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio); 2615 2616 /** 2617 * skb_frag_dma_map - maps a paged fragment via the DMA API 2618 * @dev: the device to map the fragment to 2619 * @frag: the paged fragment to map 2620 * @offset: the offset within the fragment (starting at the 2621 * fragment's own offset) 2622 * @size: the number of bytes to map 2623 * @dir: the direction of the mapping (%PCI_DMA_*) 2624 * 2625 * Maps the page associated with @frag to @device. 2626 */ 2627 static inline dma_addr_t skb_frag_dma_map(struct device *dev, 2628 const skb_frag_t *frag, 2629 size_t offset, size_t size, 2630 enum dma_data_direction dir) 2631 { 2632 return dma_map_page(dev, skb_frag_page(frag), 2633 frag->page_offset + offset, size, dir); 2634 } 2635 2636 static inline struct sk_buff *pskb_copy(struct sk_buff *skb, 2637 gfp_t gfp_mask) 2638 { 2639 return __pskb_copy(skb, skb_headroom(skb), gfp_mask); 2640 } 2641 2642 2643 static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb, 2644 gfp_t gfp_mask) 2645 { 2646 return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true); 2647 } 2648 2649 2650 /** 2651 * skb_clone_writable - is the header of a clone writable 2652 * @skb: buffer to check 2653 * @len: length up to which to write 2654 * 2655 * Returns true if modifying the header part of the cloned buffer 2656 * does not requires the data to be copied. 2657 */ 2658 static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len) 2659 { 2660 return !skb_header_cloned(skb) && 2661 skb_headroom(skb) + len <= skb->hdr_len; 2662 } 2663 2664 static inline int skb_try_make_writable(struct sk_buff *skb, 2665 unsigned int write_len) 2666 { 2667 return skb_cloned(skb) && !skb_clone_writable(skb, write_len) && 2668 pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2669 } 2670 2671 static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom, 2672 int cloned) 2673 { 2674 int delta = 0; 2675 2676 if (headroom > skb_headroom(skb)) 2677 delta = headroom - skb_headroom(skb); 2678 2679 if (delta || cloned) 2680 return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0, 2681 GFP_ATOMIC); 2682 return 0; 2683 } 2684 2685 /** 2686 * skb_cow - copy header of skb when it is required 2687 * @skb: buffer to cow 2688 * @headroom: needed headroom 2689 * 2690 * If the skb passed lacks sufficient headroom or its data part 2691 * is shared, data is reallocated. If reallocation fails, an error 2692 * is returned and original skb is not changed. 2693 * 2694 * The result is skb with writable area skb->head...skb->tail 2695 * and at least @headroom of space at head. 2696 */ 2697 static inline int skb_cow(struct sk_buff *skb, unsigned int headroom) 2698 { 2699 return __skb_cow(skb, headroom, skb_cloned(skb)); 2700 } 2701 2702 /** 2703 * skb_cow_head - skb_cow but only making the head writable 2704 * @skb: buffer to cow 2705 * @headroom: needed headroom 2706 * 2707 * This function is identical to skb_cow except that we replace the 2708 * skb_cloned check by skb_header_cloned. It should be used when 2709 * you only need to push on some header and do not need to modify 2710 * the data. 2711 */ 2712 static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom) 2713 { 2714 return __skb_cow(skb, headroom, skb_header_cloned(skb)); 2715 } 2716 2717 /** 2718 * skb_padto - pad an skbuff up to a minimal size 2719 * @skb: buffer to pad 2720 * @len: minimal length 2721 * 2722 * Pads up a buffer to ensure the trailing bytes exist and are 2723 * blanked. If the buffer already contains sufficient data it 2724 * is untouched. Otherwise it is extended. Returns zero on 2725 * success. The skb is freed on error. 2726 */ 2727 static inline int skb_padto(struct sk_buff *skb, unsigned int len) 2728 { 2729 unsigned int size = skb->len; 2730 if (likely(size >= len)) 2731 return 0; 2732 return skb_pad(skb, len - size); 2733 } 2734 2735 /** 2736 * skb_put_padto - increase size and pad an skbuff up to a minimal size 2737 * @skb: buffer to pad 2738 * @len: minimal length 2739 * 2740 * Pads up a buffer to ensure the trailing bytes exist and are 2741 * blanked. If the buffer already contains sufficient data it 2742 * is untouched. Otherwise it is extended. Returns zero on 2743 * success. The skb is freed on error. 2744 */ 2745 static inline int skb_put_padto(struct sk_buff *skb, unsigned int len) 2746 { 2747 unsigned int size = skb->len; 2748 2749 if (unlikely(size < len)) { 2750 len -= size; 2751 if (skb_pad(skb, len)) 2752 return -ENOMEM; 2753 __skb_put(skb, len); 2754 } 2755 return 0; 2756 } 2757 2758 static inline int skb_add_data(struct sk_buff *skb, 2759 struct iov_iter *from, int copy) 2760 { 2761 const int off = skb->len; 2762 2763 if (skb->ip_summed == CHECKSUM_NONE) { 2764 __wsum csum = 0; 2765 if (csum_and_copy_from_iter(skb_put(skb, copy), copy, 2766 &csum, from) == copy) { 2767 skb->csum = csum_block_add(skb->csum, csum, off); 2768 return 0; 2769 } 2770 } else if (copy_from_iter(skb_put(skb, copy), copy, from) == copy) 2771 return 0; 2772 2773 __skb_trim(skb, off); 2774 return -EFAULT; 2775 } 2776 2777 static inline bool skb_can_coalesce(struct sk_buff *skb, int i, 2778 const struct page *page, int off) 2779 { 2780 if (i) { 2781 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; 2782 2783 return page == skb_frag_page(frag) && 2784 off == frag->page_offset + skb_frag_size(frag); 2785 } 2786 return false; 2787 } 2788 2789 static inline int __skb_linearize(struct sk_buff *skb) 2790 { 2791 return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM; 2792 } 2793 2794 /** 2795 * skb_linearize - convert paged skb to linear one 2796 * @skb: buffer to linarize 2797 * 2798 * If there is no free memory -ENOMEM is returned, otherwise zero 2799 * is returned and the old skb data released. 2800 */ 2801 static inline int skb_linearize(struct sk_buff *skb) 2802 { 2803 return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0; 2804 } 2805 2806 /** 2807 * skb_has_shared_frag - can any frag be overwritten 2808 * @skb: buffer to test 2809 * 2810 * Return true if the skb has at least one frag that might be modified 2811 * by an external entity (as in vmsplice()/sendfile()) 2812 */ 2813 static inline bool skb_has_shared_frag(const struct sk_buff *skb) 2814 { 2815 return skb_is_nonlinear(skb) && 2816 skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; 2817 } 2818 2819 /** 2820 * skb_linearize_cow - make sure skb is linear and writable 2821 * @skb: buffer to process 2822 * 2823 * If there is no free memory -ENOMEM is returned, otherwise zero 2824 * is returned and the old skb data released. 2825 */ 2826 static inline int skb_linearize_cow(struct sk_buff *skb) 2827 { 2828 return skb_is_nonlinear(skb) || skb_cloned(skb) ? 2829 __skb_linearize(skb) : 0; 2830 } 2831 2832 /** 2833 * skb_postpull_rcsum - update checksum for received skb after pull 2834 * @skb: buffer to update 2835 * @start: start of data before pull 2836 * @len: length of data pulled 2837 * 2838 * After doing a pull on a received packet, you need to call this to 2839 * update the CHECKSUM_COMPLETE checksum, or set ip_summed to 2840 * CHECKSUM_NONE so that it can be recomputed from scratch. 2841 */ 2842 2843 static inline void skb_postpull_rcsum(struct sk_buff *skb, 2844 const void *start, unsigned int len) 2845 { 2846 if (skb->ip_summed == CHECKSUM_COMPLETE) 2847 skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0)); 2848 else if (skb->ip_summed == CHECKSUM_PARTIAL && 2849 skb_checksum_start_offset(skb) < 0) 2850 skb->ip_summed = CHECKSUM_NONE; 2851 } 2852 2853 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); 2854 2855 static inline void skb_postpush_rcsum(struct sk_buff *skb, 2856 const void *start, unsigned int len) 2857 { 2858 /* For performing the reverse operation to skb_postpull_rcsum(), 2859 * we can instead of ... 2860 * 2861 * skb->csum = csum_add(skb->csum, csum_partial(start, len, 0)); 2862 * 2863 * ... just use this equivalent version here to save a few 2864 * instructions. Feeding csum of 0 in csum_partial() and later 2865 * on adding skb->csum is equivalent to feed skb->csum in the 2866 * first place. 2867 */ 2868 if (skb->ip_summed == CHECKSUM_COMPLETE) 2869 skb->csum = csum_partial(start, len, skb->csum); 2870 } 2871 2872 /** 2873 * pskb_trim_rcsum - trim received skb and update checksum 2874 * @skb: buffer to trim 2875 * @len: new length 2876 * 2877 * This is exactly the same as pskb_trim except that it ensures the 2878 * checksum of received packets are still valid after the operation. 2879 */ 2880 2881 static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) 2882 { 2883 if (likely(len >= skb->len)) 2884 return 0; 2885 if (skb->ip_summed == CHECKSUM_COMPLETE) 2886 skb->ip_summed = CHECKSUM_NONE; 2887 return __pskb_trim(skb, len); 2888 } 2889 2890 #define skb_queue_walk(queue, skb) \ 2891 for (skb = (queue)->next; \ 2892 skb != (struct sk_buff *)(queue); \ 2893 skb = skb->next) 2894 2895 #define skb_queue_walk_safe(queue, skb, tmp) \ 2896 for (skb = (queue)->next, tmp = skb->next; \ 2897 skb != (struct sk_buff *)(queue); \ 2898 skb = tmp, tmp = skb->next) 2899 2900 #define skb_queue_walk_from(queue, skb) \ 2901 for (; skb != (struct sk_buff *)(queue); \ 2902 skb = skb->next) 2903 2904 #define skb_queue_walk_from_safe(queue, skb, tmp) \ 2905 for (tmp = skb->next; \ 2906 skb != (struct sk_buff *)(queue); \ 2907 skb = tmp, tmp = skb->next) 2908 2909 #define skb_queue_reverse_walk(queue, skb) \ 2910 for (skb = (queue)->prev; \ 2911 skb != (struct sk_buff *)(queue); \ 2912 skb = skb->prev) 2913 2914 #define skb_queue_reverse_walk_safe(queue, skb, tmp) \ 2915 for (skb = (queue)->prev, tmp = skb->prev; \ 2916 skb != (struct sk_buff *)(queue); \ 2917 skb = tmp, tmp = skb->prev) 2918 2919 #define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \ 2920 for (tmp = skb->prev; \ 2921 skb != (struct sk_buff *)(queue); \ 2922 skb = tmp, tmp = skb->prev) 2923 2924 static inline bool skb_has_frag_list(const struct sk_buff *skb) 2925 { 2926 return skb_shinfo(skb)->frag_list != NULL; 2927 } 2928 2929 static inline void skb_frag_list_init(struct sk_buff *skb) 2930 { 2931 skb_shinfo(skb)->frag_list = NULL; 2932 } 2933 2934 #define skb_walk_frags(skb, iter) \ 2935 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next) 2936 2937 2938 int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p, 2939 const struct sk_buff *skb); 2940 struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags, 2941 int *peeked, int *off, int *err, 2942 struct sk_buff **last); 2943 struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, 2944 int *peeked, int *off, int *err); 2945 struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, 2946 int *err); 2947 unsigned int datagram_poll(struct file *file, struct socket *sock, 2948 struct poll_table_struct *wait); 2949 int skb_copy_datagram_iter(const struct sk_buff *from, int offset, 2950 struct iov_iter *to, int size); 2951 static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset, 2952 struct msghdr *msg, int size) 2953 { 2954 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size); 2955 } 2956 int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen, 2957 struct msghdr *msg); 2958 int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset, 2959 struct iov_iter *from, int len); 2960 int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm); 2961 void skb_free_datagram(struct sock *sk, struct sk_buff *skb); 2962 void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len); 2963 static inline void skb_free_datagram_locked(struct sock *sk, 2964 struct sk_buff *skb) 2965 { 2966 __skb_free_datagram_locked(sk, skb, 0); 2967 } 2968 int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); 2969 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len); 2970 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len); 2971 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, 2972 int len, __wsum csum); 2973 ssize_t skb_socket_splice(struct sock *sk, 2974 struct pipe_inode_info *pipe, 2975 struct splice_pipe_desc *spd); 2976 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, 2977 struct pipe_inode_info *pipe, unsigned int len, 2978 unsigned int flags, 2979 ssize_t (*splice_cb)(struct sock *, 2980 struct pipe_inode_info *, 2981 struct splice_pipe_desc *)); 2982 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); 2983 unsigned int skb_zerocopy_headlen(const struct sk_buff *from); 2984 int skb_zerocopy(struct sk_buff *to, struct sk_buff *from, 2985 int len, int hlen); 2986 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len); 2987 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); 2988 void skb_scrub_packet(struct sk_buff *skb, bool xnet); 2989 unsigned int skb_gso_transport_seglen(const struct sk_buff *skb); 2990 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); 2991 struct sk_buff *skb_vlan_untag(struct sk_buff *skb); 2992 int skb_ensure_writable(struct sk_buff *skb, int write_len); 2993 int skb_vlan_pop(struct sk_buff *skb); 2994 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci); 2995 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy, 2996 gfp_t gfp); 2997 2998 static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len) 2999 { 3000 return copy_from_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT; 3001 } 3002 3003 static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len) 3004 { 3005 return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT; 3006 } 3007 3008 struct skb_checksum_ops { 3009 __wsum (*update)(const void *mem, int len, __wsum wsum); 3010 __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len); 3011 }; 3012 3013 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, 3014 __wsum csum, const struct skb_checksum_ops *ops); 3015 __wsum skb_checksum(const struct sk_buff *skb, int offset, int len, 3016 __wsum csum); 3017 3018 static inline void * __must_check 3019 __skb_header_pointer(const struct sk_buff *skb, int offset, 3020 int len, void *data, int hlen, void *buffer) 3021 { 3022 if (hlen - offset >= len) 3023 return data + offset; 3024 3025 if (!skb || 3026 skb_copy_bits(skb, offset, buffer, len) < 0) 3027 return NULL; 3028 3029 return buffer; 3030 } 3031 3032 static inline void * __must_check 3033 skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer) 3034 { 3035 return __skb_header_pointer(skb, offset, len, skb->data, 3036 skb_headlen(skb), buffer); 3037 } 3038 3039 /** 3040 * skb_needs_linearize - check if we need to linearize a given skb 3041 * depending on the given device features. 3042 * @skb: socket buffer to check 3043 * @features: net device features 3044 * 3045 * Returns true if either: 3046 * 1. skb has frag_list and the device doesn't support FRAGLIST, or 3047 * 2. skb is fragmented and the device does not support SG. 3048 */ 3049 static inline bool skb_needs_linearize(struct sk_buff *skb, 3050 netdev_features_t features) 3051 { 3052 return skb_is_nonlinear(skb) && 3053 ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) || 3054 (skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG))); 3055 } 3056 3057 static inline void skb_copy_from_linear_data(const struct sk_buff *skb, 3058 void *to, 3059 const unsigned int len) 3060 { 3061 memcpy(to, skb->data, len); 3062 } 3063 3064 static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb, 3065 const int offset, void *to, 3066 const unsigned int len) 3067 { 3068 memcpy(to, skb->data + offset, len); 3069 } 3070 3071 static inline void skb_copy_to_linear_data(struct sk_buff *skb, 3072 const void *from, 3073 const unsigned int len) 3074 { 3075 memcpy(skb->data, from, len); 3076 } 3077 3078 static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb, 3079 const int offset, 3080 const void *from, 3081 const unsigned int len) 3082 { 3083 memcpy(skb->data + offset, from, len); 3084 } 3085 3086 void skb_init(void); 3087 3088 static inline ktime_t skb_get_ktime(const struct sk_buff *skb) 3089 { 3090 return skb->tstamp; 3091 } 3092 3093 /** 3094 * skb_get_timestamp - get timestamp from a skb 3095 * @skb: skb to get stamp from 3096 * @stamp: pointer to struct timeval to store stamp in 3097 * 3098 * Timestamps are stored in the skb as offsets to a base timestamp. 3099 * This function converts the offset back to a struct timeval and stores 3100 * it in stamp. 3101 */ 3102 static inline void skb_get_timestamp(const struct sk_buff *skb, 3103 struct timeval *stamp) 3104 { 3105 *stamp = ktime_to_timeval(skb->tstamp); 3106 } 3107 3108 static inline void skb_get_timestampns(const struct sk_buff *skb, 3109 struct timespec *stamp) 3110 { 3111 *stamp = ktime_to_timespec(skb->tstamp); 3112 } 3113 3114 static inline void __net_timestamp(struct sk_buff *skb) 3115 { 3116 skb->tstamp = ktime_get_real(); 3117 } 3118 3119 static inline ktime_t net_timedelta(ktime_t t) 3120 { 3121 return ktime_sub(ktime_get_real(), t); 3122 } 3123 3124 static inline ktime_t net_invalid_timestamp(void) 3125 { 3126 return ktime_set(0, 0); 3127 } 3128 3129 struct sk_buff *skb_clone_sk(struct sk_buff *skb); 3130 3131 #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING 3132 3133 void skb_clone_tx_timestamp(struct sk_buff *skb); 3134 bool skb_defer_rx_timestamp(struct sk_buff *skb); 3135 3136 #else /* CONFIG_NETWORK_PHY_TIMESTAMPING */ 3137 3138 static inline void skb_clone_tx_timestamp(struct sk_buff *skb) 3139 { 3140 } 3141 3142 static inline bool skb_defer_rx_timestamp(struct sk_buff *skb) 3143 { 3144 return false; 3145 } 3146 3147 #endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */ 3148 3149 /** 3150 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps 3151 * 3152 * PHY drivers may accept clones of transmitted packets for 3153 * timestamping via their phy_driver.txtstamp method. These drivers 3154 * must call this function to return the skb back to the stack with a 3155 * timestamp. 3156 * 3157 * @skb: clone of the the original outgoing packet 3158 * @hwtstamps: hardware time stamps 3159 * 3160 */ 3161 void skb_complete_tx_timestamp(struct sk_buff *skb, 3162 struct skb_shared_hwtstamps *hwtstamps); 3163 3164 void __skb_tstamp_tx(struct sk_buff *orig_skb, 3165 struct skb_shared_hwtstamps *hwtstamps, 3166 struct sock *sk, int tstype); 3167 3168 /** 3169 * skb_tstamp_tx - queue clone of skb with send time stamps 3170 * @orig_skb: the original outgoing packet 3171 * @hwtstamps: hardware time stamps, may be NULL if not available 3172 * 3173 * If the skb has a socket associated, then this function clones the 3174 * skb (thus sharing the actual data and optional structures), stores 3175 * the optional hardware time stamping information (if non NULL) or 3176 * generates a software time stamp (otherwise), then queues the clone 3177 * to the error queue of the socket. Errors are silently ignored. 3178 */ 3179 void skb_tstamp_tx(struct sk_buff *orig_skb, 3180 struct skb_shared_hwtstamps *hwtstamps); 3181 3182 static inline void sw_tx_timestamp(struct sk_buff *skb) 3183 { 3184 if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP && 3185 !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) 3186 skb_tstamp_tx(skb, NULL); 3187 } 3188 3189 /** 3190 * skb_tx_timestamp() - Driver hook for transmit timestamping 3191 * 3192 * Ethernet MAC Drivers should call this function in their hard_xmit() 3193 * function immediately before giving the sk_buff to the MAC hardware. 3194 * 3195 * Specifically, one should make absolutely sure that this function is 3196 * called before TX completion of this packet can trigger. Otherwise 3197 * the packet could potentially already be freed. 3198 * 3199 * @skb: A socket buffer. 3200 */ 3201 static inline void skb_tx_timestamp(struct sk_buff *skb) 3202 { 3203 skb_clone_tx_timestamp(skb); 3204 sw_tx_timestamp(skb); 3205 } 3206 3207 /** 3208 * skb_complete_wifi_ack - deliver skb with wifi status 3209 * 3210 * @skb: the original outgoing packet 3211 * @acked: ack status 3212 * 3213 */ 3214 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked); 3215 3216 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len); 3217 __sum16 __skb_checksum_complete(struct sk_buff *skb); 3218 3219 static inline int skb_csum_unnecessary(const struct sk_buff *skb) 3220 { 3221 return ((skb->ip_summed == CHECKSUM_UNNECESSARY) || 3222 skb->csum_valid || 3223 (skb->ip_summed == CHECKSUM_PARTIAL && 3224 skb_checksum_start_offset(skb) >= 0)); 3225 } 3226 3227 /** 3228 * skb_checksum_complete - Calculate checksum of an entire packet 3229 * @skb: packet to process 3230 * 3231 * This function calculates the checksum over the entire packet plus 3232 * the value of skb->csum. The latter can be used to supply the 3233 * checksum of a pseudo header as used by TCP/UDP. It returns the 3234 * checksum. 3235 * 3236 * For protocols that contain complete checksums such as ICMP/TCP/UDP, 3237 * this function can be used to verify that checksum on received 3238 * packets. In that case the function should return zero if the 3239 * checksum is correct. In particular, this function will return zero 3240 * if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the 3241 * hardware has already verified the correctness of the checksum. 3242 */ 3243 static inline __sum16 skb_checksum_complete(struct sk_buff *skb) 3244 { 3245 return skb_csum_unnecessary(skb) ? 3246 0 : __skb_checksum_complete(skb); 3247 } 3248 3249 static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb) 3250 { 3251 if (skb->ip_summed == CHECKSUM_UNNECESSARY) { 3252 if (skb->csum_level == 0) 3253 skb->ip_summed = CHECKSUM_NONE; 3254 else 3255 skb->csum_level--; 3256 } 3257 } 3258 3259 static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb) 3260 { 3261 if (skb->ip_summed == CHECKSUM_UNNECESSARY) { 3262 if (skb->csum_level < SKB_MAX_CSUM_LEVEL) 3263 skb->csum_level++; 3264 } else if (skb->ip_summed == CHECKSUM_NONE) { 3265 skb->ip_summed = CHECKSUM_UNNECESSARY; 3266 skb->csum_level = 0; 3267 } 3268 } 3269 3270 static inline void __skb_mark_checksum_bad(struct sk_buff *skb) 3271 { 3272 /* Mark current checksum as bad (typically called from GRO 3273 * path). In the case that ip_summed is CHECKSUM_NONE 3274 * this must be the first checksum encountered in the packet. 3275 * When ip_summed is CHECKSUM_UNNECESSARY, this is the first 3276 * checksum after the last one validated. For UDP, a zero 3277 * checksum can not be marked as bad. 3278 */ 3279 3280 if (skb->ip_summed == CHECKSUM_NONE || 3281 skb->ip_summed == CHECKSUM_UNNECESSARY) 3282 skb->csum_bad = 1; 3283 } 3284 3285 /* Check if we need to perform checksum complete validation. 3286 * 3287 * Returns true if checksum complete is needed, false otherwise 3288 * (either checksum is unnecessary or zero checksum is allowed). 3289 */ 3290 static inline bool __skb_checksum_validate_needed(struct sk_buff *skb, 3291 bool zero_okay, 3292 __sum16 check) 3293 { 3294 if (skb_csum_unnecessary(skb) || (zero_okay && !check)) { 3295 skb->csum_valid = 1; 3296 __skb_decr_checksum_unnecessary(skb); 3297 return false; 3298 } 3299 3300 return true; 3301 } 3302 3303 /* For small packets <= CHECKSUM_BREAK peform checksum complete directly 3304 * in checksum_init. 3305 */ 3306 #define CHECKSUM_BREAK 76 3307 3308 /* Unset checksum-complete 3309 * 3310 * Unset checksum complete can be done when packet is being modified 3311 * (uncompressed for instance) and checksum-complete value is 3312 * invalidated. 3313 */ 3314 static inline void skb_checksum_complete_unset(struct sk_buff *skb) 3315 { 3316 if (skb->ip_summed == CHECKSUM_COMPLETE) 3317 skb->ip_summed = CHECKSUM_NONE; 3318 } 3319 3320 /* Validate (init) checksum based on checksum complete. 3321 * 3322 * Return values: 3323 * 0: checksum is validated or try to in skb_checksum_complete. In the latter 3324 * case the ip_summed will not be CHECKSUM_UNNECESSARY and the pseudo 3325 * checksum is stored in skb->csum for use in __skb_checksum_complete 3326 * non-zero: value of invalid checksum 3327 * 3328 */ 3329 static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb, 3330 bool complete, 3331 __wsum psum) 3332 { 3333 if (skb->ip_summed == CHECKSUM_COMPLETE) { 3334 if (!csum_fold(csum_add(psum, skb->csum))) { 3335 skb->csum_valid = 1; 3336 return 0; 3337 } 3338 } else if (skb->csum_bad) { 3339 /* ip_summed == CHECKSUM_NONE in this case */ 3340 return (__force __sum16)1; 3341 } 3342 3343 skb->csum = psum; 3344 3345 if (complete || skb->len <= CHECKSUM_BREAK) { 3346 __sum16 csum; 3347 3348 csum = __skb_checksum_complete(skb); 3349 skb->csum_valid = !csum; 3350 return csum; 3351 } 3352 3353 return 0; 3354 } 3355 3356 static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto) 3357 { 3358 return 0; 3359 } 3360 3361 /* Perform checksum validate (init). Note that this is a macro since we only 3362 * want to calculate the pseudo header which is an input function if necessary. 3363 * First we try to validate without any computation (checksum unnecessary) and 3364 * then calculate based on checksum complete calling the function to compute 3365 * pseudo header. 3366 * 3367 * Return values: 3368 * 0: checksum is validated or try to in skb_checksum_complete 3369 * non-zero: value of invalid checksum 3370 */ 3371 #define __skb_checksum_validate(skb, proto, complete, \ 3372 zero_okay, check, compute_pseudo) \ 3373 ({ \ 3374 __sum16 __ret = 0; \ 3375 skb->csum_valid = 0; \ 3376 if (__skb_checksum_validate_needed(skb, zero_okay, check)) \ 3377 __ret = __skb_checksum_validate_complete(skb, \ 3378 complete, compute_pseudo(skb, proto)); \ 3379 __ret; \ 3380 }) 3381 3382 #define skb_checksum_init(skb, proto, compute_pseudo) \ 3383 __skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo) 3384 3385 #define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \ 3386 __skb_checksum_validate(skb, proto, false, true, check, compute_pseudo) 3387 3388 #define skb_checksum_validate(skb, proto, compute_pseudo) \ 3389 __skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo) 3390 3391 #define skb_checksum_validate_zero_check(skb, proto, check, \ 3392 compute_pseudo) \ 3393 __skb_checksum_validate(skb, proto, true, true, check, compute_pseudo) 3394 3395 #define skb_checksum_simple_validate(skb) \ 3396 __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo) 3397 3398 static inline bool __skb_checksum_convert_check(struct sk_buff *skb) 3399 { 3400 return (skb->ip_summed == CHECKSUM_NONE && 3401 skb->csum_valid && !skb->csum_bad); 3402 } 3403 3404 static inline void __skb_checksum_convert(struct sk_buff *skb, 3405 __sum16 check, __wsum pseudo) 3406 { 3407 skb->csum = ~pseudo; 3408 skb->ip_summed = CHECKSUM_COMPLETE; 3409 } 3410 3411 #define skb_checksum_try_convert(skb, proto, check, compute_pseudo) \ 3412 do { \ 3413 if (__skb_checksum_convert_check(skb)) \ 3414 __skb_checksum_convert(skb, check, \ 3415 compute_pseudo(skb, proto)); \ 3416 } while (0) 3417 3418 static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr, 3419 u16 start, u16 offset) 3420 { 3421 skb->ip_summed = CHECKSUM_PARTIAL; 3422 skb->csum_start = ((unsigned char *)ptr + start) - skb->head; 3423 skb->csum_offset = offset - start; 3424 } 3425 3426 /* Update skbuf and packet to reflect the remote checksum offload operation. 3427 * When called, ptr indicates the starting point for skb->csum when 3428 * ip_summed is CHECKSUM_COMPLETE. If we need create checksum complete 3429 * here, skb_postpull_rcsum is done so skb->csum start is ptr. 3430 */ 3431 static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr, 3432 int start, int offset, bool nopartial) 3433 { 3434 __wsum delta; 3435 3436 if (!nopartial) { 3437 skb_remcsum_adjust_partial(skb, ptr, start, offset); 3438 return; 3439 } 3440 3441 if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) { 3442 __skb_checksum_complete(skb); 3443 skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data); 3444 } 3445 3446 delta = remcsum_adjust(ptr, skb->csum, start, offset); 3447 3448 /* Adjust skb->csum since we changed the packet */ 3449 skb->csum = csum_add(skb->csum, delta); 3450 } 3451 3452 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 3453 void nf_conntrack_destroy(struct nf_conntrack *nfct); 3454 static inline void nf_conntrack_put(struct nf_conntrack *nfct) 3455 { 3456 if (nfct && atomic_dec_and_test(&nfct->use)) 3457 nf_conntrack_destroy(nfct); 3458 } 3459 static inline void nf_conntrack_get(struct nf_conntrack *nfct) 3460 { 3461 if (nfct) 3462 atomic_inc(&nfct->use); 3463 } 3464 #endif 3465 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 3466 static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge) 3467 { 3468 if (nf_bridge && atomic_dec_and_test(&nf_bridge->use)) 3469 kfree(nf_bridge); 3470 } 3471 static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge) 3472 { 3473 if (nf_bridge) 3474 atomic_inc(&nf_bridge->use); 3475 } 3476 #endif /* CONFIG_BRIDGE_NETFILTER */ 3477 static inline void nf_reset(struct sk_buff *skb) 3478 { 3479 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 3480 nf_conntrack_put(skb->nfct); 3481 skb->nfct = NULL; 3482 #endif 3483 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 3484 nf_bridge_put(skb->nf_bridge); 3485 skb->nf_bridge = NULL; 3486 #endif 3487 } 3488 3489 static inline void nf_reset_trace(struct sk_buff *skb) 3490 { 3491 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES) 3492 skb->nf_trace = 0; 3493 #endif 3494 } 3495 3496 /* Note: This doesn't put any conntrack and bridge info in dst. */ 3497 static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src, 3498 bool copy) 3499 { 3500 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 3501 dst->nfct = src->nfct; 3502 nf_conntrack_get(src->nfct); 3503 if (copy) 3504 dst->nfctinfo = src->nfctinfo; 3505 #endif 3506 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 3507 dst->nf_bridge = src->nf_bridge; 3508 nf_bridge_get(src->nf_bridge); 3509 #endif 3510 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES) 3511 if (copy) 3512 dst->nf_trace = src->nf_trace; 3513 #endif 3514 } 3515 3516 static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) 3517 { 3518 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 3519 nf_conntrack_put(dst->nfct); 3520 #endif 3521 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 3522 nf_bridge_put(dst->nf_bridge); 3523 #endif 3524 __nf_copy(dst, src, true); 3525 } 3526 3527 #ifdef CONFIG_NETWORK_SECMARK 3528 static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from) 3529 { 3530 to->secmark = from->secmark; 3531 } 3532 3533 static inline void skb_init_secmark(struct sk_buff *skb) 3534 { 3535 skb->secmark = 0; 3536 } 3537 #else 3538 static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from) 3539 { } 3540 3541 static inline void skb_init_secmark(struct sk_buff *skb) 3542 { } 3543 #endif 3544 3545 static inline bool skb_irq_freeable(const struct sk_buff *skb) 3546 { 3547 return !skb->destructor && 3548 #if IS_ENABLED(CONFIG_XFRM) 3549 !skb->sp && 3550 #endif 3551 #if IS_ENABLED(CONFIG_NF_CONNTRACK) 3552 !skb->nfct && 3553 #endif 3554 !skb->_skb_refdst && 3555 !skb_has_frag_list(skb); 3556 } 3557 3558 static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping) 3559 { 3560 skb->queue_mapping = queue_mapping; 3561 } 3562 3563 static inline u16 skb_get_queue_mapping(const struct sk_buff *skb) 3564 { 3565 return skb->queue_mapping; 3566 } 3567 3568 static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from) 3569 { 3570 to->queue_mapping = from->queue_mapping; 3571 } 3572 3573 static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue) 3574 { 3575 skb->queue_mapping = rx_queue + 1; 3576 } 3577 3578 static inline u16 skb_get_rx_queue(const struct sk_buff *skb) 3579 { 3580 return skb->queue_mapping - 1; 3581 } 3582 3583 static inline bool skb_rx_queue_recorded(const struct sk_buff *skb) 3584 { 3585 return skb->queue_mapping != 0; 3586 } 3587 3588 static inline struct sec_path *skb_sec_path(struct sk_buff *skb) 3589 { 3590 #ifdef CONFIG_XFRM 3591 return skb->sp; 3592 #else 3593 return NULL; 3594 #endif 3595 } 3596 3597 /* Keeps track of mac header offset relative to skb->head. 3598 * It is useful for TSO of Tunneling protocol. e.g. GRE. 3599 * For non-tunnel skb it points to skb_mac_header() and for 3600 * tunnel skb it points to outer mac header. 3601 * Keeps track of level of encapsulation of network headers. 3602 */ 3603 struct skb_gso_cb { 3604 union { 3605 int mac_offset; 3606 int data_offset; 3607 }; 3608 int encap_level; 3609 __wsum csum; 3610 __u16 csum_start; 3611 }; 3612 #define SKB_SGO_CB_OFFSET 32 3613 #define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET)) 3614 3615 static inline int skb_tnl_header_len(const struct sk_buff *inner_skb) 3616 { 3617 return (skb_mac_header(inner_skb) - inner_skb->head) - 3618 SKB_GSO_CB(inner_skb)->mac_offset; 3619 } 3620 3621 static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra) 3622 { 3623 int new_headroom, headroom; 3624 int ret; 3625 3626 headroom = skb_headroom(skb); 3627 ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC); 3628 if (ret) 3629 return ret; 3630 3631 new_headroom = skb_headroom(skb); 3632 SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom); 3633 return 0; 3634 } 3635 3636 static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res) 3637 { 3638 /* Do not update partial checksums if remote checksum is enabled. */ 3639 if (skb->remcsum_offload) 3640 return; 3641 3642 SKB_GSO_CB(skb)->csum = res; 3643 SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head; 3644 } 3645 3646 /* Compute the checksum for a gso segment. First compute the checksum value 3647 * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and 3648 * then add in skb->csum (checksum from csum_start to end of packet). 3649 * skb->csum and csum_start are then updated to reflect the checksum of the 3650 * resultant packet starting from the transport header-- the resultant checksum 3651 * is in the res argument (i.e. normally zero or ~ of checksum of a pseudo 3652 * header. 3653 */ 3654 static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res) 3655 { 3656 unsigned char *csum_start = skb_transport_header(skb); 3657 int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start; 3658 __wsum partial = SKB_GSO_CB(skb)->csum; 3659 3660 SKB_GSO_CB(skb)->csum = res; 3661 SKB_GSO_CB(skb)->csum_start = csum_start - skb->head; 3662 3663 return csum_fold(csum_partial(csum_start, plen, partial)); 3664 } 3665 3666 static inline bool skb_is_gso(const struct sk_buff *skb) 3667 { 3668 return skb_shinfo(skb)->gso_size; 3669 } 3670 3671 /* Note: Should be called only if skb_is_gso(skb) is true */ 3672 static inline bool skb_is_gso_v6(const struct sk_buff *skb) 3673 { 3674 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; 3675 } 3676 3677 void __skb_warn_lro_forwarding(const struct sk_buff *skb); 3678 3679 static inline bool skb_warn_if_lro(const struct sk_buff *skb) 3680 { 3681 /* LRO sets gso_size but not gso_type, whereas if GSO is really 3682 * wanted then gso_type will be set. */ 3683 const struct skb_shared_info *shinfo = skb_shinfo(skb); 3684 3685 if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 && 3686 unlikely(shinfo->gso_type == 0)) { 3687 __skb_warn_lro_forwarding(skb); 3688 return true; 3689 } 3690 return false; 3691 } 3692 3693 static inline void skb_forward_csum(struct sk_buff *skb) 3694 { 3695 /* Unfortunately we don't support this one. Any brave souls? */ 3696 if (skb->ip_summed == CHECKSUM_COMPLETE) 3697 skb->ip_summed = CHECKSUM_NONE; 3698 } 3699 3700 /** 3701 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE 3702 * @skb: skb to check 3703 * 3704 * fresh skbs have their ip_summed set to CHECKSUM_NONE. 3705 * Instead of forcing ip_summed to CHECKSUM_NONE, we can 3706 * use this helper, to document places where we make this assertion. 3707 */ 3708 static inline void skb_checksum_none_assert(const struct sk_buff *skb) 3709 { 3710 #ifdef DEBUG 3711 BUG_ON(skb->ip_summed != CHECKSUM_NONE); 3712 #endif 3713 } 3714 3715 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off); 3716 3717 int skb_checksum_setup(struct sk_buff *skb, bool recalculate); 3718 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, 3719 unsigned int transport_len, 3720 __sum16(*skb_chkf)(struct sk_buff *skb)); 3721 3722 /** 3723 * skb_head_is_locked - Determine if the skb->head is locked down 3724 * @skb: skb to check 3725 * 3726 * The head on skbs build around a head frag can be removed if they are 3727 * not cloned. This function returns true if the skb head is locked down 3728 * due to either being allocated via kmalloc, or by being a clone with 3729 * multiple references to the head. 3730 */ 3731 static inline bool skb_head_is_locked(const struct sk_buff *skb) 3732 { 3733 return !skb->head_frag || skb_cloned(skb); 3734 } 3735 3736 /** 3737 * skb_gso_network_seglen - Return length of individual segments of a gso packet 3738 * 3739 * @skb: GSO skb 3740 * 3741 * skb_gso_network_seglen is used to determine the real size of the 3742 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP). 3743 * 3744 * The MAC/L2 header is not accounted for. 3745 */ 3746 static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb) 3747 { 3748 unsigned int hdr_len = skb_transport_header(skb) - 3749 skb_network_header(skb); 3750 return hdr_len + skb_gso_transport_seglen(skb); 3751 } 3752 3753 /* Local Checksum Offload. 3754 * Compute outer checksum based on the assumption that the 3755 * inner checksum will be offloaded later. 3756 * See Documentation/networking/checksum-offloads.txt for 3757 * explanation of how this works. 3758 * Fill in outer checksum adjustment (e.g. with sum of outer 3759 * pseudo-header) before calling. 3760 * Also ensure that inner checksum is in linear data area. 3761 */ 3762 static inline __wsum lco_csum(struct sk_buff *skb) 3763 { 3764 unsigned char *csum_start = skb_checksum_start(skb); 3765 unsigned char *l4_hdr = skb_transport_header(skb); 3766 __wsum partial; 3767 3768 /* Start with complement of inner checksum adjustment */ 3769 partial = ~csum_unfold(*(__force __sum16 *)(csum_start + 3770 skb->csum_offset)); 3771 3772 /* Add in checksum of our headers (incl. outer checksum 3773 * adjustment filled in by caller) and return result. 3774 */ 3775 return csum_partial(l4_hdr, csum_start - l4_hdr, partial); 3776 } 3777 3778 #endif /* __KERNEL__ */ 3779 #endif /* _LINUX_SKBUFF_H */ 3780